idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
54,200
def prune_candidates(candidates):
    """Reduce the number of candidate intersection pairs.

    Keeps only the pairs whose convex hulls actually collide.
    """
    kept = []
    for first, second in candidates:
        # A Linearization wraps the curve; unwrap to get the raw nodes.
        nodes1 = (
            first.curve.nodes if first.__class__ is Linearization else first.nodes
        )
        nodes2 = (
            second.curve.nodes if second.__class__ is Linearization else second.nodes
        )
        if convex_hull_collide(nodes1, nodes2):
            kept.append((first, second))
    return kept
Reduce number of candidate intersection pairs .
54,201
def make_same_degree(nodes1, nodes2):
    """Degree-elevate one of the curves so that both have the same degree.

    Returns the (possibly elevated) pair ``(nodes1, nodes2)``.
    """
    _, num_nodes1 = nodes1.shape
    _, num_nodes2 = nodes2.shape
    # Only one of the two loops runs (the other range is empty).
    for _ in six.moves.xrange(num_nodes2 - num_nodes1):
        nodes1 = _curve_helpers.elevate_nodes(nodes1)
    for _ in six.moves.xrange(num_nodes1 - num_nodes2):
        nodes2 = _curve_helpers.elevate_nodes(nodes2)
    return nodes1, nodes2
Degree - elevate a curve so two curves have matching degree .
54,202
def coincident_parameters(nodes1, nodes2):
    r"""Check if two B\u00e9zier curves are coincident.

    Locates each curve's endpoints on the other curve and, when an
    overlapping sub-interval exists, verifies that the specialized
    curves agree.  Returns a pair of ``(s, t)`` parameter pairs for the
    shared segment's endpoints, or :data:`None` when the curves are not
    coincident (or overlap on an interval narrower than
    ``_MIN_INTERVAL_WIDTH``).
    """
    nodes1, nodes2 = make_same_degree(nodes1, nodes2)
    # Locate curve 2's endpoints on curve 1.
    s_initial = _curve_helpers.locate_point(
        nodes1, nodes2[:, 0].reshape((2, 1), order="F")
    )
    s_final = _curve_helpers.locate_point(
        nodes1, nodes2[:, -1].reshape((2, 1), order="F")
    )
    if s_initial is not None and s_final is not None:
        # Curve 2 lies entirely within curve 1's parameter range.
        specialized1 = _curve_helpers.specialize_curve(nodes1, s_initial, s_final)
        if _helpers.vector_close(
            specialized1.ravel(order="F"), nodes2.ravel(order="F")
        ):
            return ((s_initial, 0.0), (s_final, 1.0))
        else:
            return None
    # Locate curve 1's endpoints on curve 2.
    t_initial = _curve_helpers.locate_point(
        nodes2, nodes1[:, 0].reshape((2, 1), order="F")
    )
    t_final = _curve_helpers.locate_point(
        nodes2, nodes1[:, -1].reshape((2, 1), order="F")
    )
    if t_initial is None and t_final is None:
        return None
    if t_initial is not None and t_final is not None:
        # Curve 1 lies entirely within curve 2's parameter range.
        specialized2 = _curve_helpers.specialize_curve(nodes2, t_initial, t_final)
        if _helpers.vector_close(
            nodes1.ravel(order="F"), specialized2.ravel(order="F")
        ):
            return ((0.0, t_initial), (1.0, t_final))
        else:
            return None
    if s_initial is None and s_final is None:
        return None
    # Partial overlap: exactly one endpoint of each curve lies on the
    # other; pick the shared sub-interval accordingly.
    if s_initial is None:
        if t_initial is None:
            start_s = s_final
            end_s = 1.0
            start_t = 1.0
            end_t = t_final
        else:
            start_s = 0.0
            end_s = s_final
            start_t = t_initial
            end_t = 1.0
    else:
        if t_initial is None:
            start_s = s_initial
            end_s = 1.0
            start_t = 0.0
            end_t = t_final
        else:
            start_s = 0.0
            end_s = s_initial
            start_t = t_initial
            end_t = 0.0
    width_s = abs(start_s - end_s)
    width_t = abs(start_t - end_t)
    # An overlap of (near-)zero width is treated as no overlap.
    if width_s < _MIN_INTERVAL_WIDTH and width_t < _MIN_INTERVAL_WIDTH:
        return None
    specialized1 = _curve_helpers.specialize_curve(nodes1, start_s, end_s)
    specialized2 = _curve_helpers.specialize_curve(nodes2, start_t, end_t)
    if _helpers.vector_close(
        specialized1.ravel(order="F"), specialized2.ravel(order="F")
    ):
        return ((start_s, start_t), (end_s, end_t))
    else:
        return None
Check if two Bézier curves are coincident.
54,203
def check_lines(first, second):
    """Check if two curves are exact lines and, if so, intersect them.

    Returns ``(both_linear, result)`` where ``result`` is a pair of
    ``(parameters, coincident)`` when both inputs are error-free
    linearizations, and :data:`None` otherwise.
    """
    both_linear = (
        first.__class__ is Linearization
        and second.__class__ is Linearization
        and first.error == 0.0
        and second.error == 0.0
    )
    if not both_linear:
        return False, None
    s, t, success = segment_intersection(
        first.start_node, first.end_node, second.start_node, second.end_node
    )
    if success:
        if _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0):
            result = np.asfortranarray([[s], [t]]), False
        else:
            # Intersection of the infinite lines lies outside both segments.
            result = np.empty((2, 0), order="F"), False
    else:
        # Parallel lines: either disjoint, or coincident with parameters.
        disjoint, params = parallel_lines_parameters(
            first.start_node, first.end_node, second.start_node, second.end_node,
        )
        if disjoint:
            result = np.empty((2, 0), order="F"), False
        else:
            result = params, True
    return True, result
Checks if two curves are lines and tries to intersect them .
54,204
def subdivide(self):
    """Split the curve into a left and a right half.

    Returns a pair of ``SubdividedCurve`` objects covering the two
    halves of the current parameter interval.
    """
    left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self.nodes)
    midpoint = 0.5 * (self.start + self.end)
    left_half = SubdividedCurve(
        left_nodes, self.original_nodes, start=self.start, end=midpoint
    )
    right_half = SubdividedCurve(
        right_nodes, self.original_nodes, start=midpoint, end=self.end
    )
    return left_half, right_half
Split the curve into a left and right half .
54,205
def plot(self, num_pts, color=None, alpha=None, ax=None):
    """Plot the current curve (2D only).

    Evaluates the curve at ``num_pts`` evenly spaced parameters and
    draws it on ``ax`` (a new axis is created when ``ax`` is None).
    Returns the axis used.
    """
    if self._dimension != 2:
        raise NotImplementedError(
            "2D is the only supported dimension",
            "Current dimension",
            self._dimension,
        )
    parameters = np.linspace(0.0, 1.0, num_pts)
    points = self.evaluate_multi(parameters)
    if ax is None:
        ax = _plot_helpers.new_axis()
    ax.plot(points[0, :], points[1, :], color=color, alpha=alpha)
    return ax
Plot the current curve .
54,206
def intersect(self, other, strategy=IntersectionStrategy.GEOMETRIC, _verify=True):
    """Find the points of intersection with another curve.

    Returns the ``(s, t)`` parameter values of the intersections.
    Raises TypeError / NotImplementedError on invalid input (when
    ``_verify`` is set) and ValueError for an unknown strategy.
    """
    if _verify:
        if not isinstance(other, Curve):
            raise TypeError(
                "Can only intersect with another curve", "Received", other
            )
        if self._dimension != 2 or other._dimension != 2:
            raise NotImplementedError("Intersection only implemented in 2D")
    # Dispatch on the requested intersection strategy.
    if strategy == IntersectionStrategy.GEOMETRIC:
        all_intersections = _geometric_intersection.all_intersections
    elif strategy == IntersectionStrategy.ALGEBRAIC:
        all_intersections = _algebraic_intersection.all_intersections
    else:
        raise ValueError("Unexpected strategy.", strategy)
    st_vals, _ = all_intersections(self._nodes, other._nodes)
    return st_vals
Find the points of intersection with another curve .
54,207
def elevate(self):
    r"""Return a degree-elevated version of the current curve."""
    elevated = _curve_helpers.elevate_nodes(self._nodes)
    return Curve(elevated, self._degree + 1, _copy=False)
r Return a degree - elevated version of the current curve .
54,208
def reduce_(self):
    r"""Return a degree-reduced version of the current curve."""
    reduced = _curve_helpers.reduce_pseudo_inverse(self._nodes)
    return Curve(reduced, self._degree - 1, _copy=False)
r Return a degree - reduced version of the current curve .
54,209
def specialize(self, start, end):
    """Specialize the curve to the sub-interval ``[start, end]``.

    Returns a new curve of the same degree.
    """
    specialized = _curve_helpers.specialize_curve(self._nodes, start, end)
    return Curve(specialized, self._degree, _copy=False)
Specialize the curve to a given sub - interval .
54,210
def locate(self, point):
    r"""Find a point on the current curve.

    Raises ValueError when ``point`` is not a column vector of shape
    ``(dimension, 1)``; otherwise returns the located parameter (or
    whatever ``locate_point`` returns for points not on the curve).
    """
    if point.shape != (self._dimension, 1):
        point_dimensions = " x ".join(
            str(dimension) for dimension in point.shape
        )
        msg = _LOCATE_ERROR_TEMPLATE.format(
            self._dimension, self._dimension, point, point_dimensions
        )
        raise ValueError(msg)
    return _curve_helpers.locate_point(self._nodes, point)
r Find a point on the current curve .
54,211
def clean_file(c_source, virtualenv_dirname):
    """Strip trailing whitespace and remove virtualenv paths in a C source.

    Rewrites ``c_source`` in place: the local ``site-packages`` prefix
    (built from ``virtualenv_dirname`` and the running interpreter
    version) is removed and every line is right-stripped.
    """
    with open(c_source, "r") as file_obj:
        contents = file_obj.read().rstrip()
    py_version = "python{}.{}".format(*sys.version_info[:2])
    lib_path = os.path.join(
        ".nox", virtualenv_dirname, "lib", py_version, "site-packages", ""
    )
    contents = contents.replace(lib_path, "")
    cleaned = "".join(line.rstrip() + "\n" for line in contents.split("\n"))
    with open(c_source, "w") as file_obj:
        file_obj.write(cleaned)
Strip trailing whitespace and clean up local names in C source .
54,212
def get_version():
    """Get the current version from ``setup.py``.

    Stubs out the ``setup_helpers*`` modules in ``sys.modules`` so that
    importing ``setup.py`` does not trigger their platform-specific
    side effects, then loads ``setup.py`` and returns its ``VERSION``.
    """
    import importlib.util

    sys.modules["setup_helpers"] = object()
    sys.modules["setup_helpers_macos"] = object()
    sys.modules["setup_helpers_windows"] = object()
    filename = os.path.join(_ROOT_DIR, "setup.py")
    # BUG FIX: ``SourceFileLoader.load_module()`` is deprecated; use the
    # spec-based loading API instead.
    spec = importlib.util.spec_from_file_location("setup", filename)
    setup_mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(setup_mod)
    return setup_mod.VERSION
Get the current version from setup . py .
54,213
def populate_readme(version, circleci_build, appveyor_build, coveralls_build, travis_build):
    """Populate ``README.rst`` with release-specific data.

    Reads the release template and writes the formatted result to the
    README file.
    """
    with open(RELEASE_README_FILE, "r") as handle:
        template = handle.read()
    template_kwargs = {
        "version": version,
        "circleci_build": circleci_build,
        "appveyor_build": appveyor_build,
        "coveralls_build": coveralls_build,
        "travis_build": travis_build,
    }
    contents = template.format(**template_kwargs)
    with open(README_FILE, "w") as handle:
        handle.write(contents)
Populates README . rst with release - specific data .
54,214
def populate_native_libraries(version):
    """Populate ``binary-extension.rst`` with release-specific data."""
    with open(BINARY_EXT_TEMPLATE, "r") as handle:
        template = handle.read()
    with open(BINARY_EXT_FILE, "w") as handle:
        handle.write(template.format(revision=version))
Populates binary - extension . rst with release - specific data .
54,215
def populate_development(version):
    """Populate ``DEVELOPMENT.rst`` with release-specific data."""
    with open(DEVELOPMENT_TEMPLATE, "r") as handle:
        template = handle.read()
    with open(DEVELOPMENT_FILE, "w") as handle:
        handle.write(template.format(revision=version, rtd_version=version))
Populates DEVELOPMENT . rst with release - specific data .
54,216
def main():
    """Populate the release templates with user-supplied build IDs."""
    version = get_version()
    build_ids = [
        six.moves.input("CircleCI Build ID: "),
        six.moves.input("AppVeyor Build ID: "),
        six.moves.input("Coveralls Build ID: "),
        six.moves.input("Travis Build ID: "),
    ]
    circleci_build, appveyor_build, coveralls_build, travis_build = build_ids
    populate_readme(
        version, circleci_build, appveyor_build, coveralls_build, travis_build
    )
    populate_index(
        version, circleci_build, appveyor_build, coveralls_build, travis_build
    )
    populate_native_libraries(version)
    populate_development(version)
Populate the templates with release - specific fields .
54,217
def make_subdivision_matrices(degree):
    """Make the left/right matrices used to subdivide a curve of ``degree``.

    Returns a pair of Fortran-ordered ``(degree + 1, degree + 1)``
    matrices; the right matrix is built as the mirror image of the left.
    """
    size = degree + 1
    left = np.zeros((size, size), order="F")
    right = np.zeros((size, size), order="F")
    left[0, 0] = 1.0
    right[-1, -1] = 1.0
    for col in range(1, size):
        # Each new column averages the previous one shifted by one row.
        half_prev = 0.5 * left[:col, col - 1]
        left[:col, col] = half_prev
        left[1:col + 1, col] += half_prev
        complement = degree - col
        right[-(col + 1):, complement] = left[:col + 1, col]
    return left, right
Make the matrix used to subdivide a curve .
54,218
def _subdivide_nodes(nodes):
    """Subdivide a curve into two sub-curves.

    Uses precomputed matrices for degrees 1-3 and falls back to
    ``make_subdivision_matrices`` otherwise.
    """
    _, num_nodes = np.shape(nodes)
    if num_nodes == 2:
        left_mat, right_mat = _LINEAR_SUBDIVIDE_LEFT, _LINEAR_SUBDIVIDE_RIGHT
    elif num_nodes == 3:
        left_mat, right_mat = _QUADRATIC_SUBDIVIDE_LEFT, _QUADRATIC_SUBDIVIDE_RIGHT
    elif num_nodes == 4:
        left_mat, right_mat = _CUBIC_SUBDIVIDE_LEFT, _CUBIC_SUBDIVIDE_RIGHT
    else:
        left_mat, right_mat = make_subdivision_matrices(num_nodes - 1)
    left_nodes = _helpers.matrix_product(nodes, left_mat)
    right_nodes = _helpers.matrix_product(nodes, right_mat)
    return left_nodes, right_nodes
Subdivide a curve into two sub - curves .
54,219
def _evaluate_multi_barycentric(nodes, lambda1, lambda2):
    r"""Evaluate a B\u00e9zier-type function at many barycentric parameter pairs.

    ``nodes`` is a ``(dimension, num_nodes)`` array and ``lambda1`` /
    ``lambda2`` are 1D arrays of the same length holding the barycentric
    weights for each evaluation point.  Returns a
    ``(dimension, num_vals)`` Fortran-ordered array.
    """
    num_vals, = lambda1.shape
    dimension, num_nodes = nodes.shape
    degree = num_nodes - 1
    # Promote the weight vectors to row vectors so they broadcast
    # against the (dimension, num_vals) result.
    lambda1 = lambda1[np.newaxis, :]
    lambda2 = lambda2[np.newaxis, :]
    result = np.zeros((dimension, num_vals), order="F")
    result += lambda1 * nodes[:, [0]]
    binom_val = 1.0
    lambda2_pow = np.ones((1, num_vals), order="F")
    for index in six.moves.xrange(1, degree):
        lambda2_pow *= lambda2
        # Incrementally update the binomial coefficient C(degree, index).
        binom_val = (binom_val * (degree - index + 1)) / index
        result += binom_val * lambda2_pow * nodes[:, [index]]
    # Horner-style finish: scale the accumulated terms by lambda1 and
    # add the final (pure lambda2) term.
    result *= lambda1
    result += lambda2 * lambda2_pow * nodes[:, [degree]]
    return result
Evaluates a Bézier-type function.
54,220
def _compute_length(nodes):
    r"""Approximately compute the length of a curve.

    For a line segment the length is exact; otherwise the norm of the
    first derivative is integrated numerically with SciPy.  Raises
    OSError when SciPy is unavailable.
    """
    _, num_nodes = np.shape(nodes)
    first_deriv = (num_nodes - 1) * (nodes[:, 1:] - nodes[:, :-1])
    if num_nodes == 2:
        # Straight line: the derivative is constant.
        return np.linalg.norm(first_deriv[:, 0], ord=2)
    if _scipy_int is None:
        raise OSError("This function requires SciPy for quadrature.")
    integrand = functools.partial(vec_size, first_deriv)
    length, _ = _scipy_int.quad(integrand, 0.0, 1.0)
    return length
r Approximately compute the length of a curve .
54,221
def _elevate_nodes(nodes):
    r"""Degree-elevate a B\u00e9zier curve by one degree.

    Returns a new ``(dimension, num_nodes + 1)`` array; the endpoints
    are preserved and the interior nodes are convex combinations of
    adjacent original nodes.
    """
    dimension, num_nodes = np.shape(nodes)
    elevated = np.empty((dimension, num_nodes + 1), order="F")
    multipliers = np.arange(1, num_nodes, dtype=_FLOAT64)[np.newaxis, :]
    denominator = float(num_nodes)
    elevated[:, 1:-1] = (
        multipliers * nodes[:, :-1] + (denominator - multipliers) * nodes[:, 1:]
    )
    # Dividing the whole array also touches the (still uninitialized)
    # end columns, but those are overwritten just below.
    elevated /= denominator
    elevated[:, 0] = nodes[:, 0]
    elevated[:, -1] = nodes[:, -1]
    return elevated
Degree-elevate a Bézier curve.
54,222
def de_casteljau_one_round(nodes, lambda1, lambda2):
    """Perform one round of de Casteljau's algorithm.

    Blends each adjacent pair of nodes with weights ``lambda1`` and
    ``lambda2``, producing one fewer node.
    """
    weighted_start = lambda1 * nodes[:, :-1]
    weighted_end = lambda2 * nodes[:, 1:]
    return np.asfortranarray(weighted_start + weighted_end)
Perform one round of de Casteljau s algorithm .
54,223
def _specialize_curve(nodes, start, end):
    """Specialize a curve to a re-parameterization of ``[start, end]``.

    Runs repeated rounds of de Casteljau's algorithm with the two
    endpoint weight pairs, tracking which weight was used at each round
    via tuple keys, then assembles the specialized nodes from the
    partial values.
    """
    _, num_nodes = np.shape(nodes)
    # De Casteljau weights for the two interval endpoints.
    weights = ((1.0 - start, start), (1.0 - end, end))
    partial_vals = {
        (0,): de_casteljau_one_round(nodes, *weights[0]),
        (1,): de_casteljau_one_round(nodes, *weights[1]),
    }
    for _ in six.moves.xrange(num_nodes - 2, 0, -1):
        new_partial = {}
        for key, sub_nodes in six.iteritems(partial_vals):
            # Only extend with non-decreasing weight indices; mixed
            # orders are equivalent, so this avoids duplicate work.
            for next_id in six.moves.xrange(key[-1], 1 + 1):
                new_key = key + (next_id,)
                new_partial[new_key] = de_casteljau_one_round(
                    sub_nodes, *weights[next_id]
                )
        partial_vals = new_partial
    result = np.empty(nodes.shape, order="F")
    for index in six.moves.xrange(num_nodes):
        # Node ``index`` uses the start weight (num_nodes - index - 1)
        # times and the end weight ``index`` times.
        key = (0,) * (num_nodes - index - 1) + (1,) * index
        result[:, [index]] = partial_vals[key]
    return result
Specialize a curve to a re - parameterization
54,224
def _locate_point(nodes, point):
    r"""Locate a point on a curve.

    Repeatedly subdivides the curve, keeping only sub-curves whose
    bounding region contains ``point``, then averages the surviving
    parameter intervals and refines the estimate with Newton's method.
    Returns a parameter clamped to ``[0, 1]``, or :data:`None` when no
    candidate interval survives.  Raises ValueError when the surviving
    intervals are not clustered (the point matches multiple parameters).
    """
    candidates = [(0.0, 1.0, nodes)]
    for _ in six.moves.xrange(_MAX_LOCATE_SUBDIVISIONS + 1):
        next_candidates = []
        for start, end, candidate in candidates:
            if _helpers.contains_nd(candidate, point.ravel(order="F")):
                midpoint = 0.5 * (start + end)
                left, right = subdivide_nodes(candidate)
                next_candidates.extend(
                    ((start, midpoint, left), (midpoint, end, right))
                )
        candidates = next_candidates
    if not candidates:
        return None
    params = [(start, end) for start, end, _ in candidates]
    # The surviving intervals should all be clustered around a single
    # parameter value.
    if np.std(params) > _LOCATE_STD_CAP:
        raise ValueError("Parameters not close enough to one another", params)
    s_approx = np.mean(params)
    s_approx = newton_refine(nodes, point, s_approx)
    # Newton's method may step slightly outside the unit interval; clamp.
    if s_approx < 0.0:
        return 0.0
    elif s_approx > 1.0:
        return 1.0
    else:
        return s_approx
r Locate a point on a curve .
54,225
def _reduce_pseudo_inverse(nodes):
    """Perform degree reduction for a curve via pseudo-inverse.

    Only degrees 1-4 are supported; raises ``UnsupportedDegree``
    otherwise.
    """
    _, num_nodes = np.shape(nodes)
    lookup = {
        2: (_REDUCTION0, _REDUCTION_DENOM0),
        3: (_REDUCTION1, _REDUCTION_DENOM1),
        4: (_REDUCTION2, _REDUCTION_DENOM2),
        5: (_REDUCTION3, _REDUCTION_DENOM3),
    }
    if num_nodes not in lookup:
        raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3, 4))
    reduction, denom = lookup[num_nodes]
    result = _helpers.matrix_product(nodes, reduction)
    result /= denom
    return result
Performs degree-reduction for a Bézier curve.
54,226
def projection_error(nodes, projected):
    """Compute the relative Frobenius-norm error between nodes and projection.

    Returns ``0.0`` when the absolute error is zero (avoiding a
    division by zero for all-zero ``nodes``).
    """
    absolute_err = np.linalg.norm(nodes - projected, ord="fro")
    if absolute_err == 0.0:
        return absolute_err
    return absolute_err / np.linalg.norm(nodes, ord="fro")
Compute the error between nodes and the projected nodes .
54,227
def maybe_reduce(nodes):
    r"""Reduce the nodes when the curve is degree-elevated.

    Projects the nodes onto the lower-degree space; when the projection
    error is below ``_REDUCE_THRESHOLD``, the reduced nodes are
    returned.  Returns ``(was_reduced, nodes)``.  Raises
    ``UnsupportedDegree`` for degrees above 4.
    """
    _, num_nodes = nodes.shape
    if num_nodes < 2:
        return False, nodes
    lookup = {
        2: (_PROJECTION0, _PROJ_DENOM0),
        3: (_PROJECTION1, _PROJ_DENOM1),
        4: (_PROJECTION2, _PROJ_DENOM2),
        5: (_PROJECTION3, _PROJ_DENOM3),
    }
    if num_nodes not in lookup:
        raise _helpers.UnsupportedDegree(
            num_nodes - 1, supported=(0, 1, 2, 3, 4)
        )
    projection, denom = lookup[num_nodes]
    projected = _helpers.matrix_product(nodes, projection) / denom
    relative_err = projection_error(nodes, projected)
    if relative_err < _REDUCE_THRESHOLD:
        return True, reduce_pseudo_inverse(nodes)
    return False, nodes
r Reduce nodes in a curve if they are degree - elevated .
54,228
def _full_reduce(nodes):
    """Apply degree reduction until the nodes can no longer be reduced."""
    while True:
        was_reduced, nodes = maybe_reduce(nodes)
        if not was_reduced:
            return nodes
Apply degree reduction to nodes until it can no longer be reduced .
54,229
def get_desired():
    """Populate ``DESIRED_TEMPLATE`` with the public members (if any)."""
    public_members = get_public_members()
    if not public_members:
        members = ""
    else:
        members = "\n :members: {}".format(", ".join(public_members))
    return DESIRED_TEMPLATE.format(members=members)
Populate DESIRED_TEMPLATE with public members .
54,230
def main():
    """Replace autogenerated contents when they match the expected baseline.

    Raises ValueError when the file holds neither the expected nor the
    desired contents.
    """
    with open(FILENAME, "r") as handle:
        contents = handle.read()
    desired = get_desired()
    if contents == EXPECTED:
        with open(FILENAME, "w") as handle:
            handle.write(desired)
    elif contents != desired:
        raise ValueError("Unexpected contents", contents, "Expected", EXPECTED)
Main entry point to replace autogenerated contents .
54,231
def run_cleanup(build_ext_cmd):
    """Move built libraries back into the source tree after an in-place build."""
    if not build_ext_cmd.inplace:
        return
    bezier_dir = os.path.join("src", "bezier")
    for built_dir in (LIB_DIR, DLL_DIR):
        shutil.move(os.path.join(build_ext_cmd.build_lib, built_dir), bezier_dir)
Cleanup after BuildFortranThenExt . run .
54,232
def clean(session):
    """Clean up build artifacts: whole directories and glob-matched files."""
    # Directories removed recursively (missing ones are ignored).
    clean_dirs = [
        get_path(".cache"),
        get_path(".coverage"),
        get_path(".pytest_cache"),
        get_path("__pycache__"),
        get_path("build"),
        get_path("dist"),
        get_path("docs", "__pycache__"),
        get_path("docs", "build"),
        get_path("scripts", "macos", "__pycache__"),
        get_path("scripts", "macos", "dist_wheels"),
        get_path("scripts", "macos", "fixed_wheels"),
        get_path("src", "bezier.egg-info"),
        get_path("src", "bezier", "__pycache__"),
        get_path("src", "bezier", "extra-dll"),
        get_path("src", "bezier", "lib"),
        get_path("tests", "__pycache__"),
        get_path("tests", "functional", "__pycache__"),
        get_path("tests", "unit", "__pycache__"),
        get_path("wheelhouse"),
    ]
    # Individual files matched by glob patterns.
    clean_globs = [
        get_path(".coverage"),
        get_path("*.mod"),
        get_path("*.pyc"),
        get_path("src", "bezier", "*.pyc"),
        get_path("src", "bezier", "*.pyd"),
        get_path("src", "bezier", "*.so"),
        get_path("src", "bezier", "quadpack", "*.o"),
        get_path("src", "bezier", "*.o"),
        get_path("tests", "*.pyc"),
        get_path("tests", "functional", "*.pyc"),
        get_path("tests", "unit", "*.pyc"),
    ]
    for dir_path in clean_dirs:
        session.run(shutil.rmtree, dir_path, ignore_errors=True)
    for glob_path in clean_globs:
        for filename in glob.glob(glob_path):
            session.run(os.remove, filename)
Clean up build files .
54,233
def register(name, fn=None):
    """Register ``fn`` (or decorate a function) as a hook under ``name``.

    Usable directly -- ``register("event", callback)`` -- or as a
    decorator factory -- ``@register("event")``.
    """
    def _hook_add(func):
        if name not in _hooks:
            # Lazy %-style args avoid formatting when DEBUG is disabled.
            logger.debug("Creating new hook %s", name)
            _hooks[name] = []
        # BUG FIX: previously logged ``fn`` (which is None in the
        # decorator case) instead of the function being registered.
        logger.debug("Registering hook %s for function %s", name, func)
        _hooks[name].append(func)

    if fn is None:
        def decorator(func):
            _hook_add(func)
            return func
        return decorator
    else:
        _hook_add(fn)
Decorator to register a function as a hook
54,234
def exec_request(endpoint, func, raise_for_status=False, **kwargs):
    """Send an HTTPS API request to SeAT.

    ``func`` names the ``requests`` method to use ('get', 'post', ...).
    Returns the decoded JSON body, or ``{}`` on an HTTP error (unless
    ``raise_for_status`` is set, in which case the error propagates).
    """
    try:
        endpoint = '{0}/api/v1/{1}'.format(settings.SEAT_URL, endpoint)
        headers = {'X-Token': settings.SEAT_XTOKEN, 'Accept': 'application/json'}
        logger.debug(headers)
        logger.debug(endpoint)
        response = getattr(requests, func)(endpoint, headers=headers, data=kwargs)
        response.raise_for_status()
        return response.json()
    except requests.HTTPError as e:
        if raise_for_status:
            raise e
        logger.exception(
            "Error encountered while performing API request to SeAT with url {}".format(endpoint)
        )
        return {}
Send an https api request
54,235
def add_user(cls, username, email):
    """Add a user to the SeAT service.

    Returns ``(sanitized_username, password)`` on success, or
    ``(None, None)`` on failure.
    """
    sanitized = str(cls.__sanitize_username(username))
    logger.debug("Adding user to SeAT with username %s" % sanitized)
    password = cls.__generate_random_pass()
    ret = cls.exec_request(
        'user', 'post', username=sanitized, email=str(email), password=password
    )
    logger.debug(ret)
    if not cls._response_ok(ret):
        logger.info("Failed to add SeAT user with username %s" % sanitized)
        return None, None
    logger.info("Added SeAT user with username %s" % sanitized)
    return sanitized, password
Add user to service
54,236
def _check_email_changed(cls, username, email):
    """Return True when the email stored on SeAT differs from ``email``."""
    ret = cls.exec_request(
        'user/{}'.format(username), 'get', raise_for_status=True
    )
    current_email = ret['email']
    return current_email != email
Compares email to one set on SeAT
54,237
def update_user(cls, username, email, password):
    """Edit a SeAT user's email (when changed) and password.

    Returns the username on success, or None when the password update
    fails.
    """
    if cls._check_email_changed(username, email):
        logger.debug(
            "Updating SeAT username %s with email %s and password" % (username, email)
        )
        ret = cls.exec_request('user/{}'.format(username), 'put', email=email)
        logger.debug(ret)
        if not cls._response_ok(ret):
            # BUG FIX: ``logger.warn`` is a deprecated alias; use
            # ``logger.warning``.
            logger.warning("Failed to update email for username {}".format(username))
    ret = cls.exec_request('user/{}'.format(username), 'put', password=password)
    logger.debug(ret)
    if not cls._response_ok(ret):
        logger.warning("Failed to update password for username {}".format(username))
        return None
    logger.info("Updated SeAT user with username %s" % username)
    return username
Edit user info
54,238
def get_form_kwargs(self):
    """Inject the request user into the kwargs passed to the form."""
    kwargs = super(AddUpdateMixin, self).get_form_kwargs()
    kwargs['user'] = self.request.user
    return kwargs
Inject the request user into the kwargs passed to the form
54,239
def create_auth_group(sender, instance, created, **kwargs):
    """Signal receiver: create the matching AuthGroup when a group is created."""
    if not created:
        return
    AuthGroup.objects.create(group=instance)
Creates the AuthGroup model when a group is created
54,240
def construct_command ( self , command , keys = None , opts = None ) : cstr = [ command ] if keys : for key in keys : if isinstance ( keys [ key ] , list ) : ncstr = [ ] for nest in keys [ key ] : ncstr . append ( "%s=%s" % ( key , self . _escape_str ( nest ) ) ) cstr . append ( "|" . join ( ncstr ) ) else : cstr . append ( "%s=%s" % ( key , self . _escape_str ( keys [ key ] ) ) ) if opts : for opt in opts : cstr . append ( "-%s" % opt ) return " " . join ( cstr )
Constructs a TS3 formatted command string Keys can have a single nested list to construct a nested parameter
54,241
def _escape_str(value):
    """Escape ``value`` into a TS3-compatible string.

    Integers are formatted directly; backslashes are escaped first so
    the table replacements do not double-escape them.
    """
    if isinstance(value, int):
        return "%d" % value
    escaped = value.replace("\\", r'\\')
    for raw, quoted in ts3_escape.items():
        escaped = escaped.replace(raw, quoted)
    return escaped
Escape a value into a TS3 compatible string
54,242
def _unescape_str(value):
    """Unescape a TS3-compatible string back into a normal string.

    Integers are formatted directly; the backslash sequence is restored
    first, then the escape table is applied in reverse.
    """
    if isinstance(value, int):
        return "%d" % value
    unescaped = value.replace(r"\\", "\\")
    for raw, quoted in ts3_escape.items():
        unescaped = unescaped.replace(quoted, raw)
    return unescaped
Unescape a TS3 compatible string into a normal string
54,243
def login(self, username, password):
    """Log in to the TS3 server.

    Returns True on success (server responds with 0), False otherwise.
    """
    response = self.send_command(
        'login',
        keys={'client_login_name': username, 'client_login_password': password},
    )
    if response != 0:
        return False
    self._log.info('Login Successful')
    return True
Login to the TS3 Server
54,244
def use(self, id):
    """Select a particular virtual server instance by its ``sid``.

    Does nothing unless connected and ``id`` is positive.
    """
    if not self._connected or id <= 0:
        return
    self.send_command('use', keys={'sid': id})
Use a particular Virtual Server instance
54,245
def pre_save_config(sender, instance, *args, **kwargs):
    """Delete managed groups when their toggle is switched off on save.

    Compares the incoming instance with the stored one; when
    ``alliance_groups`` or ``corp_groups`` flips from True to False, the
    corresponding managed groups are deleted.
    """
    logger.debug("Received pre_save from {}".format(instance))
    if not instance.pk:
        # Brand-new instance: nothing stored to compare against.
        return
    try:
        old_instance = AutogroupsConfig.objects.get(pk=instance.pk)
        alliance_toggled_off = (
            old_instance.alliance_groups is True
            and instance.alliance_groups is False
        )
        if alliance_toggled_off:
            instance.delete_alliance_managed_groups()
        corp_toggled_off = (
            old_instance.corp_groups is True and instance.corp_groups is False
        )
        if corp_toggled_off:
            instance.delete_corp_managed_groups()
    except AutogroupsConfig.DoesNotExist:
        pass
Checks if enable was toggled on group config and deletes groups if necessary .
54,246
def check_groups_on_profile_update(sender, instance, created, *args, **kwargs):
    """Signal receiver: re-evaluate autogroups when a profile changes."""
    user = instance.user
    AutogroupsConfig.objects.update_groups_for_user(user)
Trigger check when main character or state changes .
54,247
def autogroups_states_changed(sender, instance, action, reverse, model, pk_set, *args, **kwargs):
    """Update group membership when states are added to or removed from a config.

    Only reacts to the ``post_*`` m2m actions; states that no longer
    exist are silently skipped.
    """
    if not action.startswith('post_'):
        return
    for pk in pk_set:
        try:
            state = State.objects.get(pk=pk)
            instance.update_group_membership_for_state(state)
        except State.DoesNotExist:
            pass
Trigger group membership update when a state is added or removed from an autogroup config .
54,248
def random_string(string_length=10):
    """Return a random string of length ``string_length``.

    Built from an upper-cased UUID4 with hyphens removed, so the result
    only contains hexadecimal digits (and is at most 32 characters).
    """
    token = str(uuid.uuid4()).upper().replace("-", "")
    return token[:string_length]
Returns a random string of length string_length .
54,249
def getGenomeList():
    """Return the names of all imported genomes."""
    import rabaDB.filters as rfilt
    query = rfilt.RabaQuery(Genome_Raba)
    return [genome.name for genome in query.iterRun()]
Return the names of all imported genomes
54,250
def iterCodons(self):
    """Iterate over the codons of the cDNA sequence.

    Yields each complete codon via ``getCodon``; a trailing partial
    codon is ignored.
    """
    # BUG FIX: ``/`` yields a float on Python 3, making ``range`` raise
    # a TypeError; use integer division.
    for i in range(len(self.cDNA) // 3):
        yield self.getCodon(i)
iterates through the codons
54,251
def removeDuplicates(inFileName, outFileName):
    """Remove duplicated lines from the CSV file ``inFileName``.

    The first line (legend) is always kept; later lines are kept only
    on first occurrence, preserving order.  The result is written to
    ``outFileName``.
    """
    # BUG FIX: ``dict.has_key`` is Python 2 only; use ``in`` with a set.
    with open(inFileName) as in_file:
        legend = in_file.readline()
        seen = {legend}
        kept = []
        for line in in_file:
            if line not in seen:
                seen.add(line)
                kept.append(line)
    with open(outFileName, 'w') as out_file:
        out_file.write(legend + ''.join(kept))
Removes duplicated lines from the inFileName CSV file; the results are written in outFileName.
54,252
def catCSVs(folder, ouputFileName, removeDups=False):
    """Concatenate all CSVs in ``folder`` and write the result to ``ouputFileName``.

    Shells out to ``cat``, so this may not work on non-Unix systems.
    Optionally removes duplicated lines afterwards.
    """
    # NOTE(review): the original raw-string command literal was lost
    # (the line read ``strCmd = r % (...)``); reconstructed from the
    # documented behavior -- confirm against the upstream source.
    strCmd = r'cat %s/*.csv > %s' % (folder, ouputFileName)
    os.system(strCmd)
    if removeDups:
        removeDuplicates(ouputFileName, ouputFileName)
Concatenates all CSVs in folder and writes the results in ouputFileName. May not work on non-Unix systems.
54,253
def joinCSVs(csvFilePaths, column, ouputFileName, separator=','):
    """Join CSV files on the values in ``column``.

    ``csvFilePaths`` should be an iterable of file paths.  Performs a
    nested-loop join: for each row of the first CSV, every row of each
    subsequent CSV whose ``column`` value matches is appended to the
    line.  Writes the result to ``ouputFileName`` and returns it as a
    string.  Note: quadratic in the number of rows per extra file.
    """
    res = ''
    legend = []
    csvs = []
    # Parse every input file and collect its legend fields.
    for f in csvFilePaths:
        c = CSVFile()
        c.parse(f)
        csvs.append(c)
        legend.append(separator.join(c.legend.keys()))
    legend = separator.join(legend)
    lines = []
    # Drive the join from the first CSV's rows.
    for i in range(len(csvs[0])):
        val = csvs[0].get(i, column)
        line = separator.join(csvs[0][i])
        for c in csvs[1:]:
            for j in range(len(c)):
                if val == c.get(j, column):
                    line += separator + separator.join(c[j])
        lines.append(line)
    res = legend + '\n' + '\n'.join(lines)
    f = open(ouputFileName, 'w')
    f.write(res)
    f.flush()
    f.close()
    return res
csvFilePaths should be an iterable . Joins all CSVs according to the values in the column column . Write the results in a new file ouputFileName
54,254
def addField(self, field):
    """Add a field to the legend.

    Raises ValueError when the (lower-cased) field already exists.
    """
    key = field.lower()
    if key in self.legend:
        raise ValueError("%s is already in the legend" % key)
    self.legend[key] = len(self.legend)
    if self.strLegend:
        self.strLegend = self.strLegend + self.separator + field
    else:
        self.strLegend = self.strLegend + field
Add a field to the legend.
54,255
def parse(self, filePath, skipLines=0, separator=',', stringSeparator='"', lineSeparator='\n'):
    """Load a CSV file.

    Skips ``skipLines`` leading lines, separates ``#`` comment lines
    from data lines, builds the legend (an ordered, lower-cased mapping
    of column name to index) from the first data line, and keeps the
    remaining lines for later access.
    """
    self.filename = filePath
    f = open(filePath)
    if lineSeparator == '\n':
        lines = f.readlines()
    else:
        lines = f.read().split(lineSeparator)
    f.flush()
    f.close()
    lines = lines[skipLines:]
    self.lines = []
    self.comments = []
    # Lines starting with '#' are comments; others are data.
    # NOTE(review): an empty line falls through to ``l[0]`` in the
    # elif and raises IndexError -- confirm inputs never contain one.
    for l in lines:
        if len(l) != 0 and l[0] != "#":
            self.lines.append(l)
        elif l[0] == "#":
            self.comments.append(l)
    self.separator = separator
    self.lineSeparator = lineSeparator
    self.stringSeparator = stringSeparator
    self.legend = collections.OrderedDict()
    i = 0
    # The first data line is the legend: lower-case it, strip the
    # string separator, and index each (first occurrence of a) field.
    for c in self.lines[0].lower().replace(stringSeparator, '').split(separator):
        legendElement = c.strip()
        if legendElement not in self.legend:
            self.legend[legendElement] = i
        i += 1
    self.strLegend = self.lines[0].replace('\r', '\n').replace('\n', '')
    self.lines = self.lines[1:]
Loads a CSV file
54,256
def commitLine(self, line):
    """Commit a line to the stream buffer, flushing when the buffer is full.

    Every ``writeRate`` commits, the buffered lines are stringified and
    written to the stream file.  Raises ValueError when no stream is
    active.
    """
    if self.streamBuffer is None:
        raise ValueError("Commit lines is only for when you are streaming to a file")
    self.streamBuffer.append(line)
    if len(self.streamBuffer) % self.writeRate == 0:
        # BUG FIX: ``xrange`` does not exist on Python 3; use ``range``.
        for i in range(len(self.streamBuffer)):
            self.streamBuffer[i] = str(self.streamBuffer[i])
        self.streamFile.write("%s\n" % ('\n'.join(self.streamBuffer)))
        self.streamFile.flush()
        self.streamBuffer = []
Commits a line making it ready to be streamed to a file and saves the current buffer if needed . If no stream is active raises a ValueError
54,257
def closeStreamToFile(self):
    """Write any remaining committed lines and close the stream.

    Resets the object back to in-memory mode.  Raises ValueError when
    no stream is active.
    """
    if self.streamBuffer is None:
        raise ValueError("Commit lines is only for when you are streaming to a file")
    # BUG FIX: ``xrange`` does not exist on Python 3; use ``range``.
    for i in range(len(self.streamBuffer)):
        self.streamBuffer[i] = str(self.streamBuffer[i])
    self.streamFile.write('\n'.join(self.streamBuffer))
    self.streamFile.close()
    self.streamFile = None
    self.writeRate = None
    self.streamBuffer = None
    self.keepInMemory = True
Appends the remaining commited lines and closes the stream . If no stream is active raises a ValueError
54,258
def newLine(self):
    """Append an empty line at the end of the CSV and return it.

    The line is only stored when ``keepInMemory`` is set (streaming
    mode does not retain lines).
    """
    entry = CSVEntry(self)
    if self.keepInMemory:
        self.lines.append(entry)
    return entry
Appends an empty line at the end of the CSV and returns it
54,259
def insertLine(self, i):
    """Insert an empty line at position ``i`` and return it.

    NOTE(review): the new ``CSVEntry`` is inserted into ``self.data``
    but the return value reads ``self.lines[i]`` -- confirm these refer
    to the same container; otherwise the returned entry is not the one
    just inserted.
    """
    self.data.insert(i, CSVEntry(self))
    return self.lines[i]
Inserts an empty line at position i and returns it
54,260
def save(self, filePath):
    """Write the CSV to ``filePath`` and remember it as the current filename."""
    self.filename = filePath
    with open(filePath, 'w') as handle:
        handle.write(self.toStr())
save the CSV to a file
54,261
def toStr(self):
    """Return the whole CSV (legend plus lines) as a single string."""
    rendered = [self.strLegend]
    rendered.extend(entry.toStr() for entry in self.lines)
    return self.lineSeparator.join(rendered)
returns a string version of the CSV
54,262
def count(self, objectType, *args, **coolArgs):
    """Return the number of elements satisfying the query."""
    query = self._makeLoadQuery(objectType, *args, **coolArgs)
    return query.count()
Returns the number of elements satisfying the query
54,263
def iterGet(self, objectType, *args, **coolArgs):
    """Same as ``get`` but yields the elements one by one.

    Much more efficient for large outputs; wrapper types are wrapped
    lazily as they are produced.
    """
    query = self._makeLoadQuery(objectType, *args, **coolArgs)
    for raw in query.iterRun():
        if issubclass(objectType, pyGenoRabaObjectWrapper):
            yield objectType(wrapped_object_and_bag=(raw, self.bagKey))
        else:
            yield raw
Same as get, but returns the elements one by one; much more efficient for large outputs.
54,264
def deleteSNPs(setName):
    """Delete the set of polymorphisms named ``setName``.

    Removes all SNPs of the set inside a transaction and deletes the
    set's master record.  Returns True on success; raises KeyError when
    no set by that name exists.
    """
    con = conf.db
    try:
        SMaster = SNPMaster(setName=setName)
        con.beginTransaction()
        SNPType = SMaster.SNPType
        con.delete(SNPType, 'setName = ?', (setName,))
        SMaster.delete()
        con.endTransaction()
    except KeyError:
        raise KeyError("Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name" % setName)
        # NOTE(review): unreachable after the raise above; kept to
        # preserve the original code.
        return False
    return True
deletes a set of polymorphisms
54,265
def getSNPSetsList():
    """Return the names of all imported SNP sets."""
    import rabaDB.filters as rfilt
    query = rfilt.RabaQuery(SNPMaster)
    return [master.setName for master in query.iterRun()]
Return the names of all imported snp sets
54,266
def parseFile(self, fil):
    """Open the file ``fil`` and parse its contents via ``parseStr``."""
    # BUG FIX: use a context manager so the handle is closed even when
    # ``parseStr`` raises.
    with open(fil) as handle:
        self.parseStr(handle.read())
Opens a file and parses it
54,267
def add(self, header, data):
    """Append a new (header, sequence) entry; '>' is prepended to the
    header when missing.

    Robustness fix: startswith() replaces header[0], so an empty header no
    longer raises IndexError -- it is stored as a bare '>'.
    """
    if header.startswith('>'):
        self.data.append((header, data))
    else:
        self.data.append(('>' + header, data))
appends a new entry to the file
54,268
def findAll(haystack, needle):
    """Return the positions of all (non-overlapping) occurrences of
    *needle* in *haystack*."""
    positions = []
    hit = haystack.find(needle)
    while hit >= 0:
        positions.append(hit)
        # resume the scan just past this occurrence (non-overlapping)
        hit = haystack.find(needle, hit + len(needle))
    return positions
returns a list of all occurrences of needle in haystack
54,269
def complementTab(seq=()):
    """Return a list with the complement of each element of *seq* without
    reversing the order. Single characters are complemented directly
    (IUPAC codes supported, case preserved); longer strings are delegated
    to reverseComplement(); empty strings stay empty.

    Fix: the default argument is now an immutable tuple instead of a
    mutable list (classic Python pitfall); behavior is unchanged.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'R': 'Y', 'Y': 'R', 'M': 'K', 'K': 'M', 'W': 'W', 'S': 'S', 'B': 'V', 'D': 'H', 'H': 'D', 'V': 'B', 'N': 'N', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'r': 'y', 'y': 'r', 'm': 'k', 'k': 'm', 'w': 'w', 's': 's', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'}
    seq_tmp = []
    for bps in seq:
        if len(bps) == 0:
            seq_tmp.append('')
        elif len(bps) == 1:
            seq_tmp.append(complement[bps])
        else:
            seq_tmp.append(reverseComplement(bps))
    return seq_tmp
returns a list of complementary sequence without inversing it
54,270
def translateDNA_6Frames(sequence):
    """Return the 6 translations of *sequence*, one per reading frame
    (three forward, three reverse)."""
    frames = ('f1', 'f2', 'f3', 'r1', 'r2', 'r3')
    return tuple(translateDNA(sequence, frame) for frame in frames)
returns 6 translations of sequence, one for each reading frame
54,271
def getSequenceCombinaisons(polymorphipolymorphicDnaSeqSeq, pos=0):
    """Take a DNA sequence with polymorphisms and return all the possible
    concrete sequences it can yield (recursive expansion, one position per
    call starting at *pos*)."""
    if type(polymorphipolymorphicDnaSeqSeq) is not types.ListType:  # Python 2 only
        seq = list(polymorphipolymorphicDnaSeqSeq)
    else:
        seq = polymorphipolymorphicDnaSeqSeq
    if pos >= len(seq):
        # past the end: the sequence is fully resolved
        return [''.join(seq)]
    variants = []
    if seq[pos] in polymorphicNucleotides:
        chars = decodePolymorphicNucleotide(seq[pos])
    else:
        chars = seq[pos]
    for c in chars:
        # copy so sibling branches do not see each other's substitutions
        rseq = copy.copy(seq)
        rseq[pos] = c
        variants.extend(getSequenceCombinaisons(rseq, pos + 1))
    return variants
Takes a DNA sequence with polymorphisms and returns all the possible sequences that it can yield
54,272
def getNucleotideCodon(sequence, x1):
    """Return a tuple (codon containing the nucleotide at *x1*, position of
    that nucleotide within the codon), or None when x1 is out of range."""
    if not (0 <= x1 < len(sequence)):
        return None
    offset = x1 % 3
    start = x1 - offset
    return (sequence[start:start + 3], offset)
Returns the entire codon of the nucleotide at pos x1 in sequence and the position of that nocleotide in the codon in a tuple
54,273
def highlightSubsequence(sequence, x1, x2, start=' [', stop='] '):
    """Return *sequence* with the subsequence in [x1, x2) wrapped between
    the *start* and *stop* markers.

    Fix: removed a leftover Python-2 debug print statement that polluted
    stdout on every call.
    """
    seq = list(sequence)
    seq[x1] = start + seq[x1]
    seq[x2 - 1] = seq[x2 - 1] + stop
    return ''.join(seq)
returns a sequence where the subsequence in [ x1, x2 [ is placed in between start and stop
54,274
def deleteGenome(species, name):
    """Remove a genome (chromosomes, genes, transcripts, exons, proteins)
    from the database, then delete its sequence folder.

    Returns True when everything was removed cleanly, False otherwise.
    """
    printf('deleting genome (%s, %s)...' % (species, name))
    conf.db.beginTransaction()
    objs = []
    allGood = True
    try:
        genome = Genome_Raba(name=name, species=species.lower())
        objs.append(genome)
        # collect every object that belongs to this genome before deleting
        pBar = ProgressBar(label='preparing')
        for typ in (Chromosome_Raba, Gene_Raba, Transcript_Raba, Exon_Raba, Protein_Raba):
            pBar.update()
            f = RabaQuery(typ, namespace=genome._raba_namespace)
            f.addFilter({'genome': genome})
            for e in f.iterRun():
                objs.append(e)
        pBar.close()
        pBar = ProgressBar(nbEpochs=len(objs), label='deleting objects')
        for e in objs:
            pBar.update()
            e.delete()
        pBar.close()
    except KeyError as e:
        raise KeyError("\tWARNING, couldn't remove genome form db, maybe it's not there: ", e)
        # NOTE(review): unreachable -- the raise above aborts before the
        # transaction is ended and allGood is never set to False
        allGood = False
    printf('\tdeleting folder')
    try:
        shutil.rmtree(conf.getGenomeSequencePath(species, name))
    except OSError as e:
        # NOTE(review): this constructs an OSError but never raises or logs
        # it -- probably meant to be printed; confirm intent
        OSError('\tWARNING, Unable to delete folder: ', e)
        allGood = False
    conf.db.endTransaction()
    return allGood
Removes a genome from the database
54,275
def _importSequence(chromosome, fastaFile, targetDir):
    "Serializes fastas into .dat files"
    # gzipped fasta: first line is the '>' header, the rest is the sequence
    # (Python 2: gzip.open yields str, so the str replaces below are fine)
    f = gzip.open(fastaFile)
    header = f.readline()
    strRes = f.read().upper().replace('\n', '').replace('\r', '')
    f.close()
    # one flat .dat file per chromosome: the raw sequence, no line breaks
    fn = '%s/chromosome%s.dat' % (targetDir, chromosome.number)
    f = open(fn, 'w')
    f.write(strRes)
    f.close()
    chromosome.dataFile = fn
    chromosome.header = header
    # returned value is the sequence length
    return len(strRes)
Serializes fastas into . dat files
54,276
def createDefaultConfigFile():
    """Create a default configuration file (config.ini) in the pyGeno
    settings directory, pointing at the default settings dir and remote
    location.

    Fix: uses a context manager so the handle is closed even if the write
    fails (the original leaked it on error).
    """
    s = "[pyGeno_config]\nsettings_dir=%s\nremote_location=%s" % (pyGeno_SETTINGS_DIR, pyGeno_REMOTE_LOCATION)
    with open('%s/config.ini' % pyGeno_SETTINGS_DIR, 'w') as f:
        f.write(s)
Creates a default configuration file
54,277
def getSettingsPath():
    """Return the path where the settings are stored, as read from
    config.ini; when the file is missing or unreadable a default one is
    created and the read is retried."""
    parser = SafeConfigParser()
    try:
        parser.read(os.path.normpath(pyGeno_SETTINGS_DIR + '/config.ini'))
        return parser.get('pyGeno_config', 'settings_dir')
    except:
        # NOTE(review): bare except -- ANY failure (missing file, missing
        # section/option) regenerates the defaults, then retries via
        # recursion; if creation itself keeps failing this recurses forever
        createDefaultConfigFile()
        return getSettingsPath()
Returns the path where the settings are stored
54,278
def pyGeno_init():
    """Initialize pyGeno. This function is automatically called at launch.

    Checks the python version, creates the settings and data folders when
    missing, and opens the rabaDB connection (stored in the module globals
    db / dbConf).
    """
    global db, dbConf
    global pyGeno_SETTINGS_PATH
    global pyGeno_RABA_DBFILE
    global pyGeno_DATA_PATH
    if not checkPythonVersion():
        raise PythonVersionError("==> FATAL: pyGeno only works with python 2.7 and above, please upgrade your python version")
    if not os.path.exists(pyGeno_SETTINGS_DIR):
        os.makedirs(pyGeno_SETTINGS_DIR)
    # the settings dir may be redirected by config.ini, hence the re-read
    pyGeno_SETTINGS_PATH = getSettingsPath()
    pyGeno_RABA_DBFILE = os.path.normpath(os.path.join(pyGeno_SETTINGS_PATH, "pyGenoRaba.db"))
    pyGeno_DATA_PATH = os.path.normpath(os.path.join(pyGeno_SETTINGS_PATH, "data"))
    if not os.path.exists(pyGeno_SETTINGS_PATH):
        os.makedirs(pyGeno_SETTINGS_PATH)
    if not os.path.exists(pyGeno_DATA_PATH):
        os.makedirs(pyGeno_DATA_PATH)
    # the configuration must be registered before opening the connection
    rabaDB.rabaSetup.RabaConfiguration(pyGeno_RABA_NAMESPACE, pyGeno_RABA_DBFILE)
    db = rabaDB.rabaSetup.RabaConnection(pyGeno_RABA_NAMESPACE)
    dbConf = rabaDB.rabaSetup.RabaConfiguration(pyGeno_RABA_NAMESPACE)
This function is automatically called at launch
54,279
def previousExon(self):
    """Return the exon preceding this one in the transcript, or None when
    this is the first exon (or the index is out of range)."""
    if self.number == 0:
        return None
    idx = self.number - 1
    try:
        return self.transcript.exons[idx]
    except IndexError:
        return None
Returns the previous exon of the transcript or None if there is none
54,280
def parseStr(self, st):
    """Parse the string *st*: normalize line endings (CR -> LF, collapse
    doubled LF) and store the resulting lines in self.data."""
    normalized = st.replace('\r', '\n')
    normalized = normalized.replace('\n\n', '\n')
    self.data = normalized.split('\n')
Parses a string
54,281
def get(self, li):
    """Return the li-th entry (entries span 4 consecutive lines, fastq
    style; __splitEntry lazily parses the record at that index)."""
    i = li * 4
    self.__splitEntry(i)
    return self.data[i]
returns the ith entry
54,282
def newEntry(self, ident="", seq="", plus="", qual=""):
    """Append an empty entry at the end of the file and return it.

    NOTE(review): the ident/seq/plus/qual arguments are currently ignored
    -- the FastqEntry is created empty; confirm whether they should be
    forwarded to the constructor.
    """
    e = FastqEntry()
    self.data.append(e)
    return e
Appends an empty entry at the end of the file and returns it
54,283
def listRemoteDatawraps(location=conf.pyGeno_REMOTE_LOCATION):
    """List all the datawraps available from a remote location (expects a
    datawraps.json file served under that URL); returns the parsed JSON."""
    loc = location + "/datawraps.json"
    response = urllib2.urlopen(loc)  # Python 2 urllib2
    js = json.loads(response.read())
    return js
Lists all the datawraps available from a remote location.
54,284
def listDatawraps():
    """List all the datawraps pyGeno comes with, as a dict with 'Genomes'
    and 'SNPs' keys."""
    available = {"Genomes": [], "SNPs": []}
    for category, subdir in (("Genomes", "bootstrap_data/genomes"), ("SNPs", "bootstrap_data/SNPs")):
        for fname in os.listdir(os.path.join(this_dir, subdir)):
            # same membership test as the original find(...) > -1
            if ".tar.gz" in fname:
                available[category].append(fname)
    return available
Lists all the datawraps pyGeno comes with
54,285
def printDatawraps():
    """Print all available datawraps for bootstrapping, grouped by category."""
    l = listDatawraps()
    printf("Available datawraps for boostraping\n")
    for k, v in l.iteritems():  # Python 2
        printf(k)
        printf("~" * len(k) + "|")
        for vv in v:
            printf(" " * len(k) + "|" + "~~~:> " + vv)
    printf('\n')
print all available datawraps for bootstraping
54,286
def importGenome(name, batchSize=100):
    """Import a genome datawrap shipped with pyGeno (most datawraps only
    contain URLs to third-party data)."""
    path = os.path.join(this_dir, "bootstrap_data", "genomes/" + name)
    PG.importGenome(path, batchSize)
Import a genome shipped with pyGeno . Most of the datawraps only contain URLs towards data provided by third parties .
54,287
def importSNPs(name):
    """Import a SNP-set datawrap shipped with pyGeno (most datawraps only
    contain URLs to third-party data)."""
    path = os.path.join(this_dir, "bootstrap_data", "SNPs/" + name)
    PS.importSNPs(path)
Import a SNP set shipped with pyGeno . Most of the datawraps only contain URLs towards data provided by third parties .
54,288
def log(self):
    """Record the current progression stats into self.logs without
    printing anything on screen."""
    snapshot = (
        ('epochDuration', self.lastEpochDuration),
        ('avg', self.avg),
        ('runtime', self.runtime),
        ('remtime', self.remtime),
    )
    for key, value in snapshot:
        self.logs[key].append(value)
logs stats about the progression without printing anything on screen
54,289
def saveLogs(self, filename):
    """Dump the collected progression logs (self.logs) to *filename* as a
    pickle.

    Fixes: the file handle is now closed even when dump() raises (context
    manager), and the pickler falls back to the stdlib `pickle` module
    when cPickle is unavailable (Python 3).
    """
    try:
        import cPickle as pickle  # Python 2 fast pickler
    except ImportError:
        import pickle
    with open(filename, 'wb') as f:
        pickle.dump(self.logs, f)
dumps logs into a nice pickle
54,290
def filter(self, chromosome, **kwargs):
    """Default SNP filter: mixes all applied SNPs together and ignores
    insertions and deletions; returns a SequenceSNP of collected alleles.

    Fixes: the single-SNP branch assigned to a misspelled local
    ('allels'), and an unused 'warn' message template was removed.
    """
    def appendAllele(alleles, sources, snp):
        # collect snp.alt (substitutions only) plus the reference allele
        pos = snp.start
        if snp.alt[0] == '-':
            pass  # deletion: ignored by this default filter
        elif snp.ref[0] == '-':
            pass  # insertion: ignored by this default filter
        else:
            sources[snpSet] = snp  # snpSet comes from the enclosing loop
            alleles.append(snp.alt)
        # NOTE(review): the reference allele is appended on EVERY call, so
        # it can appear several times in 'alleles' -- preserved as-is
        refAllele = chromosome.refSequence[pos]
        alleles.append(refAllele)
        sources['ref'] = refAllele
        return alleles, sources

    sources = {}
    alleles = []
    for snpSet, data in kwargs.iteritems():  # Python 2
        if type(data) is list:
            for snp in data:
                alleles, sources = appendAllele(alleles, sources, snp)
        else:
            # fix: was 'allels, sources = ...' (typo); it only worked
            # because appendAllele mutates 'alleles' in place
            alleles, sources = appendAllele(alleles, sources, data)
    return SequenceSNP(alleles, sources=sources)
The default filter mixes applied all SNPs and ignores Insertions and Deletions .
54,291
def insert(self, x1, x2, name='', referedObject=[]):
    """Insert the segment [x1, x2] in its right place and return it.

    If a segment S with S.x1 == x1 and S.x2 == x2 already exists, S.name
    becomes 'S.name U name' and referedObject is appended to S's existing
    list. NOTE(review): the mutable default referedObject=[] is shared
    between calls -- confirm no caller mutates it.
    """
    # normalize so xx1 <= xx2
    if x1 > x2:
        xx1, xx2 = x2, x1
    else:
        xx1, xx2 = x1, x2
    rt = None
    insertId = None
    childrenToRemove = []
    for i in range(len(self.children)):
        if self.children[i].x1 == xx1 and xx2 == self.children[i].x2:
            # exact match: merge names and refered objects
            self.children[i].name = self.children[i].name + ' U ' + name
            self.children[i].referedObject.append(referedObject)
            return self.children[i]
        if self.children[i].x1 <= xx1 and xx2 <= self.children[i].x2:
            # fully contained in an existing child: recurse into it
            return self.children[i].insert(x1, x2, name, referedObject)
        elif xx1 <= self.children[i].x1 and self.children[i].x2 <= xx2:
            # the new segment engulfs this child: adopt it under rt
            if rt == None:
                if type(referedObject) is types.ListType:  # Python 2 only
                    rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level + 1)
                else:
                    rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level + 1)
                insertId = i
            rt.__addChild(self.children[i])
            self.children[i].father = rt
            childrenToRemove.append(self.children[i])
        elif xx1 <= self.children[i].x1 and xx2 <= self.children[i].x2:
            # first child that starts after the new segment: remember slot
            insertId = i
            break
    if rt != None:
        self.__addChild(rt, insertId)
        for c in childrenToRemove:
            self.children.remove(c)
    else:
        if type(referedObject) is types.ListType:  # Python 2 only
            rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level + 1)
        else:
            rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level + 1)
        if insertId != None:
            self.__addChild(rt, insertId)
        else:
            self.__addChild(rt)
    return rt
Insert the segment in it s right place and returns it . If there s already a segment S as S . x1 == x1 and S . x2 == x2 . S . name will be changed to S . name U name and the referedObject will be appended to the already existing list
54,292
def removeGaps(self):
    """Remove all gaps between consecutive child regions by shifting each
    child (and its whole subtree) left against its predecessor."""
    for i in range(1, len(self.children)):
        if self.children[i].x1 > self.children[i - 1].x2:
            # negative offset: move child i back so it starts at prev.x2
            aux_moveTree(self.children[i - 1].x2 - self.children[i].x1, self.children[i])
Remove all gaps between regions
54,293
def getIndexedLength(self):
    """Return the total length of the indexed regions; overlaps between
    consecutive children are only counted once."""
    if self.x1 is not None and self.x2 is not None:
        return self.x2 - self.x1
    if not self.children:
        return 0
    total = self.children[0].x2 - self.children[0].x1
    for prev, cur in zip(self.children, self.children[1:]):
        # subtract the part of cur already covered by prev
        total += cur.x2 - cur.x1 - max(0, prev.x2 - cur.x1)
    return total
Returns the total length of indexed regions
54,294
def flatten(self):
    """Flatten the tree: it becomes a tree of depth 1 where overlapping
    regions have been merged together."""
    if len(self.children) > 1:
        children = self.children
        self.emptyChildren()
        children[0].emptyChildren()
        # running merge window over consecutive children
        x1 = children[0].x1
        x2 = children[0].x2
        refObjs = [children[0].referedObject]
        name = children[0].name
        for i in range(1, len(children)):
            children[i].emptyChildren()
            if children[i - 1] >= children[i]:
                # NOTE(review): relies on SegmentTree's rich comparison,
                # presumably an overlap test -- confirm
                x2 = children[i].x2
                refObjs.append(children[i].referedObject)
                name += " U " + children[i].name
            else:
                # no overlap: commit the current merged window
                if len(refObjs) == 1:
                    refObjs = refObjs[0]
                self.insert(x1, x2, name, refObjs)
                x1 = children[i].x1
                x2 = children[i].x2
                refObjs = [children[i].referedObject]
                name = children[i].name
        # commit the last pending window
        if len(refObjs) == 1:
            refObjs = refObjs[0]
        self.insert(x1, x2, name, refObjs)
Flattens the tree . The tree become a tree of depth 1 where overlapping regions have been merged together
54,295
def move(self, newX1):
    """Shift the whole tree so that it starts at newX1; the x1/x2 of all
    children are updated by the same offset."""
    if self.x1 is not None and self.x2 is not None:
        aux_moveTree(newX1 - self.x1, self)
    elif self.children:
        # rootless tree: anchor the offset on the first child
        aux_moveTree(newX1 - self.children[0].x1, self)
Moves tree to a new starting position updates x1s of children
54,296
def __getSequenceVariants(self, x1, polyStart, polyStop, listSequence):
    """Recursively expand polymorphisms into the list of concrete sequences.

    polyStop is the polymorphism number at which the computation of
    combinations stops; x1 is the offset of listSequence in the reference.
    """
    if polyStart < len(self.polymorphisms) and polyStart < polyStop:
        sequence = copy.copy(listSequence)
        ret = []
        pk = self.polymorphisms[polyStart]  # (position, alleles)
        posInSequence = pk[0] - x1
        # NOTE(review): when posInSequence >= len(listSequence) this path
        # returns None (no explicit return) -- callers seem to assume the
        # polymorphism always falls inside the slice; confirm
        if posInSequence < len(listSequence):
            for allele in pk[1]:
                sequence[posInSequence] = allele
                ret.extend(self.__getSequenceVariants(x1, polyStart + 1, polyStop, sequence))
            return ret
    else:
        # base case: no more polymorphisms to expand
        return [''.join(listSequence)]
polyStop is the polymorphism number at which the computation of combinations stops
54,297
def getNbVariants(self, x1, x2=-1):
    """Return the number of distinct sequence variants between x1 and x2
    (x2 == -1 means up to the end of the default sequence)."""
    end = len(self.defaultSequence) if x2 == -1 else x2
    count = 1
    # each in-range polymorphism multiplies the number of combinations
    for p in self.polymorphisms:
        if x1 <= p[0] <= end:
            count *= len(p[1])
    return count
returns the nb of variants of sequences between x1 and x2
54,298
def _kmp_construct_next ( self , pattern ) : next = [ [ 0 for state in pattern ] for input_token in self . ALPHABETA_KMP ] next [ pattern [ 0 ] ] [ 0 ] = 1 restart_state = 0 for state in range ( 1 , len ( pattern ) ) : for input_token in self . ALPHABETA_KMP : next [ input_token ] [ state ] = next [ input_token ] [ restart_state ] next [ pattern [ state ] ] [ state ] = state + 1 restart_state = next [ pattern [ state ] ] [ restart_state ] return next
the helper function for KMP - string - searching is to construct the DFA . pattern should be an integer array . return a 2D array representing the DFA for moving the pattern .
54,299
def _kmp_search_first(self, pInput_sequence, pPattern):
    """Use the KMP algorithm to find the first occurrence of pPattern in
    pInput_sequence (both integer arrays).

    Returns the start position of the occurrence, or -1 when absent.
    """
    # tokens are mapped through len(bin(e)) before being fed to the DFA,
    # so ALPHABETA_KMP must cover those values
    input_sequence, pattern = pInput_sequence, [len(bin(e)) for e in pPattern]
    n, m = len(input_sequence), len(pattern)
    d = p = 0
    next = self._kmp_construct_next(pattern)  # shadows the builtin `next`
    while d < n and p < m:
        p = next[len(bin(input_sequence[d]))][p]
        d += 1
    if p == m:
        return d - p
    else:
        return -1
use the KMP algorithm to search for the first occurrence of the pattern in the input_sequence. Both arguments are integer arrays. Return the position of the occurrence if found; otherwise -1.