idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
60,900
def semilogy(self, p, T, *args, **kwargs):
    """Plot data on the skew-T with a log-scaled pressure axis.

    Parameters
    ----------
    p : array-like
        Pressure values (hPa), used as the y coordinate.
    T : array-like
        Temperature values (degC), used as the x coordinate.
    *args, **kwargs
        Forwarded to ``Axes.semilogy``.
    """
    # Temporarily restore the stock Axes.plot so Axes.semilogy does not
    # dispatch into SkewTAxes' overridden plot; restore the override after.
    no_plot = SkewTAxes.plot
    SkewTAxes.plot = Axes.plot
    # Note the swapped order: T on x, p on the log-scaled y axis.
    Axes.semilogy(self, T, p, *args, **kwargs)
    SkewTAxes.plot = no_plot
    self.yaxis.set_major_formatter(ScalarFormatter())
    self.yaxis.set_major_locator(MultipleLocator(100))
    # Style the temperature tick labels (rotated, red).
    labels = self.xaxis.get_ticklabels()
    for label in labels:
        label.set_rotation(45)
        label.set_horizontalalignment('right')
        label.set_fontsize(8)
        label.set_color('#B31515')
    self.grid(True)
    # Red grid lines for temperature, thin black for pressure.
    self.grid(axis='top', color='#B31515', linestyle='-', linewidth=1,
              alpha=0.5, zorder=1.1)
    self.grid(axis='x', color='#B31515', linestyle='-', linewidth=1,
              alpha=0.5, zorder=1.1)
    self.grid(axis='y', color='k', linestyle='-', linewidth=0.5,
              alpha=0.5, zorder=1.1)
    self.set_xlabel(r'Temperature ($^{\circ} C$)', color='#B31515')
    self.set_ylabel('Pressure ($hPa$)')
    # Draw the reference line families once, if not already present.
    if len(self._mixing_lines) == 0:
        self.plot_mixing_lines()
    if len(self._dry_adiabats) == 0:
        self.plot_dry_adiabats()
    if len(self._moist_adiabats) == 0:
        self.plot_moist_adiabats()
r Plot data .
60,901
def plot_barbs(self, p, u, v, xloc=1.0, x_clip_radius=0.08,
               y_clip_radius=0.08, **kwargs):
    """Plot wind barbs in a vertical column at axes-relative x *xloc*.

    ``p`` supplies the pressure (y) coordinate of each barb, ``u``/``v``
    the wind components.  The clip radii widen the clipping box slightly
    beyond the axes so barbs near the edges are not truncated.
    """
    # One barb per pressure level, all sharing the same x position.
    xs = np.full_like(p, xloc)
    barb_set = self.barbs(
        xs, p, u, v,
        transform=self.get_yaxis_transform(which='tick2'),
        clip_on=True, **kwargs)
    # Clip against an axes-relative box slightly larger than the axes.
    clip_box = transforms.Bbox(
        [[xloc - x_clip_radius, -y_clip_radius],
         [xloc + x_clip_radius, 1.0 + y_clip_radius]])
    barb_set.set_clip_box(transforms.TransformedBbox(clip_box, self.transAxes))
r Plot wind barbs .
60,902
def plot_dry_adiabats(self, p=None, theta=None, **kwargs):
    """Plot lines of constant potential temperature (dry adiabats).

    Parameters
    ----------
    p : array-like, optional
        Pressure levels (hPa); defaults to a linspace over the y limits.
    theta : array-like, optional
        Potential temperatures (degC); defaults to every 10 degrees from
        the left x limit upward.
    **kwargs
        Styling overrides for the LineCollection.
    """
    # Remove any previously drawn adiabats before re-plotting.
    for artist in self._dry_adiabats:
        artist.remove()
    self._dry_adiabats = []
    if theta is None:
        xmin, xmax = self.get_xlim()
        # Extends 200 degrees past xmax — presumably so the slanted lines
        # still span the skewed plot area; TODO confirm.
        theta = np.arange(xmin, xmax + 201, 10)
    if p is None:
        p = np.linspace(*self.get_ylim())
    # Temperature along each adiabat; theta[:, None] broadcasts one row
    # per potential temperature against the pressure levels.
    t = calculate('T', theta=theta[:, None], p=p, p_units='hPa',
                  T_units='degC', theta_units='degC')
    linedata = [np.vstack((ti, p)).T for ti in t]
    kwargs.setdefault('clip_on', True)
    kwargs.setdefault('colors', '#A65300')
    kwargs.setdefault('linestyles', '-')
    kwargs.setdefault('alpha', 1)
    kwargs.setdefault('linewidth', 0.5)
    kwargs.setdefault('zorder', 1.1)
    collection = LineCollection(linedata, **kwargs)
    self._dry_adiabats.append(collection)
    self.add_collection(collection)
    # Label each adiabat with its theta value at the 140 hPa level.
    theta = theta.flatten()
    T_label = calculate('T', p=140, p_units='hPa', theta=theta,
                        T_units='degC', theta_units='degC')
    for i in range(len(theta)):
        text = self.text(T_label[i], 140, '{:.0f}'.format(theta[i]),
                         fontsize=8, ha='left', va='center', rotation=-60,
                         color='#A65300',
                         bbox={'facecolor': 'w', 'edgecolor': 'w',
                               'alpha': 0, },
                         zorder=1.2)
        text.set_clip_on(True)
        self._dry_adiabats.append(text)
r Plot dry adiabats .
60,903
def plot_mixing_lines(self, p=None, rv=None, **kwargs):
    """Plot lines of constant water-vapor mixing ratio.

    Parameters
    ----------
    p : array-like, optional
        Pressure levels (hPa); defaults to a linspace over the y limits.
    rv : array-like, optional
        Mixing ratios (kg/kg); defaults to a standard set of values.
    **kwargs
        Styling overrides for the LineCollection.
    """
    # Remove any previously drawn mixing lines before re-plotting.
    for artist in self._mixing_lines:
        artist.remove()
    self._mixing_lines = []
    if rv is None:
        # Default mixing ratios (kg/kg), shaped as a column vector so each
        # ratio broadcasts against the pressure levels.
        rv = np.array([0.1e-3, 0.2e-3, 0.5e-3, 1e-3, 1.5e-3, 2e-3, 3e-3,
                       4e-3, 6e-3, 8e-3, 10e-3, 12e-3, 15e-3, 20e-3,
                       30e-3, 40e-3, 50e-3]).reshape(-1, 1)
    else:
        rv = np.asarray(rv).reshape(-1, 1)
    if p is None:
        p = np.linspace(min(self.get_ylim()), max(self.get_ylim()))
    else:
        p = np.asarray(p)
    # Dew point along each constant-rv line, plus values at 550 hPa used
    # to position the line labels.
    Td = calculate('Td', p=p, rv=rv, p_units='hPa', rv_units='kg/kg',
                   Td_units='degC')
    Td_label = calculate('Td', p=550, p_units='hPa', rv=rv,
                         Td_units='degC')
    linedata = [np.vstack((t, p)).T for t in Td]
    kwargs.setdefault('clip_on', True)
    kwargs.setdefault('colors', '#166916')
    kwargs.setdefault('linestyles', '--')
    kwargs.setdefault('alpha', 1)
    kwargs.setdefault('linewidth', 0.5)
    kwargs.setdefault('zorder', 1.1)
    collection = LineCollection(linedata, **kwargs)
    self._mixing_lines.append(collection)
    self.add_collection(collection)
    # Label each line with its mixing ratio converted to g/kg.
    rv = rv.flatten() * 1000
    for i in range(len(rv)):
        if rv[i] < 1:
            format_string = '{:.1f}'
        else:
            format_string = '{:.0f}'
        t = self.text(Td_label[i], 550, format_string.format(rv[i]),
                      fontsize=8, ha='right', va='center', rotation=60,
                      color='#166916',
                      bbox={'facecolor': 'w', 'edgecolor': 'w',
                            'alpha': 0, },
                      zorder=1.2)
        t.set_clip_on(True)
        self._mixing_lines.append(t)
r Plot lines of constant mixing ratio .
60,904
def get_calculatable_quantities(inputs, methods):
    """Return the quantities derivable from *inputs* via *methods*.

    *methods* maps an output name to a dict of ``{arg_tuple: func}``
    recipes.  Repeatedly sweeps the recipes until no new quantity can be
    derived (fixed point), then returns the derived names followed by the
    original inputs, as a tuple.
    """
    derived = []
    progress = True
    while progress:
        progress = False
        for quantity, recipes in methods.items():
            if quantity in derived or quantity in inputs:
                continue
            # A quantity is derivable if any recipe's arguments are all
            # already known (either given or previously derived).
            if any(all(arg in inputs or arg in derived for arg in args)
                   for args in recipes):
                derived.append(quantity)
                progress = True
    return tuple(derived) + tuple(inputs)
Given an iterable of input quantity names and a methods dictionary, returns a list of output quantities that can be calculated.
60,905
def _get_methods_that_calculate_outputs(inputs, outputs, methods):
    """Return the subset of *methods* useful for deriving *outputs*.

    Expands *outputs* with any intermediate quantities that are needed
    along the way, and keeps every recipe whose arguments are all inputs,
    outputs, or calculatable intermediates.
    """
    intermediates = get_calculatable_quantities(inputs, methods)
    return_methods = {}
    outputs = list(outputs)
    keep_going = True
    while keep_going:
        keep_going = False
        for output in outputs:
            # Narrowed from a bare ``except:`` — only a missing key is
            # expected here; anything else should propagate.
            try:
                output_dict = return_methods[output]
            except KeyError:
                output_dict = {}
            for args, func in methods[output].items():
                if args not in output_dict.keys():
                    needed = []
                    for arg in args:
                        if arg in inputs:
                            pass
                        elif arg in outputs:
                            pass
                        elif arg in intermediates:
                            if arg not in outputs:
                                needed.append(arg)
                        else:
                            # Argument cannot be obtained at all; this
                            # recipe is unusable.
                            break
                    else:
                        output_dict[args] = func
                        if len(needed) > 0:
                            # New intermediates become outputs too, so
                            # re-sweep to pick up their recipes.
                            outputs.extend(needed)
                            keep_going = True
            if len(output_dict) > 0:
                return_methods[output] = output_dict
    return return_methods
Given iterables of input variable names, output variable names, and a methods dictionary, returns the subset of the methods dictionary that can be calculated, doesn't calculate something we already have, and only contains equations that might help calculate the outputs from the inputs.
60,906
def _get_calculatable_methods_dict ( inputs , methods ) : calculatable_methods = { } for var in methods . keys ( ) : if var in inputs : continue else : var_dict = { } for args , func in methods [ var ] . items ( ) : if all ( [ arg in inputs for arg in args ] ) : var_dict [ args ] = func if len ( var_dict ) == 0 : continue elif len ( var_dict ) == 1 : calculatable_methods [ var ] = var_dict else : min_args = min ( var_dict . keys ( ) , key = lambda x : len ( x ) ) calculatable_methods [ var ] = { min_args : var_dict [ min_args ] } return calculatable_methods
Given an iterable of input variable names and a methods dictionary, returns the subset of that methods dictionary that can be calculated and which doesn't calculate something we already have. Additionally, it may only contain one method for any given output variable, which is the one with the fewest possible arguments.
60,907
def _get_module_methods ( module ) : methods = [ ] funcs = [ ] for item in inspect . getmembers ( equations ) : if ( item [ 0 ] [ 0 ] != '_' and '_from_' in item [ 0 ] ) : func = item [ 1 ] output = item [ 0 ] [ : item [ 0 ] . find ( '_from_' ) ] if func in funcs : continue else : funcs . append ( func ) args = tuple ( getfullargspec ( func ) . args ) try : assumptions = tuple ( func . assumptions ) except AttributeError : raise NotImplementedError ( 'function {0} in equations module has no' ' assumption ' 'definition' . format ( func . __name__ ) ) try : overridden_by_assumptions = func . overridden_by_assumptions except AttributeError : overridden_by_assumptions = ( ) methods . append ( { 'func' : func , 'args' : args , 'output' : output , 'assumptions' : assumptions , 'overridden_by_assumptions' : overridden_by_assumptions , } ) return methods
Returns a methods list corresponding to the equations in the given module . Each entry is a dictionary with keys output args and func corresponding to the output arguments and function of the method . The entries may optionally include assumptions and overridden_by_assumptions as keys stating which assumptions are required to use the method and which assumptions mean the method should not be used because it is overridden .
60,908
def _check_scalar ( value ) : if isinstance ( value , np . ndarray ) : if value . ndim == 0 : return value [ None ] [ 0 ] return value
If value is a 0 - dimensional array returns the contents of value . Otherwise returns value .
60,909
def calculate(*args, **kwargs):
    """Calculate and return the requested quantities.

    Positional arguments name the quantities to compute; keyword
    arguments supply the known quantities (and solver options).
    Raises ValueError if no quantity is requested.
    """
    if not args:
        raise ValueError('must specify quantities to calculate')
    solver = FluidSolver(**kwargs)
    return solver.calculate(*args)
Calculates and returns a requested quantity from quantities passed in as keyword arguments .
60,910
def ip():
    """Show ip address."""
    success, message = _hack_ip()
    if success:
        click.secho(click.style(message, fg='green'))
    else:
        # Print the failure reason in red and exit non-zero.
        click.secho(click.style(message, fg='red'))
        sys.exit(1)
Show ip address .
60,911
def wp(ssid):
    """Show wifi password."""
    def abort(message):
        # Print the error in red and stop with a non-zero exit code.
        click.secho(click.style(message, fg='red'))
        sys.exit(1)

    if not ssid:
        # No SSID supplied — try to detect the currently connected one.
        detected, payload = _detect_wifi_ssid()
        if not detected:
            abort(payload)
        ssid = payload
    cracked, payload = _hack_wifi_password(ssid)
    if not cracked:
        abort(payload)
    click.secho(click.style('{ssid}:{password}'.format(ssid=ssid,
                                                       password=payload),
                            fg='green'))
Show wifi password .
60,912
def build_url(self):
    """Assemble the elevations API request URL from the schema fields."""
    schema = self.schema
    pieces = (schema.protocol, schema.main_url, schema.rest, schema.version,
              schema.restApi, schema.resourcePath, schema.query)
    url = '/'.join('{}'.format(piece) for piece in pieces)
    # Fields left unset render as the literal string "None"; collapse them.
    return url.replace('/None/', '/')
Builds the URL for elevations API services based on the data given by the user .
60,913
def zoomlevel(self):
    """Retrieve the zoom level(s) from the API response.

    Returns a list of ``zoomlevel`` namedtuples when the response is a
    list of resources, or a single namedtuple for ElevationData /
    SeaLevelData dict responses.  May return None if no zoom level is
    found.
    """
    resources = self.get_resource()
    zoomlevel = namedtuple('zoomlevel', 'zoomLevel')
    try:
        # Normal case: a list of resource dicts.
        return [zoomlevel(resource['zoomLevel'])
                for resource in resources]
    except TypeError:
        # Response is a single dict keyed by data type, not a list.
        try:
            if isinstance(resources['ElevationData'], dict):
                return zoomlevel(resources['ElevationData']['ZoomLevel'])
        except KeyError:
            try:
                if isinstance(resources['SeaLevelData'], dict):
                    zoom = resources['SeaLevelData']['ZoomLevel']
                    return zoomlevel(zoom)
            except KeyError:
                # NOTE(review): this prints the KeyError *class*, not the
                # caught exception instance — likely unintended; confirm.
                print(KeyError)
Retrieves zoomlevel from the output response
60,914
def to_json_file(self, path, file_name=None):
    """Write the output as JSON under *path* (or the cwd if *path* is not a directory)."""
    target = path if bool(path) and os.path.isdir(path) else os.getcwd()
    self.write_to_json(target, file_name)
Writes output to a JSON file with the given file name
60,915
def get_data(self):
    """Fetch data from the built URL and store the response.

    Stores the ``requests`` response on ``self.locationApiData`` and
    raises an HTTPError for any non-200 status.
    """
    url = self.build_url()
    self.locationApiData = requests.get(url)
    if not self.locationApiData.status_code == 200:
        # Fixed: the old code did ``raise x.raise_for_status()`` —
        # raise_for_status() raises itself for 4xx/5xx, and returns None
        # otherwise, so the ``raise`` produced a confusing TypeError.
        self.locationApiData.raise_for_status()
        # raise_for_status() is a no-op for non-error statuses (e.g. 3xx
        # already followed, or 2xx other than 200); still fail loudly.
        raise requests.HTTPError(
            'Unexpected status code: {}'.format(
                self.locationApiData.status_code),
            response=self.locationApiData)
Gets data from the built url
60,916
def update_compaction(model):
    """Update the table's compaction options for *model* if they differ.

    Compares the model's desired compaction strategy/options with what the
    database reports and issues an ALTER TABLE when they diverge.
    Returns True if an update was executed, False otherwise.
    """
    logger.debug("Checking %s for compaction differences", model)
    table = get_table_settings(model)
    # Copy so we can destructively pop while comparing.
    existing_options = table.options.copy()
    existing_compaction_strategy = existing_options['compaction_strategy_class']
    existing_options = json.loads(existing_options['compaction_strategy_options'])
    desired_options = get_compaction_options(model)
    desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy)
    desired_options.pop('class', None)
    do_update = False
    # Substring match: the stored strategy class is fully qualified.
    if desired_compact_strategy not in existing_compaction_strategy:
        do_update = True
    for k, v in desired_options.items():
        val = existing_options.pop(k, None)
        if val != v:
            do_update = True
    if do_update:
        options = get_compaction_options(model)
        # CQL map literals use single quotes, not JSON double quotes.
        options = json.dumps(options).replace('"', "'")
        cf_name = model.column_family_name()
        query = "ALTER TABLE {} with compaction = {}".format(cf_name, options)
        logger.debug(query)
        execute(query)
        return True
    return False
Updates the compaction options for the given model if necessary .
60,917
def setup(hosts, default_keyspace, consistency=ConsistencyLevel.ONE,
          lazy_connect=False, retry_connect=False, **kwargs):
    """Record the hosts and connect to one of them.

    Parameters
    ----------
    hosts : iterable
        Cassandra contact points.
    default_keyspace : str
        Keyspace to use for models; required.
    consistency : optional
        Default consistency level for queries.
    lazy_connect : bool
        If True, defer connecting until first use.
    retry_connect : bool
        If True, fall back to lazy connection when no host is available.
    **kwargs
        Forwarded to the driver's Cluster constructor.
    """
    global cluster, session, default_consistency_level, lazy_connect_args
    if 'username' in kwargs or 'password' in kwargs:
        raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider")
    if not default_keyspace:
        raise UndefinedKeyspaceException()
    # Imported here to avoid a circular import at module load time.
    from cqlengine import models
    models.DEFAULT_KEYSPACE = default_keyspace
    default_consistency_level = consistency
    if lazy_connect:
        # Stash the full call arguments so the real connection can be
        # re-attempted later with identical settings (lazy_connect forced
        # off so the retry actually connects).
        kwargs['default_keyspace'] = default_keyspace
        kwargs['consistency'] = consistency
        kwargs['lazy_connect'] = False
        kwargs['retry_connect'] = retry_connect
        lazy_connect_args = (hosts, kwargs)
        return
    cluster = Cluster(hosts, **kwargs)
    try:
        session = cluster.connect()
    except NoHostAvailable:
        if retry_connect:
            # Record args so a later call can retry, then re-raise.
            kwargs['default_keyspace'] = default_keyspace
            kwargs['consistency'] = consistency
            kwargs['lazy_connect'] = False
            kwargs['retry_connect'] = retry_connect
            lazy_connect_args = (hosts, kwargs)
        raise
    session.row_factory = dict_factory
Records the hosts and connects to one of them
60,918
def validate(self, value):
    """Return the cleaned value; raise ValidationError for a required-but-None value."""
    if value is None and self.required:
        raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))
    return value
Returns a cleaned and validated value. Raises a ValidationError if there's a problem.
60,919
def from_datetime(self, dt):
    """Generate a version-1 UUID whose timestamp encodes *dt*.

    Naive datetimes are treated as UTC-equivalent (offset 0); aware
    datetimes have their UTC offset removed before encoding.
    """
    global _last_timestamp
    # Build an epoch with the same tzinfo as dt so subtraction is legal.
    epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo)
    offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0
    timestamp = (dt - epoch).total_seconds() - offset
    node = None
    clock_seq = None
    # UUID v1 timestamps count 100-ns intervals since 1582-10-15;
    # 0x01b21dd213814000 is the offset from the Unix epoch.
    nanoseconds = int(timestamp * 1e9)
    timestamp = int(nanoseconds // 100) + 0x01b21dd213814000
    if clock_seq is None:
        import random
        # Random 14-bit clock sequence, as permitted by RFC 4122.
        clock_seq = random.randrange(1 << 14)
    time_low = timestamp & 0xffffffff
    time_mid = (timestamp >> 32) & 0xffff
    time_hi_version = (timestamp >> 48) & 0x0fff
    clock_seq_low = clock_seq & 0xff
    clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
    if node is None:
        node = getnode()
    return pyUUID(fields=(time_low, time_mid, time_hi_version,
                          clock_seq_hi_variant, clock_seq_low, node),
                  version=1)
generates a UUID for a given datetime
60,920
def _can_update ( self ) : if not self . _is_persisted : return False pks = self . _primary_keys . keys ( ) return all ( [ not self . _values [ k ] . changed for k in self . _primary_keys ] )
Called by the save function to check if this should be persisted with update or insert
60,921
def delete(self):
    """Delete this instance from the database."""
    query = self.__dmlquery__(self.__class__, self,
                              batch=self._batch,
                              timestamp=self._timestamp,
                              consistency=self.__consistency__,
                              timeout=self._timeout)
    query.delete()
Deletes this instance
60,922
def filter(self, *args, **kwargs):
    """Add WHERE clauses to the queryset, returning a new queryset.

    Positional args must be WhereClause instances; keyword args are
    ``column__op=value`` filters.  Raises CQLEngineException for None
    values and QueryException for unresolvable columns/operators.
    """
    if len([x for x in kwargs.values() if x is None]):
        raise CQLEngineException("None values on filter are not allowed")
    # Querysets are immutable: work on a deep copy and return it.
    clone = copy.deepcopy(self)
    for operator in args:
        if not isinstance(operator, WhereClause):
            raise QueryException('{} is not a valid query operator'.format(operator))
        clone._where.append(operator)
    for arg, val in kwargs.items():
        # Split "name__op" into the column name and the operator suffix.
        col_name, col_op = self._parse_filter_arg(arg)
        quote_field = True
        try:
            column = self.model._get_column(col_name)
        except KeyError:
            # 'pk__token' is a virtual column for token-range queries.
            if col_name == 'pk__token':
                if not isinstance(val, Token):
                    raise QueryException("Virtual column 'pk__token' may only be compared to Token() values")
                column = columns._PartitionKeysToken(self.model)
                quote_field = False
            else:
                raise QueryException("Can't resolve column name: '{}'".format(col_name))
        if isinstance(val, Token):
            if col_name != 'pk__token':
                raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
            partition_columns = column.partition_columns
            if len(partition_columns) != len(val.value):
                raise QueryException(
                    'Token() received {} arguments but model has {} partition keys'.format(
                        len(val.value), len(partition_columns)))
            val.set_columns(partition_columns)
        # Default to equality when no operator suffix was given.
        operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')
        operator = operator_class()
        if isinstance(operator, InOperator):
            if not isinstance(val, (list, tuple)):
                raise QueryException('IN queries must use a list/tuple value')
            query_val = [column.to_database(v) for v in val]
        elif isinstance(val, BaseQueryFunction):
            # Query functions (e.g. Token, MinTimeUUID) go through as-is.
            query_val = val
        else:
            query_val = column.to_database(val)
        clone._where.append(WhereClause(column.db_field_name, operator,
                                        query_val, quote_field=quote_field))
    return clone
Adds WHERE arguments to the queryset returning a new queryset
60,923
def order_by(self, *colnames):
    """Order the result set; only clustering columns may be used.

    Calling with no arguments clears any previously set ordering.
    Returns a new queryset; the original is untouched.
    """
    clone = copy.deepcopy(self)
    if not colnames:
        clone._order = []
        return clone
    clone._order.extend(
        '"{}" {}'.format(*self._get_ordering_condition(name))
        for name in colnames)
    return clone
orders the result set . ordering can only use clustering columns .
60,924
def count(self):
    """Return the number of rows matched by this query.

    Uses the cached result set when one exists; otherwise issues a
    COUNT query.  Not available in batch mode.
    """
    if self._batch:
        raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
    if self._result_cache is not None:
        # Results already fetched — just count them locally.
        return len(self._result_cache)
    query = self._select_query()
    query.count = True
    result = self._execute(query)
    return result[0]['count']
Returns the number of rows matched by this query
60,925
def limit(self, v):
    """Set the LIMIT on returned rows (CQL's default limit is 10,000).

    Pass ``None`` to clear an explicit limit.  Returns a new queryset
    (or ``self`` if the limit is unchanged).
    """
    if not (v is None or isinstance(v, six.integer_types)):
        raise TypeError
    if v == self._limit:
        return self
    # Fixed: guard the comparison — ``None < 0`` raises TypeError on
    # Python 3, which made it impossible to clear a previously-set limit.
    if v is not None and v < 0:
        raise QueryException("Negative limit is not allowed")
    clone = copy.deepcopy(self)
    clone._limit = v
    return clone
Sets the limit on the number of results returned CQL has a default limit of 10 000
60,926
def update(self, **values):
    """Update the rows matched by this queryset.

    Keyword arguments are ``column[__op]=value`` assignments.  ``None``
    values delete the column instead.  Raises ValidationError for
    unknown columns or attempts to update primary keys.
    """
    if not values:
        return
    nulled_columns = set()
    us = UpdateStatement(self.column_family_name, where=self._where,
                         ttl=self._ttl, timestamp=self._timestamp,
                         transactions=self._transaction)
    for name, val in values.items():
        col_name, col_op = self._parse_filter_arg(name)
        col = self.model._columns.get(col_name)
        if col is None:
            raise ValidationError("{}.{} has no column named: {}".format(
                self.__module__, self.model.__name__, col_name))
        if col.is_primary_key:
            raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(
                col_name, self.__module__, self.model.__name__))
        val = col.validate(val)
        if val is None:
            # In CQL, setting a column to null is a delete; batch these
            # up into a single DeleteStatement below.
            nulled_columns.add(col_name)
            continue
        if isinstance(col, Counter):
            # Counter updates are not supported via this path.
            raise NotImplementedError
        elif isinstance(col, (List, Set, Map)):
            # Collection columns need a type-specific update clause so
            # operations like append/prepend/add map correctly.
            if isinstance(col, List):
                klass = ListUpdateClause
            elif isinstance(col, Set):
                klass = SetUpdateClause
            elif isinstance(col, Map):
                klass = MapUpdateClause
            else:
                raise RuntimeError
            us.add_assignment_clause(klass(col_name, col.to_database(val),
                                           operation=col_op))
        else:
            us.add_assignment_clause(AssignmentClause(col_name,
                                                      col.to_database(val)))
    if us.assignments:
        self._execute(us)
    if nulled_columns:
        ds = DeleteStatement(self.column_family_name, fields=nulled_columns,
                             where=self._where)
        self._execute(ds)
Updates the rows in this queryset
60,927
def handle(client, request):
    """Handle a format request: run the requested formatters over the data.

    *request* is a dict with optional ``formaters`` (list of
    ``{'name': ..., 'config': {...}}``) and required string ``data``.
    Responds to *client* via ``send(client, error, data)``.
    """
    formaters = request.get('formaters', None)
    if not formaters:
        # Default to autopep8 when the client specifies nothing.
        formaters = [{'name': 'autopep8'}]
    logging.debug('formaters: ' + json.dumps(formaters, indent=4))
    data = request.get('data', None)
    if not isinstance(data, str):
        return send(client, 'invalid data', None)
    # Use the first max_line_length found in any formatter's config so
    # isort can be kept consistent with the other formatters.
    max_line_length = None
    for formater in formaters:
        max_line_length = formater.get('config', {}).get('max_line_length')
        if max_line_length:
            break
    for formater in formaters:
        name = formater.get('name', None)
        config = formater.get('config', {})
        if name not in FORMATERS:
            return send(client, 'formater {} not support'.format(name), None)
        # NOTE: the loop variable is rebound here from the config dict to
        # the callable — intentional, but easy to misread.
        formater = FORMATERS[name]
        if formater is None:
            return send(client, 'formater {} not installed'.format(name), None)
        if name == 'isort' and max_line_length:
            config.setdefault('line_length', max_line_length)
        # Formatters are chained: each one's output feeds the next.
        data = formater(data, **config)
    return send(client, None, data)
Handle format request
60,928
def easeInOutQuad(n):
    """Quadratic easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    if n < 0.5:
        # First half: plain quadratic acceleration.
        return 2 * n ** 2
    # Second half: mirrored quadratic deceleration on t in [0, 1].
    t = n * 2 - 1
    return -0.5 * (t * (t - 2) - 1)
A quadratic tween function that accelerates reaches the midpoint and then decelerates .
60,929
def easeInOutCubic(n):
    """Cubic easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    t = 2 * n
    if t < 1:
        return 0.5 * t ** 3
    t -= 2
    return 0.5 * (t ** 3 + 2)
A cubic tween function that accelerates reaches the midpoint and then decelerates .
60,930
def easeInOutQuart(n):
    """Quartic easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    t = 2 * n
    if t < 1:
        return 0.5 * t ** 4
    t -= 2
    # Note the leading minus: t is negative-shifted here, and an even
    # power needs the sign flip to stay on the easing curve.
    return -0.5 * (t ** 4 - 2)
A quartic tween function that accelerates reaches the midpoint and then decelerates .
60,931
def easeInOutQuint(n):
    """Quintic easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    t = 2 * n
    if t < 1:
        return 0.5 * t ** 5
    t -= 2
    return 0.5 * (t ** 5 + 2)
A quintic tween function that accelerates reaches the midpoint and then decelerates .
60,932
def easeInOutExpo(n):
    """Exponential easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    # Endpoints are special-cased: the exponential never quite reaches them.
    if n == 0:
        return 0
    if n == 1:
        return 1
    t = n * 2
    if t < 1:
        return 0.5 * 2 ** (10 * (t - 1))
    t -= 1
    return 0.5 * (-1 * (2 ** (-10 * t)) + 2)
An exponential tween function that accelerates reaches the midpoint and then decelerates .
60,933
def easeInOutCirc(n):
    """Circular easing: accelerate to the midpoint, then decelerate."""
    _checkRange(n)
    t = n * 2
    if t < 1:
        return -0.5 * (math.sqrt(1 - t ** 2) - 1)
    t -= 2
    return 0.5 * (math.sqrt(1 - t ** 2) + 1)
A circular tween function that accelerates reaches the midpoint and then decelerates .
60,934
def easeInElastic(n, amplitude=1, period=0.3):
    """Elastic ease-in: an increasing wobble that snaps into the destination."""
    _checkRange(n)
    # The ease-in curve is the ease-out curve mirrored through (0.5, 0.5).
    mirrored = easeOutElastic(1 - n, amplitude=amplitude, period=period)
    return 1 - mirrored
An elastic tween function that begins with an increasing wobble and then snaps into the destination .
60,935
def easeOutElastic(n, amplitude=1, period=0.3):
    """Elastic ease-out: overshoot the destination, then rubber-band in."""
    _checkRange(n)
    two_pi = 2 * math.pi
    if amplitude < 1:
        # An amplitude below 1 cannot reach the target; clamp it and use
        # the quarter-period phase shift that amplitude == 1 implies.
        amplitude = 1
        phase = period / 4
    else:
        phase = period / two_pi * math.asin(1 / amplitude)
    return amplitude * 2 ** (-10 * n) * math.sin((n - phase) * (two_pi / period)) + 1
An elastic tween function that overshoots the destination and then rubber bands into the destination .
60,936
def easeInOutElastic(n, amplitude=1, period=0.5):
    """Elastic easing that wobbles towards the midpoint."""
    _checkRange(n)
    t = n * 2
    if t < 1:
        # First half: half-scaled elastic ease-in.
        return easeInElastic(t, amplitude=amplitude, period=period) / 2
    # Second half: half-scaled elastic ease-out shifted up.
    return easeOutElastic(t - 1, amplitude=amplitude, period=period) / 2 + 0.5
An elastic tween function wobbles towards the midpoint .
60,937
def easeInBack(n, s=1.70158):
    """Back ease-in: pull back slightly, then accelerate to the destination.

    *s* controls the size of the initial backward overshoot.
    """
    _checkRange(n)
    return n ** 2 * ((s + 1) * n - s)
A tween function that backs up first at the start and then goes to the destination .
60,938
def easeOutBack(n, s=1.70158):
    """Back ease-out: overshoot the destination slightly, then settle back.

    *s* controls the size of the overshoot.
    """
    _checkRange(n)
    t = n - 1
    return t * t * ((s + 1) * t + s) + 1
A tween function that overshoots the destination a little and then backs into the destination .
60,939
def easeInOutBack(n, s=1.70158):
    """Back easing that overshoots at both the start and the destination."""
    _checkRange(n)
    t = n * 2
    # The overshoot factor is scaled up for the in-out variant.
    s *= 1.525
    if t < 1:
        return 0.5 * (t * t * ((s + 1) * t - s))
    t -= 2
    return 0.5 * (t * t * ((s + 1) * t + s) + 2)
A back - in tween function that overshoots both the start and destination .
60,940
def easeOutBounce(n):
    """Bouncing ease-out: hit the destination, then bounce to rest.

    Piecewise parabolas over [0, 1], each bounce smaller than the last.
    """
    _checkRange(n)
    if n < 1 / 2.75:
        return 7.5625 * n * n
    if n < 2 / 2.75:
        n -= 1.5 / 2.75
        return 7.5625 * n * n + 0.75
    if n < 2.5 / 2.75:
        n -= 2.25 / 2.75
        return 7.5625 * n * n + 0.9375
    n -= 2.65 / 2.75
    return 7.5625 * n * n + 0.984375
A bouncing tween function that hits the destination and then bounces to rest .
60,941
def formfield_for_manytomany(self, db_field, request, **kwargs):
    """Apply get_field_queryset explicitly, since not all Admin subclasses use it."""
    db = kwargs.get('using')
    # Evaluated unconditionally (matching the original behavior); only
    # installed when the caller did not supply a queryset of its own.
    fallback = self.get_field_queryset(db, db_field, request)
    kwargs.setdefault('queryset', fallback)
    return super(AccessControlMixin, self).formfield_for_manytomany(db_field, request, **kwargs)
Not all Admin subclasses use get_field_queryset here so we will use it explicitly
60,942
def delete_selected(self, request, queryset):
    """The real delete action: confirm, then delete the selected objects.

    On POST confirmation (and no protected objects), deletes the
    queryset and logs each deletion.  Otherwise renders the confirmation
    page listing what would be deleted.
    """
    opts = self.model._meta
    app_label = opts.app_label
    deletable_objects, model_count, perms_needed, protected = self.get_deleted_objects(request, queryset)
    # The user has confirmed the deletion and nothing is protected.
    if request.POST.get('post') and not protected:
        if perms_needed or protected:
            raise PermissionDenied
        n = queryset.count()
        if n:
            for obj in queryset:
                obj_display = force_text(obj)
                self.log_deletion(request, obj, obj_display)
            queryset.delete()
            self.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(self.opts, n)
            }, messages.SUCCESS)
        # Returning None redisplays the change list.
        return None
    # Build a human-readable description of the selection.
    sz = queryset.count()
    if sz == 1:
        objects_name = _('%(verbose_name)s "%(object)s"') % {
            'verbose_name': force_text(opts.verbose_name),
            'object': queryset[0]}
    else:
        objects_name = _('%(count)s %(verbose_name_plural)s') % {
            'verbose_name_plural': force_text(opts.verbose_name_plural),
            'count': sz}
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")
    context = dict(
        self.admin_site.each_context(request),
        title=title,
        objects_name=objects_name,
        deletable_objects=[deletable_objects],
        model_count=dict(model_count).items(),
        queryset=queryset,
        perms_lacking=perms_needed,
        protected=protected,
        opts=opts,
        action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,
        media=self.media,
    )
    request.current_app = self.admin_site.name
    # Most-specific template first, falling back to the generic one.
    return TemplateResponse(request, self.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context)
The real delete function always evaluated either from the action or from the instance delete link
60,943
def get_deleted_objects(self, request, queryset):
    """Collect every object that deleting *queryset* would also delete.

    Returns ``(to_delete, model_count, perms_needed, protected)`` where
    ``to_delete`` is a nested display list, ``model_count`` maps verbose
    names to counts, ``perms_needed`` names models lacking delete
    permission, and ``protected`` is the set of protected objects.
    """
    collector = NestedObjects(using=queryset.db)
    collector.collect(queryset)
    model_perms_needed = set()
    object_perms_needed = set()
    STRONG_DELETION_CONTROL = getattr(settings, 'ACCESS_STRONG_DELETION_CONTROL', False)

    def format_callback(obj):
        # Renders one collected object for display, and as a side effect
        # records access-control failures in the enclosing sets.
        has_admin = obj.__class__ in self.admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name), force_text(obj))
        admin_url = None
        try:
            admin_url = reverse('%s:%s_%s_change' % (self.admin_site.name,
                                                     opts.app_label,
                                                     opts.model_name),
                                None, (quote(obj._get_pk_val()),))
        except NoReverseMatch:
            pass
        if STRONG_DELETION_CONTROL or has_admin:
            if not obj.__class__._meta.auto_created:
                manager = AccessManager(obj.__class__)
                # Model-level and instance-level deleteability checks.
                if manager.check_deleteable(obj.__class__, request) is False:
                    model_perms_needed.add(opts.verbose_name)
                if not manager.apply_deleteable(obj.__class__._default_manager.filter(pk=obj.pk), request):
                    object_perms_needed.add(obj)
        if admin_url:
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name), admin_url, obj)
        else:
            return no_edit_link

    to_delete = collector.nested(format_callback)
    protected = [format_callback(obj) for obj in collector.protected]
    # Objects failing the instance-level check are reported as protected.
    protected = set([format_callback(obj) for obj in object_perms_needed]).union(protected)
    model_count = {model._meta.verbose_name_plural: len(objs)
                   for model, objs in collector.model_objs.items()}
    return to_delete, model_count, model_perms_needed, protected
Find all objects related to instances of queryset that should also be deleted .
60,944
def register_plugins(cls, plugins):
    """Register every model-to-plugin pair from the *plugins* dict."""
    for model, plugin in plugins.items():
        cls.register_plugin(model, plugin)
Register plugins. The plugins parameter should be a dict mapping model to plugin.
60,945
def register_plugin(cls, model, plugin):
    """Register *plugin* as the access-control plugin for *model*."""
    logger.info("Plugin registered for %s: %s", model, plugin)
    cls.plugins[model] = plugin
Register a plugin for the model.
60,946
def get_default_plugin(cls):
    """Return the default access plugin, instantiating and caching it once.

    The plugin class is named by settings.ACCESS_DEFAULT_PLUGIN as a
    dotted path; instances are cached per path on ``cls.default_plugins``.
    """
    from importlib import import_module
    from django.conf import settings
    default_plugin = getattr(settings, 'ACCESS_DEFAULT_PLUGIN', "access.plugins.DjangoAccessPlugin")
    if default_plugin not in cls.default_plugins:
        logger.info("Creating a default plugin: %s", default_plugin)
        # rsplit replaces the old split/join dance for dotted paths.
        plugin_path, plugin_name = default_plugin.rsplit('.', 1)
        DefaultPlugin = getattr(import_module(plugin_path), plugin_name)
        cls.default_plugins[default_plugin] = DefaultPlugin()
    return cls.default_plugins[default_plugin]
Return a default plugin .
60,947
def plugin_for(cls, model):
    """Resolve the plugin registered for *model*.

    Walks the model's base classes so a plugin registered on an ancestor
    model is inherited.  Returns None for non-Model classes or when no
    plugin is found.
    """
    logger.debug("Getting a plugin for: %s", model)
    if not issubclass(model, Model):
        return None
    try:
        return cls.plugins[model]
    except KeyError:
        pass
    # Not registered directly — search the inheritance chain.
    for base in model.__bases__:
        found = cls.plugin_for(base)
        if found:
            return found
Find and return a plugin for this model . Uses inheritance to find a model where the plugin is registered .
60,948
def visible(self, request):
    """Run check_visible and apply_visible against the owned model and its instances."""
    if self.check_visible(self.model, request) is False:
        # Model-level check failed: nothing is visible.
        return self.get_queryset().none()
    return self.apply_visible(self.get_queryset(), request)
Checks both check_visible and apply_visible against the owned model and its instance set.
60,949
def changeable(self, request):
    """Run check_changeable and apply_changeable against the owned model and its instances."""
    if self.check_changeable(self.model, request) is False:
        # Model-level check failed: nothing is changeable.
        return self.get_queryset().none()
    return self.apply_changeable(self.get_queryset(), request)
Checks both check_changeable and apply_changeable against the owned model and its instance set.
60,950
def deleteable(self, request):
    """Run check_deleteable and apply_deleteable against the owned model and its instances."""
    if self.check_deleteable(self.model, request) is False:
        # Model-level check failed: nothing is deleteable.
        return self.get_queryset().none()
    return self.apply_deleteable(self.get_queryset(), request)
Checks both check_deleteable and apply_deleteable against the owned model and its instance set.
60,951
def get_plugin_from_string(plugin_name):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute."""
    module_path, attr_name = plugin_name.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, attr_name)
Returns plugin or plugin point class from given plugin_name string .
60,952
def make_pdf(dist, params, size=10000):
    """Sample *dist*'s probability density between its 1st and 99th percentiles.

    *params* is (shape..., loc, scale) as returned by ``dist.fit``.
    Returns a pandas Series indexed by the x sample points.
    """
    shape_args = params[:-2]
    loc, scale = params[-2], params[-1]

    def percentile(q):
        # Some distributions take extra shape parameters; pass them
        # through only when present.
        if shape_args:
            return dist.ppf(q, *shape_args, loc=loc, scale=scale)
        return dist.ppf(q, loc=loc, scale=scale)

    xs = np.linspace(percentile(0.01), percentile(0.99), size)
    densities = dist.pdf(xs, loc=loc, scale=scale, *shape_args)
    return pd.Series(densities, xs)
Generate the distribution's Probability Distribution Function.
60,953
def urlencode(query, params):
    """Join *query* and ordered key/value *params* into a full URL, preserving order."""
    encoded_pairs = [
        '{}={}'.format(key, quote_plus(str(value))) for key, value in params
    ]
    return '{}?{}'.format(query, '&'.join(encoded_pairs))
Correctly convert the given query and parameters into a full query + query string ensuring the order of the params .
60,954
def _load_from_string(data):
    """Populate the module-level ``_CACHE`` from a JSON string."""
    global _CACHE
    if PYTHON_3:
        decoded = json.loads(data.decode("utf-8"))
    else:
        decoded = json.loads(data)
    _CACHE = _recursively_convert_unicode_to_str(decoded)['data']
Loads the cache from the string
60,955
def get_model(cls, name=None, status=ENABLED):
    """Return the database model for this plugin point or plugin class."""
    pythonpath = cls.get_pythonpath()
    if not is_plugin_point(cls):
        # Called from a plugin class: look the plugin up directly.
        return Plugin.objects.get(pythonpath=pythonpath)
    if name is None:
        return PluginPointModel.objects.get(pythonpath=pythonpath)
    lookup = {'point__pythonpath': pythonpath, 'name': name}
    if status is not None:
        lookup['status'] = status
    return Plugin.objects.get(**lookup)
Returns the model instance of the plugin point or plugin, depending on which class this method is called from .
60,956
def get_point_model(cls):
    """Return the plugin point model instance; valid only on plugin classes."""
    if is_plugin_point(cls):
        raise Exception(_('This method is only available to plugin '
                          'classes.'))
    return PluginPointModel.objects.get(
        plugin__pythonpath=cls.get_pythonpath())
Returns plugin point model instance . Only used from plugin classes .
60,957
def get_plugins(cls):
    """Yield a plugin instance for every enabled plugin of this plugin point.

    Raises if called from a plugin class rather than a plugin point class.
    """
    if django_version >= (1, 9) and not db_table_exists(Plugin._meta.db_table):
        # Table not created yet (e.g. before initial migrate): yield nothing.
        # Plain `return` instead of `raise StopIteration` -- PEP 479 turns
        # StopIteration raised inside a generator into a RuntimeError on
        # Python 3.7+.
        return
    if is_plugin_point(cls):
        for plugin_model in cls.get_plugins_qs():
            yield plugin_model.get_plugin()
    else:
        raise Exception(_('This method is only available to plugin point '
                          'classes.'))
Returns all plugin instances of plugin point passing all args and kwargs to plugin constructor .
60,958
def get_plugins_qs(cls):
    """Queryset of all enabled plugins of this plugin point, ordered by index."""
    if not is_plugin_point(cls):
        raise Exception(_('This method is only available to plugin point '
                          'classes.'))
    return Plugin.objects.filter(
        point__pythonpath=cls.get_pythonpath(),
        status=ENABLED,
    ).order_by('index')
Returns query set of all plugins belonging to plugin point .
60,959
def safeunicode(arg, *args, **kwargs):
    """Coerce *arg* to ``unicode`` unless it already is one (Python 2 helper)."""
    if isinstance(arg, unicode):
        return arg
    return unicode(arg, *args, **kwargs)
Coerce argument to unicode if it is not already .
60,960
def get_reports():
    """Return energy data from 1960 to 2014 across various factors.

    Fixes: removed a dead ``if False: pass`` branch and a no-op ``.format``
    call on a query string that contains no placeholder.
    """
    rows = _Constants._DATABASE.execute("SELECT data FROM energy")
    data = [_Auxiliary._byteify(_json.loads(r[0])) for r in rows]
    return _Auxiliary._byteify(data)
Returns energy data from 1960 to 2014 across various factors .
60,961
def available(self, src, dst, model):
    """Yield (point, instance) pairs for registered entries, creating missing
    database rows and re-enabling previously removed ones."""
    for pythonpath, point in six.iteritems(src):
        instance = dst.pop(pythonpath, None)
        if instance is None:
            # Not in the database yet: build a fresh (unsaved) row.
            self.print_(1, "Registering %s for %s" % (model.__name__, pythonpath))
            instance = model(pythonpath=pythonpath)
        if instance.status == REMOVED:
            # Previously removed, now registered again: re-enable it.
            self.print_(1, "Updating %s for %s" % (model.__name__, pythonpath))
            instance.status = ENABLED
        yield point, instance
Iterate over all registered plugins or plugin points and prepare to add them to database .
60,962
def missing(self, dst):
    """Mark database entries that are no longer registered as REMOVED."""
    for instance in six.itervalues(dst):
        if instance.status == REMOVED:
            continue  # already flagged, avoid a pointless save
        instance.status = REMOVED
        instance.save()
Mark all missing plugins that exists in database but are not registered .
60,963
def all(self):
    """Synchronize all registered plugins and plugin points to the database.

    NOTE(review): this guard returns ``self.points()`` when the plugin
    tables do NOT exist yet (e.g. before migrations on Django >= 1.9);
    the non-guarded synchronization path is not visible in this chunk --
    confirm against the full file.
    """
    if django_version >= (1, 9) and (
            not db_table_exists(Plugin._meta.db_table)
            or not db_table_exists(PluginPoint._meta.db_table)):
        return self.points()
Synchronize all registered plugins and plugin points to database .
60,964
def get_weather(test=False):
    """Return weather reports from the dataset.

    :param test: when True (or the module-wide ``_TEST`` flag is set), only
        the first ``_HARDWARE`` rows are fetched.

    Fixes: the two branches were identical except for the SQL ``LIMIT``
    clause (the no-op ``.format`` on the unlimited query is dropped), so the
    shared row-decoding logic is deduplicated.
    """
    if _Constants._TEST or test:
        query = "SELECT data FROM weather LIMIT {hardware}".format(
            hardware=_Constants._HARDWARE)
    else:
        query = "SELECT data FROM weather"
    rows = _Constants._DATABASE.execute(query)
    data = [_Auxiliary._byteify(_json.loads(r[0])) for r in rows]
    return _Auxiliary._byteify(data)
Returns weather reports from the dataset .
60,965
def _get(self, url, **kw):
    """GET *url*, defaulting JSON Content-Type/Accept and the Authorization header."""
    headers = kw.pop('headers', {})
    defaults = (
        ('Content-Type', 'application/json'),
        ('Accept', 'application/json'),
        ('Authorization', self.AUTHORIZATION_HEADER),
    )
    for header_name, header_value in defaults:
        headers.setdefault(header_name, header_value)
    kw['headers'] = headers
    response = self.session.get(url, **kw)
    self._raise_for_status(response)
    return response
Makes a GET request setting Authorization header by default
60,966
def _post(self, url, **kw):
    """POST to *url*, injecting the Authorization header unless the caller set one."""
    headers = kw.pop('headers', {})
    if 'Authorization' not in headers:
        headers['Authorization'] = self.AUTHORIZATION_HEADER
    kw['headers'] = headers
    response = self.session.post(url, **kw)
    self._raise_for_status(response)
    return response
Makes a POST request setting Authorization header by default
60,967
def _post_json ( self , url , data , ** kw ) : data = json . dumps ( data ) headers = kw . pop ( 'headers' , { } ) headers . setdefault ( 'Content-Type' , 'application/json' ) headers . setdefault ( 'Accept' , 'application/json' ) kw [ 'headers' ] = headers kw [ 'data' ] = data return self . _post ( url , ** kw )
Makes a POST request setting Authorization and Content - Type headers by default
60,968
def authenticate(self, login=None, password=None):
    """Authenticate this client against Xbox Live.

    Credentials fall back to the MS_LOGIN / MS_PASSWD environment variables.
    Performs the Microsoft OAuth login by scraping the login form, then
    exchanges the access token for a user token and an XSTS token, and
    finally stores the XBL3.0 Authorization header on the client.

    Returns self; raises AuthenticationException when credentials are
    missing or rejected.
    """
    if login is None:
        login = os.environ.get('MS_LOGIN')
    if password is None:
        password = os.environ.get('MS_PASSWD')
    if not login or not password:
        msg = ('Authentication credentials required. Please refer to '
               'http://xbox.readthedocs.org/en/latest/authentication.html')
        raise AuthenticationException(msg)
    self.login = login
    # Step 1: fetch the OAuth authorize page to obtain the login form.
    base_url = 'https://login.live.com/oauth20_authorize.srf?'
    qs = unquote(urlencode({
        'client_id': '0000000048093EE3',
        'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
        'response_type': 'token',
        'display': 'touch',
        'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
        'locale': 'en',
    }))
    resp = self.session.get(base_url + qs)
    # Scrape the form POST target and the anti-forgery PPFT token out of
    # the embedded javascript.
    url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
    ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
    login_post_url = re.search(url_re, resp.content).group(1)
    post_data = {
        'login': login,
        'passwd': password,
        'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
        'PPSX': 'Passpor',
        'SI': 'Sign in',
        'type': '11',
        'NewUser': '1',
        'LoginOptions': '1',
        'i3': '36728',
        'm1': '768',
        'm2': '1184',
        'm3': '0',
        'i12': '1',
        'i17': '0',
        'i18': '__Login_Host|1',
    }
    # Step 2: submit the credentials; success is signalled by a redirect.
    resp = self.session.post(
        login_post_url, data=post_data, allow_redirects=False,
    )
    if 'Location' not in resp.headers:
        msg = 'Could not log in with supplied credentials'
        raise AuthenticationException(msg)
    # The access token rides in the redirect URL's fragment.
    location = resp.headers['Location']
    parsed = urlparse(location)
    fragment = parse_qs(parsed.fragment)
    access_token = fragment['access_token'][0]
    # Step 3: trade the access token for an Xbox Live user token.
    url = 'https://user.auth.xboxlive.com/user/authenticate'
    resp = self.session.post(url, data=json.dumps({
        "RelyingParty": "http://auth.xboxlive.com",
        "TokenType": "JWT",
        "Properties": {
            "AuthMethod": "RPS",
            "SiteName": "user.auth.xboxlive.com",
            "RpsTicket": access_token,
        }
    }), headers={'Content-Type': 'application/json'})
    json_data = resp.json()
    user_token = json_data['Token']
    uhs = json_data['DisplayClaims']['xui'][0]['uhs']
    # Step 4: trade the user token for an XSTS authorization token.
    url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
    resp = self.session.post(url, data=json.dumps({
        "RelyingParty": "http://xboxlive.com",
        "TokenType": "JWT",
        "Properties": {
            "UserTokens": [user_token],
            "SandboxId": "RETAIL",
        }
    }), headers={'Content-Type': 'application/json'})
    response = resp.json()
    self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
    self.user_xid = response['DisplayClaims']['xui'][0]['xid']
    self.authenticated = True
    return self
Authenticates this client instance .
60,969
def from_xuid(cls, xuid):
    """Build a GamerProfile from an XUID; raises GamertagNotFound if unknown."""
    url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
    try:
        return cls._fetch(url)
    except (GamertagNotFound, InvalidRequest):
        # Normalize both failure modes to a single exception type.
        raise GamertagNotFound('No such user: %s' % xuid)
Instantiates an instance of GamerProfile from an xuid
60,970
def from_gamertag(cls, gamertag):
    """Build a GamerProfile from a gamertag; raises GamertagNotFound if unknown."""
    url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
    try:
        return cls._fetch(url)
    except GamertagNotFound:
        raise GamertagNotFound('No such user: %s' % gamertag)
Instantiates an instance of GamerProfile from a gamertag
60,971
def get(cls, xuid, scid, clip_id):
    """Fetch one specific game clip; raises ClipNotFound on a 404."""
    url = (
        'https://gameclipsmetadata.xboxlive.com/users'
        '/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % {
            'xuid': xuid,
            'scid': scid,
            'clip_id': clip_id,
        }
    )
    resp = xbox.client._get(url)
    if resp.status_code == 404:
        raise ClipNotFound(
            'Could not find clip: xuid=%s, scid=%s, clip_id=%s'
            % (xuid, scid, clip_id))
    clip_payload = resp.json()
    return cls(UserProxy(xuid), clip_payload['gameClip'])
Gets a specific game clip
60,972
def saved_from_user(cls, user, include_pending=False):
    """Yield all clips saved by *user*, optionally including pending uploads."""
    url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips/saved'
    payload = xbox.client._get(url % user.xuid).json()
    for clip_payload in payload['gameClips']:
        if include_pending or clip_payload['state'] != 'PendingUpload':
            yield cls(user, clip_payload)
Gets all clips saved by a user .
60,973
def prepare_url(self, url, params):
    """Prepare the request URL: validate it, IDNA-encode the host, and merge
    *params* into the query string; stores the result on ``self.url``."""
    url = to_native_string(url)
    # Non-HTTP schemes (mailto:, custom protocols, ...) pass through as-is.
    if ':' in url and not url.lower().startswith('http'):
        self.url = url
        return
    scheme, auth, host, port, path, query, fragment = parse_url(url)
    if not scheme:
        raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
                            "Perhaps you meant http://{0}?".format(url))
    if not host:
        raise InvalidURL("Invalid URL %r: No host supplied" % url)
    # Internationalized domain names must be IDNA-encoded before sending.
    try:
        host = host.encode('idna').decode('utf-8')
    except UnicodeError:
        raise InvalidURL('URL has an invalid label.')
    # Rebuild the network location from auth, host and port.
    netloc = auth or ''
    if netloc:
        netloc += '@'
    netloc += host
    if port:
        netloc += ':' + str(port)
    if not path:
        path = '/'
    # On Python 2 every component must be bytes before urlunparse.
    if is_py2:
        if isinstance(scheme, str):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, str):
            netloc = netloc.encode('utf-8')
        if isinstance(path, str):
            path = path.encode('utf-8')
        if isinstance(query, str):
            query = query.encode('utf-8')
        if isinstance(fragment, str):
            fragment = fragment.encode('utf-8')
    # Merge caller-supplied params into any query already present in the URL.
    enc_params = self._encode_params(params)
    if enc_params:
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params
    url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
    self.url = url
Prepares the given HTTP URL .
60,974
def description_of(file, name='stdin'):
    """Return a human-readable guess of *file*'s character encoding."""
    detector = UniversalDetector()
    for line in file:
        detector.feed(line)
    detector.close()
    result = detector.result
    if not result['encoding']:
        return '%s: no result' % name
    return '%s: %s with confidence %s' % (
        name, result['encoding'], result['confidence'])
Return a string describing the probable encoding of a file .
60,975
def run(self):
    """Download the video: run the pre/in/post-download callbacks, sanitize
    the filename on Windows, and write the stream to disk (skipping files
    that already exist)."""
    self._debug('Provider', 'run', 'Running pre-download callback.')
    self._pre_download()
    url = None
    out = None
    success = False
    try:
        url = Provider._download(self.get_download_url())
        self._debug('Provider', 'run', 'Running in-download callback.')
        self._in_download(url)
        if IS_WINDOWS:
            # Strip characters illegal in Windows filenames.
            # NOTE(review): the `ur''` literal makes this Python-2-only, and
            # re.sub's 4th positional argument is `count`, not `flags` -- so
            # re.UNICODE (32) is actually passed as a replacement cap here.
            self.filename = re.sub(ur'[?\/\\<>:"*|]+', '_', self.filename, re.UNICODE)
        self.full_filename = '%s.%s' % (self.filename, self.fileext)
        self._debug('Provider', 'run', 'full_filename', self.full_filename)
        if not os.path.isfile(self.full_filename):
            out = open(self.full_filename, 'wb')
            out.write(url.read())
        else:
            # Already downloaded: leave the existing file untouched.
            pass
        success = True
    finally:
        # Always close handles and fire the post-download callback, even on
        # failure part-way through.
        if out is not None:
            out.close()
        if url is not None:
            url.close()
        self._debug('Provider', 'run', 'Running post-download callback.')
        self._debug('Provider', 'run', 'success', success)
        self._post_download(success)
Download the video .
60,976
def dumps(obj, **kwargs):
    """Serialize *obj* to a JSON string, wrapping any failure in
    JSONLibraryException. Accepts the same keyword arguments as ``json.dumps``."""
    try:
        serialized = json.dumps(obj, **kwargs)
    except Exception as error:
        raise JSONLibraryException(error)
    return serialized
Serialize obj to a JSON formatted str . Accepts the same arguments as json module in stdlib .
60,977
def prepend_name_prefix(func):
    """Decorator for instance methods whose first argument after ``self`` is a
    filename: prefixes that name with ``self.name_prefix`` before delegating."""
    @wraps(func)
    def wrapper(self, name, *args, **kwargs):
        return func(self, self.name_prefix + name, *args, **kwargs)
    return wrapper
Decorator that wraps instance methods to prepend the instance s filename prefix to the beginning of the referenced filename . Must only be used on instance methods where the first parameter after self is name or a comparable parameter of a different name .
60,978
def get_chalk(level):
    """Map a logging level to the chalk color used to render it."""
    if level >= logging.ERROR:
        return chalk.red
    if level >= logging.WARNING:
        return chalk.yellow
    if level >= logging.INFO:
        return chalk.blue
    if level >= logging.DEBUG:
        return chalk.green
    return chalk.white
Gets the appropriate piece of chalk for the logging level
60,979
def to_str(obj):
    """Best-effort conversion of *obj* to a string (decoding bytes as UTF-8 on Py3)."""
    if PY3 and isinstance(obj, bytes) and not isinstance(obj, str):
        obj = obj.decode('utf-8')
    if isinstance(obj, string_types):
        return obj
    return str(obj)
Attempts to convert given object to a string object
60,980
def get_color(self, value):
    """Validate *value* (a known color name or an integer 0-7) and return the
    prefixed ANSI color code.

    Fixes: the error message was ungrammatical ("should either a member"),
    negative integers slipped through contrary to the documented
    "positive integer below 8" contract, and the caught exception was bound
    but unused.
    """
    if value in COLOR_SET:
        value = COLOR_MAP[value]
    else:
        try:
            value = int(value)
            if not 0 <= value < 8:
                raise ValueError()
        except ValueError:
            raise ValueError(
                'Colors should be either a member of: {} or a positive '
                'integer below 8'.format(', '.join(COLOR_NAMES)))
    return '{}{}'.format(self.PREFIX, value)
Helper method to validate and map values used in the instantiation of of the Color object to the correct unicode value .
60,981
def shuffle(self, overwrite=False):
    """Write a line-shuffled copy of the file at ``self.path``.

    :param overwrite: shuffle in place when True; otherwise write to a new
        ``*_shuffled`` file. ``self.path`` is updated to the written file.

    Fixes: the original leaked both file handles; reads/writes now use
    context managers.
    """
    if overwrite:
        shuffled = self.path
    else:
        shuffled = FileAPI.add_ext_name(self.path, "_shuffled")
    with open(self.path) as source:
        lines = source.readlines()
    random.shuffle(lines)
    with open(shuffled, "w") as target:
        target.writelines(lines)
    self.path = shuffled
This method creates new shuffled file .
60,982
def multiple_files_count_reads_in_windows(bed_files, args):
    """Bin reads for every bed file, returning an OrderedDict keyed by filename."""
    binned = OrderedDict()
    for bed_file in bed_files:
        logging.info("Binning " + bed_file)
        # Paired-end input is detected from the .bedpe extension.
        if ".bedpe" in bed_file:
            binned[bed_file] = count_reads_in_windows_paired_end(bed_file, args)
        else:
            binned[bed_file] = count_reads_in_windows(bed_file, args)
    return binned
Use count_reads on multiple files and store result in dict .
60,983
def _merge_files ( windows , nb_cpu ) : windows = iter ( windows ) merged = next ( windows ) for chromosome_dfs in windows : merged = merge_same_files ( merged , chromosome_dfs , nb_cpu ) return merged
Merge lists of chromosome bin df chromosome - wise .
60,984
def py2round(value):
    """Round half away from zero, matching Python 2's round(), for Py3 compatibility."""
    as_float = float(value)
    if value > 0:
        return float(floor(as_float + 0.5))
    return float(ceil(as_float - 0.5))
Round values as in Python 2 for Python 3 compatibility .
60,985
def canonicalize(interval, lower_inc=True, upper_inc=False):
    """Normalize a discrete interval to the requested bound inclusivities."""
    if not interval.discrete:
        raise TypeError('Only discrete ranges can be canonicalized')
    if interval.empty:
        return interval
    new_lower, new_lower_inc = canonicalize_lower(interval, lower_inc)
    new_upper, new_upper_inc = canonicalize_upper(interval, upper_inc)
    return type(interval)(
        [new_lower, new_upper],
        lower_inc=new_lower_inc,
        upper_inc=new_upper_inc,
    )
Convert equivalent discrete intervals to different representations .
60,986
def glb(self, other):
    """Return the greatest lower bound of this interval and *other*."""
    bounds = [min(self.lower, other.lower), min(self.upper, other.upper)]
    low_inc = self.lower_inc if self < other else other.lower_inc
    up_inc = self.upper_inc if self > other else other.upper_inc
    return self.__class__(bounds, lower_inc=low_inc, upper_inc=up_inc)
Return the greatest lower bound for given intervals .
60,987
def lub(self, other):
    """Return the least upper bound of this interval and *other*."""
    bounds = [max(self.lower, other.lower), max(self.upper, other.upper)]
    low_inc = self.lower_inc if self < other else other.lower_inc
    up_inc = self.upper_inc if self > other else other.upper_inc
    return self.__class__(bounds, lower_inc=low_inc, upper_inc=up_inc)
Return the least upper bound for given intervals .
60,988
def compute_enriched_threshold(average_window_readcount):
    """Return the minimum tags-per-window for a window to count as enriched.

    Walks the Poisson survival function down until it drops to WINDOW_P_VALUE.
    """
    survival = 1
    threshold = 0
    while True:
        survival -= poisson.pmf(threshold, average_window_readcount)
        if survival <= WINDOW_P_VALUE:
            break
        threshold += 1
    return threshold + 1
Computes the minimum number of tags required in window for an island to be enriched .
60,989
def _factln ( num ) : if num < 20 : log_factorial = log ( factorial ( num ) ) else : log_factorial = num * log ( num ) - num + log ( num * ( 1 + 4 * num * ( 1 + 2 * num ) ) ) / 6.0 + log ( pi ) / 2 return log_factorial
Computes logfactorial regularly for tractable numbers uses Ramanujans approximation otherwise .
60,990
def add_new_enriched_bins_matrixes(region_files, dfs, bin_size):
    """Join per-region enrichment columns (built from bed files) onto each count df."""
    stripped = _remove_epic_enriched(dfs)
    labels = ["Enriched_" + os.path.basename(path) for path in region_files]
    region_bins = region_files_to_bins(region_files, labels, bin_size)
    # One region column per count dataframe, paired in order.
    assert len(region_bins.columns) == len(stripped)
    result = OrderedDict()
    for column_name, (key, df) in zip(region_bins, stripped.items()):
        result[key] = df.join(region_bins[column_name], how="outer").fillna(0)
    return result
Add enriched bins based on bed files .
60,991
def merge_chromosome_dfs(df_tuple):
    """Merge the (plus, minus) strand dataframes into strand-agnostic counts.

    Returns a dataframe with columns [count, Chromosome, Bin], int32 count
    and Bin, sorted by (Chromosome, Bin). Both inputs are assumed to carry
    the same single count column name.
    """
    plus_df, minus_df = df_tuple
    index_cols = "Chromosome Bin".split()
    count_column = plus_df.columns[0]
    # If one strand is empty, the other already is the merged answer.
    if plus_df.empty:
        return return_other(minus_df, count_column, index_cols)
    if minus_df.empty:
        return return_other(plus_df, count_column, index_cols)
    plus_df = plus_df.groupby(index_cols).sum()
    minus_df = minus_df.groupby(index_cols).sum()
    # Align the strands on (Chromosome, Bin) and add their counts.
    df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1)
    df = df.reset_index().sort_values(by="Bin")
    df.columns = ["Chromosome", "Bin", count_column]
    df = df.sort_values(["Chromosome", "Bin"])
    df[["Bin", count_column]] = df[["Bin", count_column]].astype(int32)
    df = df[[count_column, "Chromosome", "Bin"]]
    return df.reset_index(drop=True)
Merges data from the two strands into strand - agnostic counts .
60,992
def create_log2fc_bigwigs(matrix, outdir, args):
    """Write one log2 fold-change bigwig per treatment bed file into *outdir*."""
    call("mkdir -p {}".format(outdir), shell=True)
    genome_size_dict = args.chromosome_sizes
    # One output path per treatment file, mirroring its basename.
    outpaths = []
    for bed_file in matrix[args.treatment]:
        outpath = join(outdir, splitext(basename(bed_file))[0] + "_log2fc.bw")
        outpaths.append(outpath)
    data = create_log2fc_data(matrix, args)
    # Write the bigwigs in parallel, one job per column.
    Parallel(n_jobs=args.number_cores)(
        delayed(_create_bigwig)(bed_column, outpath, genome_size_dict)
        for outpath, bed_column in zip(outpaths, data))
Create bigwigs from matrix .
60,993
def add_to_island_expectations_dict(average_window_readcount,
                                    current_max_scaled_score,
                                    island_eligibility_threshold,
                                    island_expectations, gap_contribution):
    """Extend the island-expectations table by E_VALUE further scaled scores.

    Mutates and returns *island_expectations*. NOTE: the original author
    flagged this as a candidate for heavy optimization (time cost visible
    in the logging output).
    """
    scaled_score = current_max_scaled_score + E_VALUE
    for index in range(current_max_scaled_score + 1, scaled_score + 1):
        island_expectation = 0.0
        i = island_eligibility_threshold
        current_island = int(round(index - compute_window_score(i, average_window_readcount) / BIN_SIZE))
        # Sum contributions from every window count whose score maps back
        # onto an already-computed island expectation.
        while (current_island >= 0):
            if current_island in island_expectations:
                island_expectation += _poisson(i, average_window_readcount) * island_expectations[current_island]
            i += 1
            current_island = int(round(index - compute_window_score(i, average_window_readcount) / BIN_SIZE))
        island_expectation *= gap_contribution
        # Only store non-zero expectations to keep the dict sparse.
        if island_expectation:
            island_expectations[index] = island_expectation
    return island_expectations
Can probably be heavily optimized . Time required to run can be seen from logging info .
60,994
def effective_genome_size(fasta, read_length, nb_cores, tmpdir="/tmp"):
    """Compute the effective genome size: the fraction of unique
    read_length-mers in *fasta*, counted with jellyfish.

    NOTE(review): the result is logged/printed and asserted but never
    returned -- confirm whether callers rely only on the printed output.
    """
    idx = Fasta(fasta)
    genome_length = sum([len(c) for c in idx])
    logging.info("Temporary directory: " + tmpdir)
    logging.info("File analyzed: " + fasta)
    logging.info("Genome length: " + str(genome_length))
    print("File analyzed: ", fasta)
    print("Genome length: ", genome_length)
    chromosomes = ", ".join([c.name for c in idx])
    # Underscored contigs (scaffolds/patches) skew the estimate; warn loudly.
    if "_" in chromosomes:
        print("Warning. The following chromosomes are part of your genome:\n", chromosomes.replace(">", "") + "\n", file=sys.stderr)
        print("You probably want to remove all chromosomes in your fasta containing '_' for the effective genome size computation to be accurate.", file=sys.stderr)
    # '{1}' selects basename(fasta); positional arg 0 (read_length) is unused.
    output_file = os.path.join(tmpdir, '{1}.jf'.format(read_length, basename(fasta)))
    # Clean up the jellyfish database on interpreter exit.
    atexit.register(lambda: call("rm {output_file}".format(output_file=output_file), shell=True))
    # -L 1 -U 1: count only k-mers occurring exactly once (unique k-mers).
    call("jellyfish count -t {nb_cores} -m {read_length} -s {genome_length} -L 1 -U 1 --out-counter-len 1 --counter-len 1 {fasta} -o {output_file}".format(**vars()), shell=True)
    stats = check_output("jellyfish stats {output_file}".format(output_file=output_file), shell=True)
    unique_kmers = int(stats.split()[1])
    effective_genome_size = unique_kmers / genome_length
    logging.info("Number unique {read_length}-mers: ".format(read_length=read_length) + str(unique_kmers))
    logging.info("Effective genome size: " + str(effective_genome_size))
    print("Number unique {read_length}-mers: ".format(read_length=read_length), unique_kmers)
    print("Effective genome size: ", effective_genome_size)
    # Sanity check: a fraction of unique k-mers must be below 1.
    assert effective_genome_size < 1, "Something wrong happened, effective genome size over 1!"
Compute effective genome size for genome .
60,995
def get_island_bins(df, window_size, genome, args):
    """Map each chromosome to the set of bin starts covered by its islands in *df*."""
    flat = df.reset_index(drop=False)
    bins_per_chromosome = {}
    for chromosome in natsorted(list(args.chromosome_sizes)):
        subset = flat.loc[flat.Chromosome == chromosome]
        if subset.empty:
            bins_per_chromosome[chromosome] = set()
            continue
        spans = zip(subset.Start.values.tolist(), subset.End.values.tolist())
        # Expand every [start, end) island into its window-sized bin starts.
        bins_per_chromosome[chromosome] = set(
            chain(*[range(int(start), int(end), window_size)
                    for start, end in spans]))
    return bins_per_chromosome
Finds the enriched bins in a df .
60,996
def create_genome_size_dict(genome):
    """Build a {chromosome: length} dict from the size file for *genome*.

    Fixes: the original leaked the open file handle and shadowed the
    ``genome`` parameter with the loop variable.
    """
    size_file = get_genome_size_file(genome)
    size_dict = {}
    with open(size_file) as handle:
        for line in handle:
            chromosome, length = line.split()
            size_dict[chromosome] = int(length)
    return size_dict
Creates genome size dict from string containing data .
60,997
def compute_score_threshold(average_window_readcount,
                            island_enriched_threshold,
                            gap_contribution, boundary_contribution,
                            genome_length_in_bins):
    """Find the island score above which an island counts as enriched.

    Grows the island-expectations table until its tail probability falls
    below E_VALUE_THRESHOLD, then reads the threshold off the cumulative
    distribution.
    """
    required_p_value = poisson.pmf(island_enriched_threshold, average_window_readcount)
    prob = boundary_contribution * required_p_value
    score = -log(required_p_value)
    current_scaled_score = int(round(score / BIN_SIZE))
    # Seed the expectations table with the single-window island and the
    # empty-score entry.
    island_expectations_d = {}
    island_expectations_d[current_scaled_score] = prob * genome_length_in_bins
    island_expectations_d[0] = boundary_contribution * genome_length_in_bins / gap_contribution
    current_max_scaled_score = current_scaled_score
    interval = int(1 / BIN_SIZE)
    partial_cumu = 0.0
    logging.info("Finding the score required to consider an island enriched.")
    # Loop until the tail mass is both above 1e-100 (underflow guard) and
    # below E_VALUE_THRESHOLD.
    while (partial_cumu > E_VALUE_THRESHOLD or partial_cumu < 1e-100):
        current_scaled_score += interval
        current_max_scaled_score = current_scaled_score - interval
        if current_scaled_score > current_max_scaled_score:
            island_expectations_d = add_to_island_expectations_dict(
                average_window_readcount, current_max_scaled_score,
                island_enriched_threshold, island_expectations_d,
                gap_contribution)
            partial_cumu = 0.0001
            current_max_scaled_score += 1000
        # Recompute the tail probability of the (extended) table.
        if max(island_expectations_d) > interval:
            partial_cumu = sum([val for idx, val in island_expectations_d.items() if idx > current_max_scaled_score - interval])
        else:
            partial_cumu = sum(island_expectations_d.values())
    logging.debug("Computing cumulative distribution.")
    score_threshold = generate_cumulative_dist(island_expectations_d, current_max_scaled_score + 1)
    logging.info("Enriched score threshold for islands: " + str(score_threshold))
    return score_threshold
Compute the enriched score threshold for islands from the island expectations table .
60,998
def find_readlength(args):
    """Estimate the median read length from the first 10000 reads of a bed file.

    Returns the median of (End - Start); also logs mean/max/min.
    """
    # Supports both arg shapes seen in this codebase: `treatment` and `infiles`.
    try:
        bed_file = args.treatment[0]
    except AttributeError:
        bed_file = args.infiles[0]
    # Pick a decompressor matching the file extension and platform.
    filereader = "cat "
    if bed_file.endswith(".gz") and search("linux", platform, IGNORECASE):
        filereader = "zcat "
    elif bed_file.endswith(".gz") and search("darwin", platform, IGNORECASE):
        filereader = "gzcat "
    elif bed_file.endswith(".bz2"):
        filereader = "bzgrep "
    command = filereader + "{} | head -10000".format(bed_file)
    output = check_output(command, shell=True)
    # Columns 1 and 2 of a bed file are the interval start and end.
    df = pd.read_table(
        BytesIO(output), header=None, usecols=[1, 2], sep="\t",
        names=["Start", "End"])
    readlengths = df.End - df.Start
    mean_readlength = readlengths.mean()
    median_readlength = readlengths.median()
    max_readlength = readlengths.max()
    min_readlength = readlengths.min()
    logging.info(("Used first 10000 reads of {} to estimate a median read length of {}\n"
                  "Mean readlength: {}, max readlength: {}, min readlength: {}.").format(
        bed_file, median_readlength, mean_readlength, max_readlength,
        min_readlength))
    return median_readlength
Estimate read length based on the first 10000 reads .
60,999
def get_closest_readlength(estimated_readlength):
    """Snap the estimate to the nearest supported read length (36/50/75/100).

    Ties resolve to the smaller length, matching the original first-minimum rule.
    """
    return min([36, 50, 75, 100],
               key=lambda readlength: abs(readlength - estimated_readlength))
Find the predefined readlength closest to the estimated readlength .