idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
51,000
def read_digits(ctx, text):
    """Formats digits in text for reading in TTS."""
    def in_chunks(value, size):
        # split value into consecutive pieces of the given size
        return [value[n:n + size] for n in range(0, len(value), size)]

    text = conversions.to_string(text, ctx).strip()
    if not text:
        return ''
    # drop a leading '+' (e.g. international phone numbers)
    if text[0] == '+':
        text = text[1:]
    size = len(text)
    if size == 9:
        # nine digits are read in a 3-2-4 grouping
        parts = [text[:3], text[3:5], text[5:]]
        return ' , '.join(' '.join(p) for p in parts)
    if size % 3 == 0 and size > 3:
        return ' '.join(','.join(in_chunks(text, 3)))
    if size % 4 == 0:
        return ' '.join(','.join(in_chunks(text, 4)))
    return ','.join(text)
Formats digits in text for reading in TTS
51,001
def remove_first_word(ctx, text):
    """Removes the first word from the given text string."""
    text = conversions.to_string(text, ctx).lstrip()
    head = first_word(ctx, text)
    if not head:
        return ''
    return text[len(head):].lstrip()
Removes the first word from the given text string
51,002
def word(ctx, text, number, by_spaces=False):
    """Extracts the nth word from the given text string."""
    # a single word is the slice [number, number + 1)
    stop = conversions.to_integer(number, ctx) + 1
    return word_slice(ctx, text, number, stop, by_spaces)
Extracts the nth word from the given text string
51,003
def word_count(ctx, text, by_spaces=False):
    """Returns the number of words in the given text string."""
    as_text = conversions.to_string(text, ctx)
    split_on_spaces = conversions.to_boolean(by_spaces, ctx)
    return len(__get_words(as_text, split_on_spaces))
Returns the number of words in the given text string
51,004
def word_slice(ctx, text, start, stop=0, by_spaces=False):
    """Extracts a substring of words spanning from start up to but
    not including stop."""
    text = conversions.to_string(text, ctx)
    start = conversions.to_integer(start, ctx)
    stop = conversions.to_integer(stop, ctx)
    by_spaces = conversions.to_boolean(by_spaces, ctx)

    if start == 0:
        raise ValueError("Start word cannot be zero")
    if start > 0:
        start -= 1  # convert 1-based position to 0-based index
    if stop == 0:
        stop = None  # zero means "to the end"
    elif stop > 0:
        stop -= 1
    words = __get_words(text, by_spaces)
    return ' '.join(words[start:stop])
Extracts a substring of words spanning from start up to, but not including, stop
51,005
def regex_group(ctx, text, pattern, group_num):
    """Tries to match the text with the given pattern and returns the value
    of the matching group."""
    text = conversions.to_string(text, ctx)
    pattern = conversions.to_string(pattern, ctx)
    group_num = conversions.to_integer(group_num, ctx)
    flags = regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0
    match = regex.compile(pattern, flags).search(text)
    if not match:
        return ""
    if not 0 <= group_num <= len(match.groups()):
        raise ValueError("No such matching group %d" % group_num)
    return match.group(group_num)
Tries to match the text with the given pattern and returns the value of matching group
51,006
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP):
    """Rounding for decimals with support for negative digits."""
    exp = Decimal(10) ** -num_digits
    if num_digits < 0:
        # round at a position left of the decimal point
        return exp * (number / exp).to_integral_value(rounding)
    return number.quantize(exp, rounding)
Rounding for decimals with support for negative digits
51,007
def parse_json_date(value):
    """Parses an ISO8601 formatted datetime from a string value."""
    if not value:
        return None
    parsed = datetime.datetime.strptime(value, JSON_DATETIME_FORMAT)
    # timestamps are always interpreted as UTC
    return parsed.replace(tzinfo=pytz.UTC)
Parses an ISO8601 formatted datetime from a string value
51,008
def clean(ctx, text):
    """Removes all non-printable characters from a text string."""
    text = conversions.to_string(text, ctx)
    # keep only characters at or above the printable ASCII threshold
    return ''.join(ch for ch in text if ord(ch) >= 32)
Removes all non-printable characters from a text string
51,009
def concatenate(ctx, *text):
    """Joins text strings into one text string."""
    return ''.join(conversions.to_string(arg, ctx) for arg in text)
Joins text strings into one text string
51,010
def fixed(ctx, number, decimals=2, no_commas=False):
    """Formats the given number in decimal format using a period and
    commas."""
    value = _round(ctx, number, decimals)
    if no_commas:
        return '{:f}'.format(value)
    return '{:,f}'.format(value)
Formats the given number in decimal format using a period and commas
51,011
def left(ctx, text, num_chars):
    """Returns the first characters in a text string."""
    num_chars = conversions.to_integer(num_chars, ctx)
    if num_chars < 0:
        raise ValueError("Number of chars can't be negative")
    return conversions.to_string(text, ctx)[:num_chars]
Returns the first characters in a text string
51,012
def rept(ctx, text, number_times):
    """Repeats text a given number of times.

    Raises:
        ValueError: if number_times is negative.
    """
    # convert before comparing, consistent with left()/right(); the original
    # compared the raw argument, so a non-int value raised TypeError instead
    number_times = conversions.to_integer(number_times, ctx)
    if number_times < 0:
        raise ValueError("Number of times can't be negative")
    return conversions.to_string(text, ctx) * number_times
Repeats text a given number of times
51,013
def right(ctx, text, num_chars):
    """Returns the last characters in a text string."""
    num_chars = conversions.to_integer(num_chars, ctx)
    if num_chars < 0:
        raise ValueError("Number of chars can't be negative")
    if num_chars == 0:
        # text[-0:] would return the whole string, so handle zero explicitly
        return ''
    return conversions.to_string(text, ctx)[-num_chars:]
Returns the last characters in a text string
51,014
def substitute(ctx, text, old_text, new_text, instance_num=-1):
    """Substitutes new_text for old_text in a text string.

    A negative instance_num replaces every occurrence; otherwise only the
    given occurrence (1-based) is replaced.
    """
    text = conversions.to_string(text, ctx)
    old_text = conversions.to_string(old_text, ctx)
    new_text = conversions.to_string(new_text, ctx)
    if instance_num < 0:
        return text.replace(old_text, new_text)
    pieces = text.split(old_text)
    result = pieces[0]
    for occurrence, piece in enumerate(pieces[1:], start=1):
        joiner = new_text if occurrence == instance_num else old_text
        result += joiner + piece
    return result
Substitutes new_text for old_text in a text string
51,015
def _unicode(ctx, text):
    """Returns a numeric code for the first character in a text string."""
    text = conversions.to_string(text, ctx)
    if not text:
        raise ValueError("Text can't be empty")
    return ord(text[0])
Returns a numeric code for the first character in a text string
51,016
def date(ctx, year, month, day):
    """Defines a date value."""
    y = conversions.to_integer(year, ctx)
    m = conversions.to_integer(month, ctx)
    d = conversions.to_integer(day, ctx)
    return _date(y, m, d)
Defines a date value
51,017
def datedif(ctx, start_date, end_date, unit):
    """Calculates the number of days, months or years between two dates."""
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        return relativedelta(end_date, start_date).years
    if unit == 'm':
        delta = relativedelta(end_date, start_date)
        return 12 * delta.years + delta.months
    if unit == 'd':
        return (end_date - start_date).days
    if unit == 'md':
        # days component only, ignoring months and years
        return relativedelta(end_date, start_date).days
    if unit == 'ym':
        # months component only, ignoring years
        return relativedelta(end_date, start_date).months
    if unit == 'yd':
        # days difference, treating both dates as in the end year
        return (end_date - start_date.replace(year=end_date.year)).days
    raise ValueError("Invalid unit value: %s" % unit)
Calculates the number of days, months, or years between two dates.
51,018
def edate(ctx, date, months):
    """Moves a date by the given number of months."""
    offset = relativedelta(months=conversions.to_integer(months, ctx))
    return conversions.to_date_or_datetime(date, ctx) + offset
Moves a date by the given number of months
51,019
def time(ctx, hours, minutes, seconds):
    """Defines a time value."""
    h = conversions.to_integer(hours, ctx)
    m = conversions.to_integer(minutes, ctx)
    s = conversions.to_integer(seconds, ctx)
    return _time(h, m, s)
Defines a time value
51,020
def _abs(ctx, number):
    """Returns the absolute value of a number."""
    value = abs(conversions.to_decimal(number, ctx))
    return conversions.to_decimal(value, ctx)
Returns the absolute value of a number
51,021
def _int(ctx, number):
    """Rounds a number down to the nearest integer."""
    floored = conversions.to_decimal(number, ctx).to_integral_value(ROUND_FLOOR)
    return conversions.to_integer(floored, ctx)
Rounds a number down to the nearest integer
51,022
def _max(ctx, *number):
    """Returns the maximum value of all arguments."""
    if not number:
        raise ValueError("Wrong number of arguments")
    values = [conversions.to_decimal(arg, ctx) for arg in number]
    return max(values)
Returns the maximum value of all arguments
51,023
def mod(ctx, number, divisor):
    """Returns the remainder after number is divided by divisor."""
    number = conversions.to_decimal(number, ctx)
    divisor = conversions.to_decimal(divisor, ctx)
    # remainder based on floored division (via _int)
    return number - divisor * _int(ctx, number / divisor)
Returns the remainder after number is divided by divisor
51,024
def _power(ctx, number, power):
    """Returns the result of a number raised to a power."""
    base = conversions.to_decimal(number, ctx)
    exponent = conversions.to_decimal(power, ctx)
    return decimal_pow(base, exponent)
Returns the result of a number raised to a power
51,025
def randbetween(ctx, bottom, top):
    """Returns a random integer number between the numbers you specify
    (inclusive)."""
    low = conversions.to_integer(bottom, ctx)
    high = conversions.to_integer(top, ctx)
    return random.randint(low, high)
Returns a random integer number between the numbers you specify
51,026
def _round(ctx, number, num_digits):
    """Rounds a number to a specified number of digits."""
    value = conversions.to_decimal(number, ctx)
    digits = conversions.to_integer(num_digits, ctx)
    return decimal_round(value, digits, ROUND_HALF_UP)
Rounds a number to a specified number of digits
51,027
def rounddown(ctx, number, num_digits):
    """Rounds a number down toward zero."""
    value = conversions.to_decimal(number, ctx)
    digits = conversions.to_integer(num_digits, ctx)
    return decimal_round(value, digits, ROUND_DOWN)
Rounds a number down toward zero
51,028
def roundup(ctx, number, num_digits):
    """Rounds a number up away from zero."""
    value = conversions.to_decimal(number, ctx)
    digits = conversions.to_integer(num_digits, ctx)
    return decimal_round(value, digits, ROUND_UP)
Rounds a number up away from zero
51,029
def _sum(ctx, *number):
    """Returns the sum of all arguments."""
    if not number:
        raise ValueError("Wrong number of arguments")
    return sum((conversions.to_decimal(arg, ctx) for arg in number), Decimal(0))
Returns the sum of all arguments
51,030
def trunc(ctx, number):
    """Truncates a number to an integer by removing the fractional part of
    the number."""
    truncated = conversions.to_decimal(number, ctx).to_integral_value(ROUND_DOWN)
    return conversions.to_integer(truncated, ctx)
Truncates a number to an integer by removing the fractional part of the number
51,031
def _and(ctx, *logical):
    """Returns TRUE if and only if all its arguments evaluate to TRUE."""
    return all(conversions.to_boolean(arg, ctx) for arg in logical)
Returns TRUE if and only if all its arguments evaluate to TRUE
51,032
def _if(ctx, logical_test, value_if_true=0, value_if_false=False):
    """Returns one value if the condition evaluates to TRUE and another
    value if it evaluates to FALSE."""
    if conversions.to_boolean(logical_test, ctx):
        return value_if_true
    return value_if_false
Returns one value if the condition evaluates to TRUE and another value if it evaluates to FALSE
51,033
def _or(ctx, *logical):
    """Returns TRUE if any argument is TRUE."""
    return any(conversions.to_boolean(arg, ctx) for arg in logical)
Returns TRUE if any argument is TRUE
51,034
def compute_sum_values(i, j, data1, data2):
    """Return the sum1_ij and sum2_ij values given the input indices and
    data instances."""
    sum1_ij = 1.
    for index, dataset in zip((i, j), (data1, data2)):
        if isinstance(dataset, field):
            sum1_ij *= dataset.wvalue[index]
        elif isinstance(dataset, points):
            sum1_ij *= dataset.weights[index]
        else:
            raise NotImplementedError("data type not recognized")
    sum2_ij = data1.weights[i] * data2.weights[j]
    return sum1_ij, sum2_ij
Return the sum1_ij and sum2_ij values given the input indices and data instances .
51,035
def _setup(self):
    """Set the binning info we need from the edges."""
    # one record per dimension: inverse bin width, min/max edge, bin count,
    # and the detected spacing type
    dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'), ('N', 'i4'), ('spacing', 'object')]
    dtype = numpy.dtype(dtype)
    self._info = numpy.empty(self.Ndim, dtype=dtype)
    # expose the record fields as attribute views (they share memory with _info)
    self.min = self._info['min']
    self.max = self._info['max']
    self.N = self._info['N']
    self.inv = self._info['inv']
    self.spacing = self._info['spacing']
    for i, dim in enumerate(self.dims):
        self.N[i] = len(self.edges[i]) - 1
        self.min[i] = self.edges[i][0]
        self.max[i] = self.edges[i][-1]
        self.spacing[i] = None
        lin_diff = numpy.diff(self.edges[i])
        # log10 of a zero/negative edge would warn; suppress while probing
        with numpy.errstate(divide='ignore', invalid='ignore'):
            log_diff = numpy.diff(numpy.log10(self.edges[i]))
        if numpy.allclose(lin_diff, lin_diff[0]):
            # uniformly spaced edges
            self.spacing[i] = 'linspace'
            self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
        elif numpy.allclose(log_diff, log_diff[0]):
            # log-uniformly spaced edges
            self.spacing[i] = 'logspace'
            self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
    # +2 per dimension — presumably under/overflow bins; confirm against linear()
    self.shape = self.N + 2
    self.Rmax = self.max[0]
Set the binning info we need from the edges
51,036
def linear(self, **paircoords):
    """Linearize bin indices."""
    # number of pairs: length of the first coordinate array
    N = len(paircoords[list(paircoords.keys())[0]])
    integer = numpy.empty(N, ('i8', (self.Ndim,))).T
    for i, dim in enumerate(self.dims):
        if self.spacing[i] == 'linspace':
            x = paircoords[dim] - self.min[i]
            integer[i] = numpy.ceil(x * self.inv[i])
        elif self.spacing[i] == 'logspace':
            x = paircoords[dim].copy()
            # avoid log10(0): clamp zeros to just below the first edge
            x[x == 0] = self.min[i] * 0.9
            x = numpy.log10(x / self.min[i])
            integer[i] = numpy.ceil(x * self.inv[i])
        elif self.spacing[i] is None:
            # irregular spacing: binary search against the explicit edges
            edge = self.edges if self.Ndim == 1 else self.edges[i]
            integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
    # 'clip' keeps out-of-range coordinates in the outermost bins
    return numpy.ravel_multi_index(integer, self.shape, mode='clip')
Linearize bin indices .
51,037
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
    """The main function that digitizes the pair counts, calls bincount for
    the appropriate sum1 and sum2 values and adds them to the input arrays."""
    sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
    # `digitize` may return (dig, paircoords, weights), (dig, paircoords),
    # or just dig — unpack accordingly
    digr = self.digitize(r, i, j, data1, data2)
    if len(digr) == 3 and isinstance(digr[1], dict):
        dig, paircoords, weights = digr
    elif len(digr) == 2 and isinstance(digr[1], dict):
        dig, paircoords = digr
        weights = None
    else:
        dig = digr
        paircoords = None
        weights = None

    def add_one_channel(sum1c, sum1_ijc):
        # accumulate one channel's pair values into its summation array
        if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
            sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
        else:
            # multi-component values: bincount each trailing component
            for d in range(sum1c.shape[0]):
                sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[..., d], minlength=sum1c[d].size)

    if self.channels:
        if weights is None:
            raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
        sum1_ij = weights * sum1_ij
        for ichannel in range(len(self.channels)):
            add_one_channel(sum1[ichannel], sum1_ij[ichannel])
    else:
        add_one_channel(sum1, sum1_ij)
    if not numpy.isscalar(sum2):
        sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
    if N is not None:
        if not paircoords:
            raise RuntimeError("Bin center is requested but not returned by digitize")
        self._update_mean_coords(dig, N, centers_sum, **paircoords)
The main function that digitizes the pair counts calls bincount for the appropriate sum1 and sum2 values and adds them to the input arrays
51,038
def sum_shapes(self, data1, data2):
    """Return the shapes of the summation arrays given the input data and
    the shape of the bins."""
    linearshape = [-1] + list(self.shape)
    subshapes = [list(d.subshape) for d in (data1, data2) if isinstance(d, field)]
    if len(subshapes) == 2:
        # both fields must agree on their per-point value shape
        assert subshapes[0] == subshapes[1]
        subshape = subshapes[0]
    elif len(subshapes) == 1:
        subshape = subshapes[0]
    else:
        subshape = []
    fullshape = subshape + list(self.shape)
    if self.channels:
        fullshape = [len(self.channels)] + fullshape
    return linearshape, fullshape
Return the shapes of the summation arrays given the input data and shape of the bins
51,039
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
    """Update the mean coordinate sums."""
    if N is None or centers_sum is None:
        return
    N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
    for axis, dim in enumerate(self.dims):
        target = centers_sum[axis]
        target.flat[:] += utils.bincount(dig, paircoords[dim], minlength=target.size)
Update the mean coordinate sums
51,040
def work(self, i):
    """Internal function that performs the pair-counting."""
    n1, n2 = self.p[i]
    # per-task accumulators with the same shapes as the global arrays
    sum1 = numpy.zeros_like(self.sum1g)
    sum2 = 1.
    if not self.pts_only:
        sum2 = numpy.zeros_like(self.sum2g)
    if self.compute_mean_coords:
        N = numpy.zeros_like(self.N)
        centers_sum = [numpy.zeros_like(c) for c in self.centers]
    else:
        N = None
        centers_sum = None
    if self.bins.enable_fast_node_count:
        # fast path: the tree counts pairs per edge directly
        sum1attrs = [d.attr for d in self.data]
        counts, sum1c = n1.count(n2, self.bins.edges, attrs=sum1attrs)
        # last slot cleared — presumably the overflow bin; confirm against bins
        sum1[..., :-1] = sum1c
        sum1[..., -1] = 0
    else:
        def callback(r, i, j):
            self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum)
        n1.enum(n2, self.bins.Rmax, process=callback)
    if not self.compute_mean_coords:
        return sum1, sum2
    else:
        return sum1, sum2, N, centers_sum
Internal function that performs the pair - counting
51,041
def reduce(self, sum1, sum2, *args):
    """The internal reduce function that sums the results from various
    processors."""
    self.sum1g[...] += sum1
    if not self.pts_only:
        self.sum2g[...] += sum2
    if self.compute_mean_coords:
        N, centers_sum = args
        self.N[...] += N
        for axis in range(self.bins.Ndim):
            self.centers[axis][...] += centers_sum[axis]
The internal reduce function that sums the results from various processors
51,042
def psisloo(log_lik, **kwargs):
    r"""PSIS leave-one-out log predictive densities.

    The flattened source contained a stray ``r`` token where the raw
    docstring prefix was mangled; restored here as a proper docstring.

    Returns:
        loo: sum of the pointwise leave-one-out log predictive densities.
        loos: pointwise leave-one-out log predictive densities.
        ks: Pareto shape estimates returned by psislw.
    """
    kwargs['overwrite_lw'] = True
    lw = -log_lik
    lw, ks = psislw(lw, **kwargs)
    lw += log_lik
    loos = sumlogs(lw, axis=0)
    loo = loos.sum()
    return loo, loos, ks
PSIS leave-one-out log predictive densities.
51,043
def gpinv(p, k, sigma):
    """Inverse Generalised Pareto distribution function."""
    x = np.full(p.shape, np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)

    def quantile(q):
        # unscaled quantile function for probabilities strictly inside (0, 1)
        if np.abs(k) < np.finfo(float).eps:
            # k == 0 limit: exponential tail
            return -np.log1p(-q)
        return np.expm1(-k * np.log1p(-q)) / k

    if np.all(ok):
        x = quantile(p)
    else:
        x[ok] = quantile(p[ok])
    x *= sigma
    # boundary probabilities get the distribution's support limits
    x[p == 0] = 0
    if k >= 0:
        x[p == 1] = np.inf
    else:
        x[p == 1] = -sigma / k
    return x
Inverse Generalised Pareto distribution function .
51,044
def sumlogs(x, axis=None, out=None):
    """Sum of vector where numbers are represented by their logarithms.

    Computes log(sum(exp(x), axis)) in a numerically stable way by shifting
    by the maximum before exponentiating.
    """
    maxx = x.max(axis=axis, keepdims=True)
    shifted = x - maxx
    np.exp(shifted, out=shifted)
    out = np.sum(shifted, axis=axis, out=out)
    if isinstance(out, np.ndarray):
        np.log(out, out=out)
    else:
        out = np.log(out)
    out += np.squeeze(maxx)
    return out
Sum of vector where numbers are represented by their logarithms .
51,045
def build_listing(self):
    """Builds a listing of all functions sorted A-Z with their names and
    descriptions."""
    def describe(name, func):
        args, varargs, defaults = self._get_arg_spec(func)
        # the ctx argument is an implementation detail, not a user parameter
        params = [{'name': str(a), 'optional': a in defaults, 'vararg': False}
                  for a in args if a != 'ctx']
        if varargs:
            params.append({'name': str(varargs), 'optional': False, 'vararg': True})
        return {'name': str(name.upper()),
                'description': str(func.__doc__).strip(),
                'params': params}

    entries = [describe(f_name, f) for f_name, f in self._functions.items()]
    return sorted(entries, key=lambda entry: entry['name'])
Builds a listing of all functions sorted A - Z with their names and descriptions
51,046
def _get_arg_spec ( func ) : args , varargs , keywords , defaults = inspect . getargspec ( func ) if defaults is None : defaults = { } else : defaulted_args = args [ - len ( defaults ) : ] defaults = { name : val for name , val in zip ( defaulted_args , defaults ) } return args , varargs , defaults
Gets the argument spec of the given function returning defaults as a dict of param names to values
51,047
def check_ocrmypdf(input_file, output_file, *args, env=None):
    "Run ocrmypdf and confirmed that a valid file was created"
    p, out, err = run_ocrmypdf(input_file, output_file, *args, env=env)
    if p.returncode != 0:
        # dump the captured streams to help diagnose the failure
        print('stdout\n======')
        print(out)
        print('stderr\n======')
        print(err)
    return output_file
Run ocrmypdf and confirmed that a valid file was created
51,048
def run_ocrmypdf(input_file, output_file, *args, env=None):
    "Run ocrmypdf and let caller deal with results"
    if env is None:
        env = os.environ
    cmd = OCRMYPDF + list(args) + [input_file, output_file]
    proc = Popen(cmd, close_fds=True, stdout=PIPE, stderr=PIPE,
                 universal_newlines=True, env=env)
    out, err = proc.communicate()
    return proc, out, err
Run ocrmypdf and let caller deal with results
51,049
def standardizeMapName(mapName):
    """pretty-fy the name for pysc2 map lookup"""
    name = os.path.basename(mapName)
    name = name.split(".")[0]
    name = name.split("(")[0]
    # strip LE/TE edition suffixes, dashes and whitespace
    name = re.sub("[LT]E+$", "", name)
    name = re.sub("-", "", name)
    name = re.sub(' ', '', name, flags=re.UNICODE)
    if name in c.mapNameTranslations:
        return c.mapNameTranslations[name]
    return name
pretty - fy the name for pysc2 map lookup
51,050
def env_info():
    """Returns a string that contains the Python version and runtime path."""
    v = sys.version_info
    pyver = f'Python {v.major}.{v.minor}.{v.micro}'
    # pre-release builds get their level suffix plus the serial number
    pyver += {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}.get(v.releaselevel, '')
    if v.releaselevel != 'final':
        pyver += str(v.serial)
    return f'{pyver} (env: {sys.prefix})'
Returns a string that contains the Python version and runtime path .
51,051
def dict2kvlist(o):
    """Serializes a dict-like object into a generator of the flattened list
    of repeating key-value pairs. It is useful when using the HMSET method
    in Redis."""
    return chain.from_iterable(o.items())
Serializes a dict-like object into a generator of the flattened list of repeating key-value pairs. It is useful when using the HMSET method in Redis.
51,052
def setuptools_entry(dist, keyword, value):
    """Setuptools entry point for setting version and baking it into package."""
    if not value:
        return
    version = get_version()
    if dist.metadata.version is not None:
        s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead"
        warnings.warn(s.format(dist.metadata.version, version))
    dist.metadata.version = version

    # NOTE(review): in the flattened source the cmdclass assignments appeared
    # inside the (otherwise empty) class bodies, where the class name is not
    # yet bound (NameError at runtime); they are restored after each class
    # statement here — confirm against the original upstream source.
    ExistingCustomBuildPy = dist.cmdclass.get('build_py', object)

    class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy):
        pass

    dist.cmdclass['build_py'] = KatVersionBuildPy

    ExistingCustomSdist = dist.cmdclass.get('sdist', object)

    class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist):
        pass

    dist.cmdclass['sdist'] = KatVersionSdist
Setuptools entry point for setting version and baking it into package .
51,053
def is_(self, state):
    """Check if machine is in given state."""
    normalized = self._meta['translator'].translate(state)
    return self.actual_state == normalized
Check if machine is in given state .
51,054
def can_be_(self, state):
    """Check if machine can transit to given state."""
    state = self._meta['translator'].translate(state)
    if self._meta['complete']:
        # a complete machine allows any transition
        return True
    if self.actual_state is None:
        # no current state: any initial state is allowed
        return True
    allowed = self._meta['transitions'][self.actual_state]
    return state in allowed
Check if machine can transit to given state .
51,055
def force_set(self, state):
    """Set new state without checking if transition is allowed."""
    normalized = self._meta['translator'].translate(state)
    setattr(self, self._meta['state_attribute_name'], normalized)
Set new state without checking if transition is allowed .
51,056
def set_(self, state):
    """Set new state for machine."""
    if not self.can_be_(state):
        state = self._meta['translator'].translate(state)
        raise TransitionError(
            "Cannot transit from '{actual_value}' to '{value}'.".format(
                actual_value=self.actual_state.value, value=state.value))
    self.force_set(state)
Set new state for machine .
51,057
def generate_getter(value):
    """Generate getter for given value."""
    @property
    @wraps(is_)
    def getter(self):
        # delegate to the instance's is_ check for this fixed value
        return self.is_(value)
    return getter
Generate getter for given value .
51,058
def generate_checker(value):
    """Generate state checker for given value."""
    @property
    @wraps(can_be_)
    def checker(self):
        # delegate to the instance's can_be_ check for this fixed value
        return self.can_be_(value)
    return checker
Generate state checker for given value .
51,059
def generate_setter(value):
    """Generate setter for given value."""
    @wraps(set_)
    def setter(self):
        # delegate to the instance's set_ for this fixed value
        self.set_(value)
    return setter
Generate setter for given value .
51,060
def translate(self, value):
    """Translate value to enum instance."""
    if self._check_if_already_proper(value):
        return value
    try:
        return self.search_table[value]
    except KeyError:
        raise ValueError("Value {value} doesn't match any state.".format(value=value))
Translate value to enum instance .
51,061
def _query(self, text):
    """Takes natural language text and information as query parameters and
    returns information as JSON."""
    params = (
        ('v', self.api_version),
        ('query', text),
        ('lang', self.language),
        ('sessionId', self.session_id),
        ('timezone', self.timezone),
    )
    if self.query_response:
        # keep the previous response around before overwriting it
        self.previous_query_response = self.query_response
    self.query_response = result = self.session.get(url=self.query_url, params=params).json()
    return result
Takes natural language text and information as query parameters and returns information as JSON .
51,062
def is_venv():
    """Check whether if this workspace is a virtualenv."""
    workspace = os.path.dirname(SRC)
    if SYS_NAME == "Windows":
        required = ["activate", "pip.exe", "python.exe"]
    elif SYS_NAME in ["Darwin", "Linux"]:
        required = ["activate", "pip", "python"]
    for name in required:
        if not os.path.exists(os.path.join(workspace, BIN_SCRIPTS, name)):
            return False
    return True
Check whether if this workspace is a virtualenv .
51,063
def find_linux_venv_py_version():
    """Find python version name used in this virtualenv."""
    candidates = [
        "python2.6", "python2.7",
        "python3.3", "python3.4", "python3.5", "python3.6",
    ]
    workspace = os.path.dirname(SRC)
    for basename in os.listdir(os.path.join(workspace, BIN_SCRIPTS)):
        for version in candidates:
            if version in basename:
                return version
    raise Exception("Can't find virtualenv python version!")
Find python version name used in this virtualenv .
51,064
def find_venv_DST():
    """Find where this package should be installed to in this virtualenv."""
    workspace = os.path.dirname(SRC)
    if SYS_NAME == "Windows":
        DST = os.path.join(workspace, "Lib", "site-packages", PKG_NAME)
    elif SYS_NAME in ["Darwin", "Linux"]:
        DST = os.path.join(workspace, "lib", find_linux_venv_py_version(),
                           "site-packages", PKG_NAME)
    return DST
Find where this package should be installed to in this virtualenv .
51,065
def find_DST():
    """Find where this package should be installed to."""
    if SYS_NAME == "Windows":
        return os.path.join(site.getsitepackages()[1], PKG_NAME)
    elif SYS_NAME in ["Darwin", "Linux"]:
        return os.path.join(site.getsitepackages()[0], PKG_NAME)
Find where this package should be installed to .
51,066
def md5_of_file(abspath):
    """Md5 value of a file."""
    digest = hashlib.md5()
    with open(abspath, "rb") as f:
        # read in 1 MB chunks to bound memory usage
        for data in iter(lambda: f.read(1024 * 1024), b""):
            digest.update(data)
    return digest.hexdigest()
Md5 value of a file .
51,067
def check_need_install():
    """Check if installed package are exactly the same to this one, by
    checking md5 value of all files."""
    for root, _, basenames in os.walk(SRC):
        if os.path.basename(root) == "__pycache__":
            continue
        for basename in basenames:
            src = os.path.join(root, basename)
            dst = os.path.join(root.replace(SRC, DST), basename)
            if not os.path.exists(dst):
                return True
            if md5_of_file(src) != md5_of_file(dst):
                return True
    return False
Check if installed package are exactly the same to this one . By checking md5 value of all files .
51,068
def is_git(path):
    """Return True if this is a git repo."""
    try:
        return bool(run_cmd(path, 'git', 'rev-parse', '--git-dir'))
    except (OSError, RuntimeError):
        # git missing or not a repository
        return False
Return True if this is a git repo .
51,069
def date_version(scm=None):
    """Generate a version string based on the SCM type and the date."""
    timestamp = str(time.strftime('%Y%m%d%H%M'))
    if scm:
        return "0.0+unknown.{0}.{1}".format(scm, timestamp)
    return "0.0+unknown." + timestamp
Generate a version string based on the SCM type and the date .
51,070
def get_git_cleaned_branch_name(path):
    """Get the git branch name of the current HEAD in path, scrubbed to
    conform to PEP-440."""
    raw = run_cmd(path, 'git', 'rev-parse', '--abbrev-ref', 'HEAD')
    # collapse runs of non-alphanumerics into dots
    return re.sub(r"[^A-Za-z0-9]+", ".", raw.strip())
Get the git branch name of the current HEAD in path . The branch name is scrubbed to conform to PEP - 440 .
51,071
def get_git_version(path):
    """Get the GIT version."""
    branch_name = get_git_cleaned_branch_name(path)
    # any local modifications mark the build as dirty
    mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no')
    dirty = '.dirty' if mods else ''
    git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"')
    commits = git_output.strip().replace('"', '').split('\n')
    num_commits_since_branch = len(commits)
    short_commit_name = commits[0].partition(' ')[0]
    valid_version = re.compile(r'^v?([\.\d]+)$')

    def tagged_version(commit):
        # extract a version-number list from a "tag: ..." ref on this commit
        refs = commit.partition(' ')[2]
        for ref in refs.lstrip('(').rstrip(')').split(', '):
            if ref.startswith('tag: '):
                tag = ref[5:].lower()
                found = valid_version.match(tag)
                if found:
                    return [int(v) for v in found.group(1).split('.') if v]
        return []

    # find the most recent version-tagged commit; fall back to 0.0
    for commit in commits:
        version_numbers = tagged_version(commit)
        if version_numbers:
            break
    else:
        version_numbers = [0, 0]
    # a "release" is a clean checkout sitting exactly on the tagged commit
    release = (commit == commits[0]) and not dirty
    if not release:
        version_numbers[-1] += 1
    version = '.'.join([str(v) for v in version_numbers])
    if not release:
        version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch, branch_name, short_commit_name, dirty))
    return version
Get the GIT version .
51,072
def get_version_from_scm(path=None):
    """Get the current version string of this package using SCM tool."""
    if is_git(path):
        return 'git', get_git_version(path)
    if is_svn(path):
        return 'svn', get_svn_version(path)
    return None, None
Get the current version string of this package using SCM tool .
51,073
def get_version_from_module(module):
    """Use pkg_resources to get version of installed module by name."""
    if module is None:
        return None
    # only the top-level package name is registered as a distribution
    name = str(module).split('.', 1)[0]
    try:
        return pkg_resources.get_distribution(name).version
    except pkg_resources.DistributionNotFound:
        return None
Use pkg_resources to get version of installed module by name .
51,074
def _must_decode ( value ) : if type ( value ) is bytes : try : return value . decode ( 'utf-8' ) except UnicodeDecodeError : return value . decode ( 'latin1' ) return value
Copied from the pkginfo 1.4.1 _compat module.
51,075
def get_version_from_unpacked_sdist(path):
    """Assume path points to unpacked source distribution and get version."""
    try:
        with open(os.path.join(path, 'PKG-INFO')) as f:
            data = f.read()
    except IOError:
        return
    headers = Parser().parse(StringIO(_must_decode(data)))
    version = headers.get('Version')
    if version != 'UNKNOWN':
        return version
Assume path points to unpacked source distribution and get version .
51,076
def get_version_from_file(path):
    """Find the VERSION_FILE and return its contents."""
    filename = os.path.join(path, VERSION_FILE)
    if not os.path.isfile(filename):
        # fall back to the parent directory
        filename = os.path.join(os.path.dirname(path), VERSION_FILE)
        if not os.path.isfile(filename):
            filename = ''
    if filename:
        with open(filename) as fh:
            version = fh.readline().strip()
        if version:
            return version
Find the VERSION_FILE and return its contents .
51,077
def normalised(version):
    """Normalise a version string according to PEP 440 if possible."""
    parsed = pkg_resources.parse_version(version)
    if not isinstance(parsed, tuple):
        # a non-tuple parse result already carries a normalised string form
        return str(parsed)
    # legacy tuple-based parse: normalise by hand
    public, sep, local = version.lower().partition('+')
    if len(public) >= 2 and public[0] == 'v' and public[1] in '0123456789':
        public = public[1:]
    local = NON_ALPHANUMERIC.sub('.', local)
    return public + sep + local
Normalise a version string according to PEP 440 if possible .
51,078
def get_version(path=None, module=None):
    """Return the normalised version string for a package.

    Sources are tried in order: installed module metadata, an unpacked
    sdist's PKG-INFO, SCM metadata, a VERSION file, and finally a
    date-based fallback version.

    Raises ValueError when *path* does not resolve to a directory.
    """
    from_module = get_version_from_module(module)
    if from_module:
        return normalised(from_module)

    path = os.path.abspath(path if path is not None else os.getcwd())
    if os.path.exists(path) and not os.path.isdir(path):
        # A file path was given; use its containing directory.
        path = os.path.dirname(path)
    if not os.path.isdir(path):
        raise ValueError('No such package source directory: %r' % (path,))

    from_sdist = get_version_from_unpacked_sdist(path)
    if from_sdist:
        return normalised(from_sdist)

    scm, from_scm = get_version_from_scm(path)
    if from_scm:
        return normalised(from_scm)

    from_file = get_version_from_file(path)
    if from_file:
        return normalised(from_file)

    # Last resort: synthesise a date-based version (scm may be None).
    return normalised(date_version(scm))
Return the version string .
51,079
def _sane_version_list(version):
    """Coerce the major and minor segments of *version* (a list) to ints.

    A leading 'v'/'V' prefix on the major segment is stripped.  When the
    major segment is not an integer, ``[0, 0]`` is prepended; when the
    minor segment is not an integer, a 0 minor is inserted before it.
    The input list is modified in place and returned.
    """
    major_text = str(version[0])
    major = major_text
    if major_text:
        try:
            major = int(major_text.lstrip("v").lstrip("V"))
        except ValueError:
            # Non-numeric major: flag it so [0, 0] gets prepended below.
            major = None
    if major is None:
        version = [0, 0] + version
    else:
        version[0] = major
    try:
        version[1] = int(version[1])
    except ValueError:
        version = [version[0], 0] + version[1:]
    return version
Ensure the major and minor are int .
51,080
def get_version_list(path=None, module=None):
    """Return ``[None, major, minor, patch]`` for the discovered version.

    The leading None slot is filled by build_info() with the package
    name.  major/minor default to 0 and patch to '' when no version is
    found.
    """
    major, minor, patch = 0, 0, ''
    version = get_version(path, module)
    if version is not None:
        segments = _sane_version_list(version.split(".", 2))
        major, minor = segments[0], segments[1]
        patch = ".".join(segments[2:])
    return [None, major, minor, patch]
Return the version information as a tuple .
51,081
def build_info(name, path=None, module=None):
    """Return the ``(name, major, minor, patch)`` build-info tuple."""
    info = get_version_list(path, module)
    info[0] = name
    return tuple(info)
Return the build info tuple .
51,082
def find(self, groupid):
    """Return the indices of all particles belonging to *groupid*.

    Slices ``self.indices`` using the per-group offset/length tables.
    """
    start = self.offset[groupid]
    stop = start + self.length[groupid]
    return self.indices[start:stop]
return all of the indices of particles of groupid
51,083
def sum(self, weights=None):
    """Return the summed weight of each object.

    *weights* defaults to ``self.data.weights``; the per-object sums are
    computed with a weighted bincount over the object labels.
    """
    effective = self.data.weights if weights is None else weights
    return utils.bincount(self.labels, effective, self.N)
return the sum of weights of each object
51,084
def center(self, weights=None):
    """Return the weighted centre of each object.

    Produces an ``(N, ndim)`` float64 array: per object, the weighted
    sum of positions along each axis divided by the total weight.
    """
    if weights is None:
        weights = self.data.weights
    total_mass = utils.bincount(self.labels, weights, self.N)
    ndim = self.data.pos.shape[-1]
    com = numpy.empty((len(total_mass), ndim), 'f8')
    for axis in range(ndim):
        weighted_pos = utils.bincount(
            self.labels, weights * self.data.pos[..., axis], self.N)
        com[..., axis] = weighted_pos / total_mass
    return com
return the center of each object
51,085
def getParamValues(self, paramName=None, paramId=None, useOnlyValids=True):
    """Return the rows of sampleDF associated with one parameter type.

    paramName -- name of the parameter type (derived from paramId when None)
    paramId -- id of the parameter type; must agree with paramName when
        both are given
    useOnlyValids -- when True, restrict to rows flagged isValid

    Raises ValueError when both identifiers are None or when they
    disagree.
    """
    # Idiom fix: use `is not None` instead of `not ... is None`.
    if paramName is not None:
        if paramId is not None and getParameterTypeNameFromID(paramId) != paramName:
            raise ValueError("Parameters paramId and paramName "
                             "passed to ParamSample.getParamValues() are incompatible.")
    else:
        if paramId is None:
            # Message fix: "most not" -> "must not".
            raise ValueError("At least one of the attribute paramName and paramId "
                             "passed to ParamSample.getParamValues() must not be None.")
        paramName = getParameterTypeNameFromID(paramId)
    df = self.sampleDF
    if useOnlyValids:
        df = df[df["isValid"] == True]
    # Work on a copy: assigning the derived column to a slice of
    # sampleDF previously triggered pandas' SettingWithCopy behaviour
    # and could mutate self.sampleDF as a side effect.
    df = df.copy()
    df.loc[:, "paramNames"] = [getParameterTypeNameFromID(param.typeId)
                               for param in df["obj_parameter"]]
    return df[df["paramNames"] == paramName]
Return the rows of sampleDF that are associated to the parameter specified in paramName .
51,086
def interpolate(self, interpValues):
    """Interpolate numerical-trace results at fixed independent-variable values.

    interpValues: dict mapping independent-variable parameter names to the
    value at which each should be interpolated.  For every matching
    numericalTrace row of sampleDF the interpolated mean is written back
    into the "Values" column (in place).
    """
    # Record the operation for reproducibility/bookkeeping.
    self.__operations.append(["interpolate", interpValues])
    df = self.sampleDF
    self.interpValues = interpValues
    for interParamName, value in interpValues.items():
        self.__report += "Interpolation of the parameters for independent variables '" + interParamName + "' at value " + str(value) + ".\n"
        for ind, (paramTrace, resType) in enumerate(zip(df["obj_parameter"], df["Result type"])):
            # Only traces that actually depend on this variable are touched.
            if resType == "numericalTrace" and interParamName in paramTrace.indepNames:
                val = paramTrace.getInterp1dValues(value, interParamName, statsToReturn=["mean"])
                if isinstance(val, list):
                    if len(val) == 1:
                        val = val[0]
                    else:
                        # Multiple interpolated stats are not supported here.
                        raise ValueError("This case has not been implemented yet.")
                # Overwrite the row's value with the interpolated scalar.
                df.loc[ind, "Values"] = float(val)
interpValues should be a dictionary where the keys are the parameter names of the independent variables for which interpolation should be run, and the values are the values to which each parameter should be interpolated .
51,087
def loo_compare(psisloo1, psisloo2):
    """Compare two models via pointwise approximate LOO cross-validation.

    Both models must have been fit on the same input data.  Returns a
    dict with 'diff' (sum of pointwise elpd differences, model 2 minus
    model 1) and 'se_diff' (its standard error, sqrt(n) * sd).
    """
    joined = psisloo1.pointwise.join(
        psisloo2.pointwise, lsuffix='_m1', rsuffix='_m2')
    joined['pw_diff'] = joined.pointwise_elpd_m2 - joined.pointwise_elpd_m1
    n_points = len(joined.pw_diff)
    return {
        'diff': numpy.sum(joined.pw_diff),
        'se_diff': math.sqrt(n_points) * numpy.std(joined.pw_diff),
    }
Compares two models using pointwise approximate leave - one - out cross validation . For the method to be valid the two models should have been fit on the same input data .
51,088
def plot(self):
    """Graphical summary of pointwise Pareto-k importance-sampling indices.

    Draws one point per observation (observation index vs. its pareto_k
    value) so unstable PSIS weights can be spotted by eye.
    """
    seaborn.pointplot(y=self.pointwise.pareto_k, x=self.pointwise.index, join=False)
Graphical summary of pointwise pareto - k importance - sampling indices
51,089
def get_isotopic_ratio(self, compound='', element=''):
    """Return ``{isotope: ratio}`` for *element* of *compound*.

    With no compound given, recurse over every compound/element in the
    stack and return a nested dict.  *element* defaults to the compound
    name (single-element compounds).

    Raises ValueError for an unknown compound or element.
    """
    stack = self.stack
    compound = str(compound)
    if compound == '':
        # No compound specified: build the full nested mapping.
        result = {}
        for name in stack.keys():
            name = str(name)
            result[name] = {}
            for elem in stack[name]['elements']:
                result[name][elem] = self.get_isotopic_ratio(compound=name, element=elem)
        return result
    known_compounds = stack.keys()
    if compound not in known_compounds:
        raise ValueError("Compound '{}' could not be find in {}".format(
            compound, ', '.join(known_compounds)))
    if element == '':
        element = compound
    known_elements = stack[compound].keys()
    if element not in known_elements:
        raise ValueError("Element '{}' should be any of those elements: {}".format(
            element, ', '.join(known_elements)))
    isotopes = stack[compound][element]['isotopes']['list']
    ratios = stack[compound][element]['isotopes']['isotopic_ratio']
    return dict(zip(isotopes, ratios))
returns the isotopes of the element of the defined compound together with their isotopic ratios
51,090
def get_density(self, compound='', element=''):
    """Return the density value for *element* of *compound*.

    With no compound given, recurse over every compound/element in the
    stack and return a nested dict.  *element* defaults to the compound
    name (single-element compounds).

    Raises ValueError for an unknown compound or element.
    """
    _stack = self.stack
    if compound == '':
        _list_compounds = _stack.keys()
        list_all_dict = {}
        for _compound in _list_compounds:
            _list_element = _stack[_compound]['elements']
            list_all_dict[_compound] = {}
            for _element in _list_element:
                list_all_dict[_compound][_element] = self.get_density(
                    compound=_compound, element=_element)
        return list_all_dict
    list_compounds = _stack.keys()
    if compound not in list_compounds:
        list_compounds_joined = ', '.join(list_compounds)
        # BUG FIX: the message previously interpolated the builtin
        # `compile` instead of the offending `compound` name.
        raise ValueError("Compound '{}' could not be find in {}".format(
            compound, list_compounds_joined))
    if element == '':
        element = compound
    list_element = _stack[compound].keys()
    if element not in list_element:
        list_element_joined = ', '.join(list_element)
        raise ValueError("Element '{}' should be any of those elements: {}".format(
            element, list_element_joined))
    return _stack[compound][element]['density']['value']
returns the density of the element of the defined compound
51,091
def __calculate_atoms_per_cm3(self, used_lock=False):
    """Compute molar mass and atoms/cm3 for every layer and its elements.

    Layers whose density is locked are skipped when *used_lock* is True.
    Per-element atoms/cm3 is the layer value scaled by that element's
    stoichiometric ratio.  Updates ``self.stack`` in place.
    """
    stack = self.stack
    density_lock = self.density_lock
    for layer_name in stack.keys():
        if used_lock and density_lock[layer_name]:
            continue
        molar_mass, atoms_per_cm3 = _utilities.get_atoms_per_cm3_of_layer(
            compound_dict=stack[layer_name])
        stack[layer_name]['molar_mass'] = {'value': molar_mass, 'units': 'g/mol'}
        stack[layer_name]['atoms_per_cm3'] = atoms_per_cm3
        for index, element_name in enumerate(stack[layer_name]['elements']):
            ratio = stack[layer_name]['stoichiometric_ratio'][index]
            stack[layer_name][element_name]['atoms_per_cm3'] = atoms_per_cm3 * ratio
    self.stack = stack
calculate for each element the atoms per cm3
51,092
def __update_stack_with_isotopes_infos(self, stack: dict):
    """Attach isotope metadata (file names, masses, ratios) to each element.

    Every element entry of every compound is replaced by the isotope
    dict looked up in the configured database, then missing keys are
    filled in with defaults.
    """
    for compound_name in stack:
        for element_name in stack[compound_name]['elements']:
            stack[compound_name][element_name] = _utilities.get_isotope_dicts(
                element=element_name, database=self.database)
    return self.__fill_missing_keys(stack=stack)
retrieve the isotopes isotopes file names mass and atomic_ratio from each element in stack
51,093
def __update_layer_density(self):
    """Recompute each unlocked layer's density.

    A layer's density is derived from its element densities weighted by
    the stoichiometric ratios; layers flagged in ``self.density_lock``
    keep their user-supplied value.  Updates ``self.stack`` in place.
    """
    stack = self.stack
    density_lock = self.density_lock
    for compound_name in stack.keys():
        if density_lock[compound_name]:
            continue
        ratios = stack[compound_name]['stoichiometric_ratio']
        densities = [stack[compound_name][element]['density']['value']
                     for element in stack[compound_name]['elements']]
        stack[compound_name]['density']['value'] = _utilities.get_compound_density(
            list_density=densities, list_ratio=ratios)
    self.stack = stack
calculate or update the layer density
51,094
def __update_molar_mass(self, compound='', element=''):
    """Recompute the molar mass of *element* in *compound*.

    The molar mass is the isotopic-ratio-weighted sum of the isotope
    masses; call after stoichiometric/isotopic-ratio changes.  Writes
    the result into ``self.stack[compound][element]['molar_mass']['value']``.
    """
    isotopes = self.stack[compound][element]['isotopes']
    molar_mass = 0.0
    # FIX: np.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.20+; use float directly.
    for ratio, mass in zip(isotopes['isotopic_ratio'], isotopes['mass']['value']):
        molar_mass += float(ratio) * float(mass)
    self.stack[compound][element]['molar_mass']['value'] = molar_mass
Re - calculate the molar mass of the element given due to stoichiometric changes
51,095
def main():
    """CLI entry point: compose the given root yaml file and print the result."""
    parser = argparse.ArgumentParser(description='Compose a yaml file.')
    parser.add_argument('root', type=argparse.FileType('r'),
                        help='The root yaml file to compose.')
    options = parser.parse_args()
    # ComposeLoader resolves the custom composition tags while loading.
    composed = yaml.load(options.root, Loader=ComposeLoader)
    print(yaml.dump(composed))
Builds a yaml file
51,096
def _parse(self, text, mode):
    """Parse *text* into a date, datetime or time depending on *mode*.

    Tries strict ISO 8601 first (only for strings long enough to carry a
    full timestamp), then falls back to fuzzy token matching: each
    numeric/word token is mapped to its possible date components and the
    candidate component sequences for *mode* are tried in order.
    Returns None when nothing matches.
    """
    if text is None or not text.strip():
        return None
    # 16 chars is roughly the minimum for an ISO timestamp with a time
    # part; shorter strings go straight to fuzzy matching.
    if len(text) >= 16:
        try:
            parsed = iso8601.parse_date(text, default_timezone=None)
            # Localize naive results to the parser's configured timezone.
            if not parsed.tzinfo:
                parsed = self._timezone.localize(parsed)
            return parsed
        except iso8601.ParseError:
            pass
    # Split into runs of digits and runs of letters (unicode-aware).
    tokens = regex.findall(r'([0-9]+|[^\W\d]+)', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0)
    token_possibilities = []
    for token in tokens:
        possibilities = self._get_token_possibilities(token, mode)
        if len(possibilities) > 0:
            token_possibilities.append(possibilities)
    sequences = self._get_possible_sequences(mode, len(token_possibilities), self._date_style)
    for sequence in sequences:
        match = OrderedDict()
        for c in range(len(sequence)):
            component = sequence[c]
            value = token_possibilities[c].get(component, None)
            match[component] = value
            if value is None:
                # This sequence can't be satisfied by the tokens; the
                # for/else below only fires when no break occurred.
                break
        else:
            obj = self._make_result(match, self._now, self._timezone)
            if obj is not None:
                return obj
    return None
Returns a date datetime or time depending on what information is available
51,097
def negative_report(binary_report, sha256hash, project, patch_file):
    """Log a clean (no-match) virus-scan result and append it, together
    with the suppression hash, to the project's binaries gate report."""
    permalink = binary_report['permalink']
    scanned_on = binary_report['scan_date']
    logger.info('File scan date for %s shows a clean status on: %s', patch_file, scanned_on)
    logger.info('Full report avaliable here: %s', permalink)  # (sic) message text kept as-is
    logger.info('The following sha256 hash can be used in your %s.yaml file to suppress this scan:', project)
    logger.info('%s', sha256hash)
    report_path = reports_dir + "binaries-" + project + ".log"
    with open(report_path, "a") as gate_report:
        gate_report.writelines([
            'Non Whitelisted Binary: {}\n'.format(patch_file),
            'File scan date for {} shows a clean status on {}\n'.format(patch_file, scanned_on),
            'The following sha256 hash can be used in your {}.yaml file to suppress this scan:\n'.format(project),
            '{}\n'.format(sha256hash),
        ])
If no match is made and file is clean
51,098
def positive_report(binary_report, sha256hash, project, patch_file):
    """Log an infected (positive-match) virus-scan result.

    Parameters mirror negative_report(); sha256hash and project are
    currently unused in this function.
    """
    # NOTE(review): this assigns a local that is never read; if it is
    # meant to flip a module-level `failure` flag it needs a
    # `global failure` declaration — confirm intent.
    failure = True
    report_url = binary_report['permalink']
    scan_date = binary_report['scan_date']
    logger.error("Virus Found!")
    logger.info('File scan date for %s shows a infected status on: %s', patch_file, scan_date)
    logger.info('Full report avaliable here: %s', report_url)
If a Positive match is found
51,099
def average_gradient(data, *args):
    """Return the mean squared gradient of an image.

    data -- array-like image; the gradient is taken along every axis and
    the squared components are averaged into a single scalar.

    Extra positional arguments are accepted (and ignored).  The varargs
    parameter was previously misnamed ``*kwargs`` although it collects
    positionals; renamed for clarity — callers cannot pass star-args by
    keyword, so this is interface-compatible.
    """
    return np.average(np.array(np.gradient(data)) ** 2)
Compute the mean squared gradient of an image