idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
11,300
def append_data(self, data_buffer):
    """Append raw audio data to this stream's internal buffer.

    Raises ValueError if *data_buffer* is not a whole number of frames
    (sample_width * channels bytes each).
    """
    frame_size = self.sample_width * self.channels
    buffer_len = len(data_buffer)
    if buffer_len % frame_size:
        raise ValueError("length of data_buffer must be a multiple of (sample_width * channels)")
    self._buffer += data_buffer
    self._left += buffer_len
Append data to this audio stream
11,301
def user_post_save(sender, **kwargs):
    """Create an Account for a freshly created User unless disabled.

    Raw (fixture) saves are skipped. Account creation can be turned off
    either via a ``_disable_account_creation`` attribute on the user or
    via the ``ACCOUNT_CREATE_ON_SAVE`` setting.
    """
    if kwargs.get("raw", False):
        return False
    user = kwargs["instance"]
    created = kwargs["created"]
    disabled = getattr(user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE)
    if created and not disabled:
        Account.create(user=user)
After User . save is called we check to see if it was a created user . If so we check if the User object wants account creation . If all passes we create an Account object .
11,302
def check_password_expired(user):
    """Return True if the user's password has expired.

    Always False when password history tracking is disabled, the expiry
    interval is 0, or the user has no recorded password history.
    """
    if not settings.ACCOUNT_PASSWORD_USE_HISTORY:
        return False
    if hasattr(user, "password_expiry"):
        # Per-user expiry overrides the global setting.
        expiry = user.password_expiry.expiry
    else:
        expiry = settings.ACCOUNT_PASSWORD_EXPIRY
    if expiry == 0:
        return False
    try:
        latest = user.password_history.latest("timestamp")
    except PasswordHistory.DoesNotExist:
        return False
    expiration = latest.timestamp + datetime.timedelta(seconds=expiry)
    return expiration < datetime.datetime.now(tz=pytz.UTC)
Return True if password is expired and system is using password expiration False otherwise .
11,303
def login_required(func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """Decorator for views that requires an authenticated user.

    Unauthenticated requests are redirected to the login page. Works both
    bare (``@login_required``) and parameterized (``@login_required(...)``).
    """
    def decorator(view_func):
        @functools.wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if not is_authenticated(request.user):
                return handle_redirect_to_login(
                    request,
                    redirect_field_name=redirect_field_name,
                    login_url=login_url,
                )
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator(func) if func else decorator
Decorator for views that checks that the user is logged in redirecting to the log in page if necessary .
11,304
def add_next(self, url, context):
    """Append a redirect querystring to *url* when the template context
    supplies both a redirect field name and a truthy value for it."""
    required = ("redirect_field_name", "redirect_field_value")
    if all(key in context for key in required) and context["redirect_field_value"]:
        query = urlencode({context["redirect_field_name"]: context["redirect_field_value"]})
        url += "?" + query
    return url
With both redirect_field_name and redirect_field_value available in the context add on a querystring to handle next redirecting .
11,305
def _verify(self, request, return_payload=False, verify=True, raise_missing=False, request_args=None, request_kwargs=None, *args, **kwargs):
    """Verify a token, preferring a database-backed permakey when present.

    Requests without a ``permakey`` header fall back to the parent class's
    normal JWT verification.
    """
    if "permakey" not in request.headers:
        return super()._verify(
            request=request,
            return_payload=return_payload,
            verify=verify,
            raise_missing=raise_missing,
            request_args=request_args,
            request_kwargs=request_kwargs,
            *args,
            **kwargs
        )
    permakey = request.headers.get("permakey")
    payload = self._decode(permakey, verify=verify)
    if return_payload:
        return payload
    user_id = payload.get("user_id", None)
    user = userid_table.get(user_id)
    if not user_id or not user:
        return False, 401, "No user found"
    if user.permakey == permakey:
        return True, 200, None
    return False, 401, "Permakey mismatch"
If there is a permakey then we will verify the token by checking the database . Otherwise just do the normal verification .
11,306
def get(self, item):
    """Resolve *item* to an attribute of *self* when present, then call it.

    Lets callers pass either an attribute name or a callable directly,
    avoiding an explicit getattr at the call site.
    """
    target = getattr(self, item) if item in self else item
    return target()
Helper method to avoid calling getattr
11,307
def extract_presets(app_config):
    """Pull all ``SANIC_JWT``-prefixed settings from the app config.

    Returns a dict mapping each lowercased setting name (with the
    ``sanic_jwt_`` prefix stripped) to its configured value.
    """
    prefix_len = len("SANIC_JWT_")  # 10 characters
    return {
        key.lower()[prefix_len:]: app_config.get(key)
        for key in app_config
        if key.startswith("SANIC_JWT")
    }
Pull the application's configuration values for Sanic JWT .
11,308
def initialize(*args, **kwargs):
    """Functional wrapper around the Initialize class (legacy API).

    A second positional argument is treated as the ``authenticate``
    handler. Prefer using Initialize directly.
    """
    if len(args) > 1:
        kwargs["authenticate"] = args[1]
    return Initialize(args[0], **kwargs)
Functional approach to initializing Sanic JWT . This was the original method but was replaced by the Initialize class . It is recommended to use the class because it is more flexible . There is no current plan to remove this method but it may be deprecated in the future .
11,309
def __check_deprecated(self):
    """Raise InvalidConfiguration if any deprecated config key is present."""
    # Deprecated setting -> exact error message to raise.
    deprecated = {
        "SANIC_JWT_HANDLER_PAYLOAD_SCOPES": (
            "SANIC_JWT_HANDLER_PAYLOAD_SCOPES has been deprecated. "
            "Instead, pass your handler method (not an import path) as "
            "initialize(add_scopes_to_payload=my_scope_extender)"
        ),
        "SANIC_JWT_PAYLOAD_HANDLER": (
            "SANIC_JWT_PAYLOAD_HANDLER has been deprecated. "
            "Instead, you will need to subclass Authentication. "
        ),
        "SANIC_JWT_HANDLER_PAYLOAD_EXTEND": (
            "SANIC_JWT_HANDLER_PAYLOAD_EXTEND has been deprecated. "
            "Instead, you will need to subclass Authentication. "
            "Check out the documentation for more information."
        ),
    }
    for key, message in deprecated.items():
        if key in self.app.config:
            raise exceptions.InvalidConfiguration(message)
Checks for deprecated configuration keys
11,310
def __add_endpoints(self):
    """Attach the configured endpoints and the exception handler to the
    Sanic JWT blueprint, then register the blueprint on the app."""
    for mapping in endpoint_mappings:
        # Only mount an endpoint when all of its required config keys are set.
        if all(map(self.config.get, mapping.keys)):
            self.__add_single_endpoint(
                mapping.cls, mapping.endpoint, mapping.is_protected
            )
    self.bp.exception(exceptions.SanicJWTException)(
        self.responses.exception_response
    )
    if not self.instance_is_blueprint:
        self.instance.blueprint(self.bp, url_prefix=self._get_url_prefix())
Initialize the Sanic JWT Blueprint and add to the instance initialized
11,311
def __add_class_views(self):
    """Register user-supplied (route, view) pairs on the blueprint.

    Each view must subclass BaseEndpoint and each route must be a string;
    anything else raises InvalidClassViewsFormat.
    """
    if "class_views" not in self.kwargs:
        return
    for route, view in self.kwargs.pop("class_views"):
        if not (isinstance(route, str) and issubclass(view, endpoints.BaseEndpoint)):
            raise exceptions.InvalidClassViewsFormat()
        self.bp.add_route(
            view.as_view(
                self.responses,
                config=self.config,
                instance=self.instance,
            ),
            route,
            strict_slashes=self.config.strict_slashes(),
        )
Include any custom class views on the Sanic JWT Blueprint
11,312
async def _get_user_id(self, user, *, asdict=False):
    """Extract the configured user-id field from a user object.

    Supports plain dicts and objects exposing ``to_dict``; anything else
    raises InvalidRetrieveUserObject. With ``asdict=True`` the id is
    returned wrapped in a single-key dict keyed by ``config.user_id``.
    """
    uid = self.config.user_id()
    if isinstance(user, dict):
        user_id = user.get(uid)
    elif hasattr(user, "to_dict"):
        user_dict = await utils.call(user.to_dict)
        user_id = user_dict.get(uid)
    else:
        raise exceptions.InvalidRetrieveUserObject()
    return {uid: user_id} if asdict else user_id
Get a user_id from a user object . If asdict is True will return it as a dict with config . user_id as key . The asdict keyword defaults to False .
11,313
def _check_authentication(self, request, request_args, request_kwargs):
    """Run token verification for *request*, translating any failure into
    an Unauthorized exception (re-raised unchanged in debug mode)."""
    try:
        return self._verify(
            request,
            request_args=request_args,
            request_kwargs=request_kwargs,
        )
    except Exception as e:
        logger.debug(e.args)
        if self.config.debug():
            raise e
        # Only propagate message details for known Sanic JWT exceptions.
        args = e.args if isinstance(e, SanicJWTException) else []
        raise exceptions.Unauthorized(*args)
Checks a request object to determine if that request contains a valid and authenticated JWT .
11,314
def _decode(self, token, verify=True):
    """Decode *token* into its payload, optionally verifying the claims.

    Registered claim settings (audience, issuer, leeway, etc.) are pulled
    from the configuration and forwarded to ``jwt.decode``.
    """
    secret = self._get_secret()
    algorithm = self._get_algorithm()
    decode_kwargs = {}
    for claim in self.claims:
        if claim == "exp":
            # Expiration is handled through the verify_exp option below.
            continue
        setting = "claim_{}".format(claim.lower())
        if setting in self.config:
            decode_kwargs[claim_label[claim]] = self.config.get(setting)
    decode_kwargs["leeway"] = int(self.config.leeway())
    if "claim_aud" in self.config:
        decode_kwargs["audience"] = self.config.claim_aud()
    if "claim_iss" in self.config:
        decode_kwargs["issuer"] = self.config.claim_iss()
    return jwt.decode(
        token,
        secret,
        algorithms=[algorithm],
        verify=verify,
        options={"verify_exp": self.config.verify_exp()},
        **decode_kwargs
    )
Take a JWT and return a decoded payload . Optionally will verify the claims on the token .
11,315
async def _get_payload(self, user):
    """Given a user object, create a payload and extend it as configured.

    Pipeline: build_payload -> add_claims -> extend_payload (optionally
    receiving the user) -> scopes (when enabled). Finally asserts that
    every registered and custom claim is present in the payload.
    """
    payload = await utils.call(self.build_payload, user)
    # The base payload must be a dict containing the configured user-id key.
    if (
        not isinstance(payload, dict)
        or self.config.user_id() not in payload
    ):
        raise exceptions.InvalidPayload
    payload = await utils.call(self.add_claims, payload, user)
    # extend_payload may optionally accept the user object; inspect its
    # signature to decide whether to pass it along.
    extend_payload_args = inspect.getfullargspec(self.extend_payload)
    args = [payload]
    if "user" in extend_payload_args.args:
        args.append(user)
    payload = await utils.call(self.extend_payload, *args)
    if self.config.scopes_enabled():
        scopes = await utils.call(self.add_scopes_to_payload, user)
        # Normalize a single scope into a list.
        if not isinstance(scopes, (tuple, list)):
            scopes = [scopes]
        payload[self.config.scopes_name()] = scopes
    # Every registered claim plus each custom claim key must be present.
    claims = self.claims + [x.get_key() for x in self._custom_claims]
    missing = [x for x in claims if x not in payload]
    if missing:
        logger.debug("")
        raise exceptions.MissingRegisteredClaim(missing=missing)
    return payload
Given a user object create a payload and extend it as configured .
11,316
def _get_token_from_cookies ( self , request , refresh_token ) : if refresh_token : cookie_token_name_key = "cookie_refresh_token_name" else : cookie_token_name_key = "cookie_access_token_name" cookie_token_name = getattr ( self . config , cookie_token_name_key ) return request . cookies . get ( cookie_token_name ( ) , None )
Extract the token if present inside the request cookies .
11,317
def _get_token_from_headers ( self , request , refresh_token ) : header = request . headers . get ( self . config . authorization_header ( ) , None ) if header is None : return None else : header_prefix_key = "authorization_header_prefix" header_prefix = getattr ( self . config , header_prefix_key ) if header_prefix ( ) : try : prefix , token = header . split ( " " ) if prefix != header_prefix ( ) : raise Exception except Exception : raise exceptions . InvalidAuthorizationHeader ( ) else : token = header if refresh_token : token = request . json . get ( self . config . refresh_token_name ( ) ) return token
Extract the token if present inside the headers of a request .
11,318
def _get_token_from_query_string ( self , request , refresh_token ) : if refresh_token : query_string_token_name_key = "query_string_refresh_token_name" else : query_string_token_name_key = "query_string_access_token_name" query_string_token_name = getattr ( self . config , query_string_token_name_key ) return request . args . get ( query_string_token_name ( ) , None )
Extract the token if present from the request args .
11,319
def _get_token ( self , request , refresh_token = False ) : if self . config . cookie_set ( ) : token = self . _get_token_from_cookies ( request , refresh_token ) if token : return token else : if self . config . cookie_strict ( ) : raise exceptions . MissingAuthorizationCookie ( ) if self . config . query_string_set ( ) : token = self . _get_token_from_query_string ( request , refresh_token ) if token : return token else : if self . config . query_string_strict ( ) : raise exceptions . MissingAuthorizationQueryArg ( ) token = self . _get_token_from_headers ( request , refresh_token ) if token : return token raise exceptions . MissingAuthorizationHeader ( )
Extract a token from a request object .
11,320
def _verify(
    self,
    request,
    return_payload=False,
    verify=True,
    raise_missing=False,
    request_args=None,
    request_kwargs=None,
    *args,
    **kwargs
):
    """Verify that a request object is authenticated.

    Returns ``(is_valid, status, reason)``, or just the decoded payload
    when ``return_payload`` is True.
    """
    try:
        token = self._get_token(request)
        is_valid = True
        reason = None
    except (
        exceptions.MissingAuthorizationCookie,
        exceptions.MissingAuthorizationQueryArg,
        exceptions.MissingAuthorizationHeader,
    ) as e:
        # No token could be located on the request.
        token = None
        is_valid = False
        reason = list(e.args)
        status = e.status_code if self.config.debug() else 401
        if raise_missing:
            # Outside of debug mode, mask the original status with 401.
            if not self.config.debug():
                e.status_code = 401
            raise e
    if token:
        try:
            payload = self._decode(token, verify=verify)
            if verify:
                if self._extra_verifications:
                    self._verify_extras(payload)
                if self._custom_claims:
                    self._verify_custom_claims(payload)
        except (
            jwt.exceptions.ExpiredSignatureError,
            jwt.exceptions.InvalidIssuerError,
            jwt.exceptions.ImmatureSignatureError,
            jwt.exceptions.InvalidIssuedAtError,
            jwt.exceptions.InvalidAudienceError,
            InvalidVerificationError,
            InvalidCustomClaimError,
        ) as e:
            # Claim-level failure: normalize each message to end with a period.
            reason = [
                x if x.endswith(".") else "{}.".format(x)
                for x in list(e.args)
            ]
            payload = None
            status = 401
            is_valid = False
        except jwt.exceptions.DecodeError as e:
            # Malformed token; expose details only in debug mode.
            self._reasons = e.args
            reason = (
                [
                    x if x.endswith(".") else "{}.".format(x)
                    for x in list(e.args)
                ]
                if self.config.debug()
                else "Auth required."
            )
            logger.debug(e.args)
            is_valid = False
            payload = None
            status = 400 if self.config.debug() else 401
    else:
        payload = None
    if return_payload:
        return payload
    status = 200 if is_valid else status
    return is_valid, status, reason
Verify that a request object is authenticated .
11,321
def extract_payload(self, request, verify=True, *args, **kwargs):
    """Return the decoded payload carried by *request*."""
    return self._verify(
        request, return_payload=True, verify=verify, *args, **kwargs
    )
Extract a payload from a request object .
11,322
def extract_scopes(self, request):
    """Return the scopes stored in the request's payload, or None when no
    payload could be extracted."""
    payload = self.extract_payload(request)
    if payload:
        return payload.get(self.config.scopes_name(), None)
    return None
Extract scopes from a request object .
11,323
def extract_user_id(self, request):
    """Return the user id stored in the request's payload.

    Mirrors ``extract_scopes``: when no payload could be extracted (e.g.
    the token is missing or invalid and ``_verify`` returned None), return
    None instead of raising AttributeError on ``None.get``.
    """
    payload = self.extract_payload(request)
    if not payload:
        return None
    return payload.get(self.config.user_id(), None)
Extract a user id from a request object .
11,324
async def generate_access_token(self, user):
    """Build and sign a JWT access token for *user*."""
    payload = await self._get_payload(user)
    secret = self._get_secret(True)
    algorithm = self._get_algorithm()
    token = jwt.encode(payload, secret, algorithm=algorithm)
    return token.decode("utf-8")
Generate an access token for a given user .
11,325
async def generate_refresh_token(self, request, user):
    """Create a refresh token for *user* and persist it via the configured
    ``store_refresh_token`` handler."""
    token = await utils.call(self.config.generate_refresh_token())
    uid = await self._get_user_id(user)
    await utils.call(
        self.store_refresh_token,
        user_id=uid,
        refresh_token=token,
        request=request,
    )
    return token
Generate a refresh token for a given user .
11,326
def tsplit(df, shape):
    """Split an array-like into its first *shape* rows and the remainder.

    Uses positional (iloc) slicing for pandas objects and plain slicing
    for everything else.
    """
    slicer = df.iloc if isinstance(df, (pd.DataFrame, pd.Series)) else df
    return slicer[:shape], slicer[shape:]
Split array into two parts .
11,327
def concat(x, y, axis=0):
    """Concatenate two pandas or numpy objects along *axis*.

    Falls back to numpy concatenation (row-wise) or column stacking
    (column-wise) when either operand is not a pandas object.
    """
    both_pandas = all(
        isinstance(obj, (pd.DataFrame, pd.Series)) for obj in (x, y)
    )
    if both_pandas:
        return pd.concat([x, y], axis=axis)
    if axis == 0:
        return np.concatenate([x, y])
    return np.column_stack([x, y])
Concatenate a sequence of pandas or numpy objects into one entity .
11,328
def reshape_1d(df):
    """Promote a 1-D row vector into a single-column 2-D matrix; anything
    already 2-D (or higher) passes through unchanged."""
    shape = df.shape
    if len(shape) != 1:
        return df
    return df.reshape(shape[0], 1)
If parameter is 1D row vector then convert it into 2D matrix .
11,329
def idx(df, index):
    """Row-select by position: ``iloc`` for pandas objects, fancy row
    indexing for numpy arrays."""
    if isinstance(df, (pd.DataFrame, pd.Series)):
        return df.iloc[index]
    return df[index, :]
Universal indexing for numpy and pandas objects .
11,330
def xgb_progressbar(rounds=1000):
    """Build an xgboost callback that advances a tqdm bar once per round."""
    progress = tqdm(total=rounds)

    def callback(_):
        progress.update(1)

    return callback
Progressbar for xgboost using tqdm library .
11,331
def add(self, model):
    """Append *model* to the pipeline; only Regressor/Classifier allowed."""
    if not isinstance(model, (Regressor, Classifier)):
        raise ValueError('Unrecognized estimator.')
    self.models.append(model)
Adds a single model .
11,332
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
    """Stack every model in the pipeline into a single new Dataset.

    Each model contributes its out-of-fold train predictions and its test
    predictions as named columns; ``add_diff`` additionally appends
    pairwise feature differences.
    """
    train_parts = []
    test_parts = []
    y = None
    for model in self.models:
        result = model.stack(
            k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test
        )
        train_parts.append(
            pd.DataFrame(
                result.X_train,
                columns=generate_columns(result.X_train, model.name),
            )
        )
        test_parts.append(
            pd.DataFrame(
                result.X_test,
                columns=generate_columns(result.X_test, model.name),
            )
        )
        if y is None:
            # Every model stacks the same target; keep the first copy.
            y = result.y_train
    stacked_train = pd.concat(train_parts, axis=1)
    stacked_test = pd.concat(test_parts, axis=1)
    if add_diff:
        stacked_train = feature_combiner(stacked_train)
        stacked_test = feature_combiner(stacked_test)
    return Dataset(X_train=stacked_train, y_train=y, X_test=stacked_test)
Stacks sequence of models .
11,333
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None, add_diff=False):
    """Blend every model in the pipeline into a single new Dataset.

    Collects each model's holdout and test predictions as columns;
    ``add_diff`` additionally appends pairwise feature differences.
    """
    train_parts = []
    test_parts = []
    y = None
    for model in self.models:
        result = model.blend(
            proportion=proportion, stratify=stratify, seed=seed, indices=indices
        )
        train_parts.append(
            pd.DataFrame(
                result.X_train,
                columns=generate_columns(result.X_train, model.name),
            )
        )
        test_parts.append(
            pd.DataFrame(
                result.X_test,
                columns=generate_columns(result.X_test, model.name),
            )
        )
        if y is None:
            # Every model blends against the same holdout target.
            y = result.y_train
    blended_train = pd.concat(train_parts, axis=1, ignore_index=True)
    blended_test = pd.concat(test_parts, axis=1, ignore_index=True)
    if add_diff:
        blended_train = feature_combiner(blended_train)
        blended_test = feature_combiner(blended_test)
    return Dataset(X_train=blended_train, y_train=y, X_test=blended_test)
Blends sequence of models .
11,334
def find_weights(self, scorer, test_size=0.2, method='SLSQP'):
    """Search for optimal weights for a weighted average of the models."""
    optimizer = Optimizer(self.models, test_size=test_size, scorer=scorer)
    return optimizer.minimize(method)
Finds optimal weights for weighted average of models .
11,335
def weight(self, weights):
    """Apply a weighted average across the models' predictions."""
    def averager(preds):
        return np.average(preds, axis=0, weights=weights)
    return self.apply(averager)
Applies weighted mean to models .
11,336
def onehot_features(train, test, features, full=False, sparse=False, dummy_na=True):
    """Encode categorical *features* of train/test with a one-hot scheme.

    Parameters
    ----------
    train, test : pd.DataFrame
        Frames that are dummy-encoded consistently with each other.
    features : list of str
        Candidate column names; names absent from ``train`` are skipped.
    full : bool
        Derive the category set from train+test instead of train only.
    sparse : bool
        Produce sparse dummy columns.
    dummy_na : bool
        Add an indicator column for missing values.

    Returns
    -------
    tuple of (train, test) with the selected columns one-hot encoded.
    """
    features = [f for f in features if f in train.columns]
    for column in features:
        if full:
            categories = pd.concat([train[column], test[column]]).dropna().unique()
        else:
            categories = train[column].dropna().unique()
        # `astype('category', categories=...)` was removed from pandas;
        # an explicit CategoricalDtype keeps the old behavior (values
        # outside the category set become NaN).
        cat_dtype = pd.api.types.CategoricalDtype(categories=categories)
        train[column] = train[column].astype(cat_dtype)
        test[column] = test[column].astype(cat_dtype)
    train = pd.get_dummies(train, columns=features, dummy_na=dummy_na, sparse=sparse)
    test = pd.get_dummies(test, columns=features, dummy_na=dummy_na, sparse=sparse)
    return train, test
Encode categorical features using a one - hot scheme .
11,337
def factorize(train, test, features, na_value=-9999, full=False, sort=True):
    """Label-encode *features* consistently across train and test.

    Labels are fit on train (or train+test when ``full``); values unseen
    during fitting map to ``na_value``.
    """
    for column in features:
        if full:
            combined = pd.concat([train[column], test[column]])
            _, uniques = pd.factorize(combined, sort=sort)
        else:
            _, uniques = pd.factorize(train[column], sort=sort)
        # get_indexer returns -1 for values not in the fitted uniques.
        train[column] = uniques.get_indexer(train[column])
        test[column] = uniques.get_indexer(test[column])
        if na_value != -1:
            train[column] = train[column].replace(-1, na_value)
            test[column] = test[column].replace(-1, na_value)
    return train, test
Factorize categorical features .
11,338
def woe(df, feature_name, target_name):
    """Weight-of-evidence transform of *feature_name* against a binary target.

    Returns a Series aligned with *df* where each row carries the WoE value
    of its feature group, scaled by 100.
    """
    if df[target_name].nunique() > 2:
        raise ValueError('Target column should be binary (1/0).')
    event_total = float(df[df[target_name] == 1.0].shape[0])
    non_event_total = float(df.shape[0] - event_total)

    def group_woe(group):
        events = float(group.sum())
        non_events = group.shape[0] - events
        rel_event = events / event_total
        rel_non_event = non_events / non_event_total
        return np.log(rel_non_event / rel_event) * 100

    return df.groupby(feature_name)[target_name].transform(group_woe)
Calculate weight of evidence .
11,339
def kfold(self, k=5, stratify=False, shuffle=True, seed=33):
    """Yield ``(X_train, y_train, X_test, y_test, train_index, test_index)``
    for each of *k* cross-validation folds, optionally stratified."""
    splitter_cls = StratifiedKFold if stratify else KFold
    splitter = splitter_cls(n_splits=k, random_state=seed, shuffle=shuffle)
    for train_index, test_index in splitter.split(self.X_train, self.y_train):
        yield (
            idx(self.X_train, train_index),
            self.y_train[train_index],
            idx(self.X_train, test_index),
            self.y_train[test_index],
            train_index,
            test_index,
        )
K - Folds cross validation iterator .
11,340
def hash(self):
    """Lazily compute and cache an md5 digest identifying this dataset.

    Raw arrays are hashed directly; for preprocessor-backed datasets the
    preprocessor's source code is hashed instead.
    """
    if self._hash is None:
        digest = hashlib.new('md5')
        if self._preprocessor is None:
            digest.update(numpy_buffer(self._X_train))
            digest.update(numpy_buffer(self._y_train))
            if self._X_test is not None:
                digest.update(numpy_buffer(self._X_test))
            if self._y_test is not None:
                digest.update(numpy_buffer(self._y_test))
        elif callable(self._preprocessor):
            digest.update(inspect.getsource(self._preprocessor).encode('utf-8'))
        self._hash = digest.hexdigest()
    return self._hash
Return md5 hash for current dataset .
11,341
def merge(self, ds, inplace=False, axis=1):
    """Merge another Dataset into this one along *axis*.

    Returns a new Dataset, or None when ``inplace`` is True (this dataset
    is mutated instead). Test splits are only merged when present on *ds*.
    """
    if not isinstance(ds, Dataset):
        raise ValueError('Expected `Dataset`, got %s.' % ds)
    X_train = concat(ds.X_train, self.X_train, axis=axis)
    y_train = concat(ds.y_train, self.y_train, axis=axis)
    X_test = concat(ds.X_test, self.X_test, axis=axis) if ds.X_test is not None else None
    y_test = concat(ds.y_test, self.y_test, axis=axis) if ds.y_test is not None else None
    if not inplace:
        return Dataset(X_train, y_train, X_test, y_test)
    self._X_train = X_train
    self._y_train = y_train
    if X_test is not None:
        self._X_test = X_test
    if y_test is not None:
        self._y_test = y_test
    return None
Merge two datasets .
11,342
def to_csc(self):
    """Convert the train/test matrices to scipy CSC format, in place."""
    for attr in ('_X_train', '_X_test'):
        setattr(self, attr, csc_matrix(getattr(self, attr)))
Convert Dataset to scipy's Compressed Sparse Column matrix .
11,343
def to_csr(self):
    """Convert the train/test matrices to scipy CSR format, in place."""
    for attr in ('_X_train', '_X_test'):
        setattr(self, attr, csr_matrix(getattr(self, attr)))
Convert Dataset to scipy's Compressed Sparse Row matrix .
11,344
def to_dense(self):
    """Densify the train/test matrices in place; no-op if already dense."""
    is_sparse = hasattr(self._X_train, 'todense')
    if not is_sparse:
        return
    self._X_train = self._X_train.todense()
    self._X_test = self._X_test.todense()
Convert sparse Dataset to dense matrix .
11,345
def _dhash ( self , params ) : m = hashlib . new ( 'md5' ) m . update ( self . hash . encode ( 'utf-8' ) ) for key in sorted ( params . keys ( ) ) : h_string = ( '%s-%s' % ( key , params [ key ] ) ) . encode ( 'utf-8' ) m . update ( h_string ) return m . hexdigest ( )
Generate hash of the dictionary object .
11,346
def validate(self, scorer=None, k=1, test_size=0.1, stratify=False, shuffle=True, seed=100, indices=None):
    """Evaluate score by cross-validation.

    With ``k == 1`` a single train/test split of ``test_size`` is used;
    otherwise k-fold CV. Predictions are cached per parameter hash when
    ``use_cache`` is enabled. Returns ``(y_true, y_pred)`` lists with one
    entry per fold.
    """
    if self.use_cache:
        # Cache key derives from every parameter that affects the split.
        pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'test_size': test_size}
        if indices is not None:
            pdict['train_index'] = np_hash(indices[0])
            pdict['test_index'] = np_hash(indices[1])
        dhash = self._dhash(pdict)
        c = Cache(dhash, prefix='v')
        if c.available:
            logger.info('Loading %s\'s validation results from cache.' % self._name)
    elif (self.dataset.X_train is None) and (self.dataset.y_train is None):
        self.dataset.load()
    scores = []
    y_true = []
    y_pred = []
    if k == 1:
        # Single holdout split.
        X_train, y_train, X_test, y_test = self.dataset.split(test_size=test_size, stratify=stratify,
                                                              seed=seed, indices=indices)
        if self.use_cache and c.available:
            prediction = c.retrieve('0')
        else:
            prediction = self._predict(X_train, y_train, X_test, y_test)
            if self.use_cache:
                c.store('0', prediction)
        if scorer is not None:
            scores.append(scorer(y_test, prediction))
        y_true.append(y_test)
        y_pred.append(prediction)
    else:
        # K-fold cross-validation; each fold's prediction is cached by index.
        for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
            X_train, y_train, X_test, y_test, train_index, test_index = fold
            if self.use_cache and c.available:
                prediction = c.retrieve(str(i))
            else:
                prediction = None
            if prediction is None:
                logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
                prediction = self._predict(X_train, y_train, X_test, y_test)
                if self.use_cache:
                    c.store(str(i), prediction)
            if scorer is not None:
                scores.append(scorer(y_test, prediction))
            y_true.append(y_test)
            y_pred.append(prediction)
    if scorer is not None:
        report_score(scores, scorer)
    return y_true, y_pred
Evaluate score by cross - validation .
11,347
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True):
    """Stack a single model. You should rarely be using this method; use
    ModelsPipeline.stack instead.

    Produces out-of-fold train predictions and test predictions. With
    ``full_test`` the test predictions come from a model refit on the full
    training set; otherwise they are the mean of the per-fold predictions.
    """
    train = None
    test = []
    if self.use_cache:
        pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'full_test': full_test}
        dhash = self._dhash(pdict)
        c = Cache(dhash, prefix='s')
        if c.available:
            # Short-circuit with previously computed stack results.
            logger.info('Loading %s\'s stack results from cache.' % self._name)
            train = c.retrieve('train')
            test = c.retrieve('test')
            y_train = c.retrieve('y_train')
            return Dataset(X_train=train, y_train=y_train, X_test=test)
    elif not self.dataset.loaded:
        self.dataset.load()
    for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
        X_train, y_train, X_test, y_test, train_index, test_index = fold
        logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
        if full_test:
            prediction = reshape_1d(self._predict(X_train, y_train, X_test, y_test))
        else:
            # Predict fold holdout and the real test set in one call, then
            # split the result back apart.
            xt_shape = X_test.shape[0]
            x_t = concat(X_test, self.dataset.X_test)
            prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))
            prediction, prediction_test = tsplit(prediction_concat, xt_shape)
            test.append(prediction_test)
        if train is None:
            train = np.zeros((self.dataset.X_train.shape[0], prediction.shape[1]))
        # Place the fold's holdout predictions at their original row positions.
        train[test_index] = prediction
    if full_test:
        logger.info('Calculating %s\'s test data' % self._name)
        test = self._predict(self.dataset.X_train, self.dataset.y_train, self.dataset.X_test)
    else:
        test = np.mean(test, axis=0)
    test = reshape_1d(test)
    if self.use_cache:
        c.store('train', train)
        c.store('test', test)
        c.store('y_train', self.dataset.y_train)
    return Dataset(X_train=train, y_train=self.dataset.y_train, X_test=test)
Stack a single model . You should rarely be using this method . Use ModelsPipeline . stack instead .
11,348
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None):
    """Blend a single model. You should rarely be using this method; use
    ModelsPipeline.blend instead.

    Trains on (1 - proportion) of the data and returns a Dataset of
    predictions for the holdout part (as train) and the real test set.
    """
    if self.use_cache:
        pdict = {'proportion': proportion, 'stratify': stratify, 'seed': seed, 'indices': indices}
        if indices is not None:
            pdict['train_index'] = np_hash(indices[0])
            pdict['test_index'] = np_hash(indices[1])
        dhash = self._dhash(pdict)
        c = Cache(dhash, prefix='b')
        if c.available:
            # Short-circuit with previously computed blend results.
            logger.info('Loading %s\'s blend results from cache.' % self._name)
            train = c.retrieve('train')
            test = c.retrieve('test')
            y_train = c.retrieve('y_train')
            return Dataset(X_train=train, y_train=y_train, X_test=test)
    elif not self.dataset.loaded:
        self.dataset.load()
    X_train, y_train, X_test, y_test = self.dataset.split(test_size=proportion, stratify=stratify,
                                                          seed=seed, indices=indices)
    # Predict the holdout and the real test set in one call, then split.
    xt_shape = X_test.shape[0]
    x_t = concat(X_test, self.dataset.X_test)
    prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))
    new_train, new_test = tsplit(prediction_concat, xt_shape)
    if self.use_cache:
        c.store('train', new_train)
        c.store('test', new_test)
        c.store('y_train', y_test)
    return Dataset(new_train, y_test, new_test)
Blend a single model . You should rarely be using this method . Use ModelsPipeline . blend instead .
11,349
def numpy_buffer(ndarray):
    """Return a read-only byte buffer over a (possibly pandas) array.

    The data is made C-contiguous first (transposing Fortran-ordered
    arrays, flattening anything else) and then viewed as raw uint8.
    """
    if isinstance(ndarray, (pd.Series, pd.DataFrame)):
        ndarray = ndarray.values
    if ndarray.flags.c_contiguous:
        contiguous = ndarray
    elif ndarray.flags.f_contiguous:
        contiguous = ndarray.T
    else:
        contiguous = ndarray.flatten()
    as_bytes = contiguous.view(np.uint8)
    # np.getbuffer only exists on Python 2 builds of numpy.
    if hasattr(np, 'getbuffer'):
        return np.getbuffer(as_bytes)
    return memoryview(as_bytes)
Creates a buffer from c_contiguous numpy ndarray .
11,350
def store(self, key, data):
    """Persist *data* under *key* in the cache directory.

    DataFrames additionally get their column names saved as JSON so they
    can be reconstructed on retrieval.
    """
    if not os.path.exists(self._hash_dir):
        os.makedirs(self._hash_dir)
    target = os.path.join(self._hash_dir, key)
    if isinstance(data, pd.DataFrame):
        np.save(target, data.values)
        with open(os.path.join(self._hash_dir, '%s.json' % key), 'w') as fp:
            json.dump(data.columns.tolist(), fp)
    else:
        np.save(target, data)
Takes an array and stores it in the cache .
11,351
def retrieve(self, key):
    """Load a cached array by *key*, rebuilding a DataFrame when column
    metadata was stored alongside it. Returns None on a cache miss."""
    cache_file = os.path.join(self._hash_dir, '%s.npy' % key)
    if not os.path.exists(cache_file):
        return None
    data = np.load(cache_file)
    column_file = os.path.join(self._hash_dir, '%s.json' % key)
    if os.path.exists(column_file):
        with open(column_file, 'r') as fp:
            data = pd.DataFrame(data, columns=json.load(fp))
    return data
Retrieves a cached array if possible .
11,352
def from_coords(cls, x, y):
    """Build an ECPoint from the integer X and Y coordinates of the point.

    Encodes the point in uncompressed form: ``0x04 || X || Y`` where both
    coordinates are zero-padded big-endian integers of equal width.
    """
    # The previous ceil(log2(v) / 8) sizing under-counts exact powers of
    # 256 (e.g. 256 -> 1 byte instead of 2) and breaks for 0 and 1;
    # bit_length-based sizing is exact for all non-negative integers.
    x_bytes = (x.bit_length() + 7) // 8
    y_bytes = (y.bit_length() + 7) // 8
    # Both coordinates share the wider width so a decoder can split the
    # body evenly; use at least one byte even for the origin.
    num_bytes = max(x_bytes, y_bytes, 1)
    byte_string = b'\x04'
    byte_string += x.to_bytes(num_bytes, byteorder='big')
    byte_string += y.to_bytes(num_bytes, byteorder='big')
    return cls(byte_string)
Creates an ECPoint object from the X and Y integer coordinates of the point
11,353
def to_coords(self):
    """Return the (x, y) coordinates of this EC point as Python integers.

    Only the uncompressed form (leading 0x04 byte) is supported; the
    remaining bytes are split evenly into X and Y halves.
    """
    data = self.native
    first_byte = data[0:1]
    if first_byte == b'\x04':
        remaining = data[1:]
        field_len = len(remaining) // 2
        x = int_from_bytes(remaining[0:field_len])
        y = int_from_bytes(remaining[field_len:])
        return (x, y)
    # NOTE(review): both error paths call unwrap() with no arguments — the
    # original error messages appear to have been lost in this copy; verify
    # against the upstream source. 0x02/0x03 mark compressed points, which
    # are not supported here.
    if first_byte not in set([b'\x02', b'\x03']):
        raise ValueError(unwrap())
    raise ValueError(unwrap())
Returns the X and Y coordinates for this EC point as native Python integers
11,354
def unwrap(self):
    """Unwrap the private key into an RSAPrivateKey, DSAPrivateKey or
    ECPrivateKey object.

    NOTE(review): implicitly returns None for any other algorithm value —
    confirm callers never hit that case.
    """
    if self.algorithm == 'rsa':
        return self['private_key'].parsed
    if self.algorithm == 'dsa':
        params = self['private_key_algorithm']['parameters']
        # Rebuild a full DSAPrivateKey structure from the stored parameters,
        # the derived public key and the parsed private value.
        return DSAPrivateKey({
            'version': 0,
            'p': params['p'],
            'q': params['q'],
            'g': params['g'],
            'public_key': self.public_key,
            'private_key': self['private_key'].parsed,
        })
    if self.algorithm == 'ec':
        output = self['private_key'].parsed
        # EC keys keep their curve parameters and public key alongside.
        output['parameters'] = self['private_key_algorithm']['parameters']
        output['public_key'] = self.public_key
        return output
Unwraps the private key into an RSAPrivateKey DSAPrivateKey or ECPrivateKey object
11,355
def fingerprint(self):
    """
    Creates a fingerprint that can be compared with a public key to see if
    the two form a pair.

    :return:
        A byte string - the sha256 digest of key components that both halves
        of the key pair share, cached after the first computation
    """

    if self._fingerprint is None:
        params = self['private_key_algorithm']['parameters']
        key = self['private_key'].parsed

        if self.algorithm == 'rsa':
            to_hash = '%d:%d' % (
                key['modulus'].native,
                key['public_exponent'].native,
            )

        elif self.algorithm == 'dsa':
            public_key = self.public_key
            to_hash = '%d:%d:%d:%d' % (
                params['p'].native,
                params['q'].native,
                params['g'].native,
                public_key.native,
            )

        elif self.algorithm == 'ec':
            public_key = key['public_key'].native
            if public_key is None:
                public_key = self.public_key.native

            if params.name == 'named':
                to_hash = '%s:' % params.chosen.native
                to_hash = to_hash.encode('utf-8')
                to_hash += public_key

            elif params.name == 'implicit_ca':
                to_hash = public_key

            elif params.name == 'specified':
                to_hash = '%s:' % params.chosen['field_id']['parameters'].native
                to_hash = to_hash.encode('utf-8')
                to_hash += b':' + params.chosen['curve']['a'].native
                to_hash += b':' + params.chosen['curve']['b'].native
                to_hash += public_key

        if isinstance(to_hash, str_cls):
            to_hash = to_hash.encode('utf-8')

        self._fingerprint = hashlib.sha256(to_hash).digest()

    return self._fingerprint
Creates a fingerprint that can be compared with a public key to see if the two form a pair .
11,356
def run(ci=False):
    """
    Runs the tests while measuring coverage.

    :param ci:
        If CI behavior should be enabled - also runs the tests from the
        other modularcrypto packages and generates/submits an XML report

    :return:
        A bool - if the tests ran successfully
    """

    xml_report_path = os.path.join(package_root, 'coverage.xml')
    if os.path.exists(xml_report_path):
        os.unlink(xml_report_path)

    cov = coverage.Coverage(include='%s/*.py' % package_name)
    cov.start()

    from .tests import run as run_tests
    result = run_tests()
    print()

    if ci:
        suite = unittest.TestSuite()
        loader = unittest.TestLoader()
        for other_package in other_packages:
            for test_class in _load_package_tests(other_package):
                suite.addTest(loader.loadTestsFromTestCase(test_class))

        if suite.countTestCases() > 0:
            print('Running tests from other modularcrypto packages')
            sys.stdout.flush()
            runner_result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1).run(suite)
            result = runner_result.wasSuccessful() and result
            print()
            sys.stdout.flush()

    cov.stop()
    cov.save()

    cov.report(show_missing=False)
    print()
    sys.stdout.flush()
    if ci:
        cov.xml_report()

    if ci and result and os.path.exists(xml_report_path):
        _codecov_submit()
        print()

    return result
Runs the tests while measuring coverage
11,357
def _git_command(params, cwd):
    """
    Executes a git command, returning the output.

    :param params:
        A list of the parameters to pass to git

    :param cwd:
        The working directory to execute git in

    :raises:
        OSError - when git exits with a non-zero code

    :return:
        A unicode string of the git output, stripped of whitespace
    """

    proc = subprocess.Popen(
        ['git'] + params,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=cwd
    )
    stdout, stderr = proc.communicate()
    exit_code = proc.wait()
    if exit_code != 0:
        error = OSError('git exit code was non-zero')
        error.stdout = stdout
        raise error
    return stdout.decode('utf-8').strip()
Executes a git command returning the output
11,358
def _parse_env_var_file ( data ) : output = { } for line in data . splitlines ( ) : line = line . strip ( ) if not line or '=' not in line : continue parts = line . split ( '=' ) if len ( parts ) != 2 : continue name = parts [ 0 ] value = parts [ 1 ] if len ( value ) > 1 : if value [ 0 ] == '"' and value [ - 1 ] == '"' : value = value [ 1 : - 1 ] output [ name ] = value return output
Parses a basic VAR = value data file contents into a dict
11,359
def _platform_name():
    """
    Returns information about the current operating system and version.

    :return:
        A unicode string containing the OS name and, when available, version
    """

    if sys.platform == 'darwin':
        version = _plat.mac_ver()[0]
        _plat_ver_info = tuple(map(int, version.split('.')))
        # Apple renamed the OS starting with 10.12
        if _plat_ver_info < (10, 12):
            name = 'OS X'
        else:
            name = 'macOS'
        return '%s %s' % (name, version)

    elif sys.platform == 'win32':
        # The original also called sys.getwindowsversion() here, but the
        # result was never used - removed as dead code
        return 'Windows %s' % _plat.win32_ver()[0]

    elif sys.platform in ['linux', 'linux2']:
        if os.path.exists('/etc/os-release'):
            with open('/etc/os-release', 'r', encoding='utf-8') as f:
                pairs = _parse_env_var_file(f.read())
                if 'NAME' in pairs and 'VERSION_ID' in pairs:
                    # The original assigned pairs['VERSION_ID'] to a local
                    # after this return - unreachable dead code, removed
                    return '%s %s' % (pairs['NAME'], pairs['VERSION_ID'])
                elif 'PRETTY_NAME' in pairs:
                    return pairs['PRETTY_NAME']
                elif 'NAME' in pairs:
                    return pairs['NAME']
                else:
                    raise ValueError('No suitable version info found in /etc/os-release')

        elif os.path.exists('/etc/lsb-release'):
            with open('/etc/lsb-release', 'r', encoding='utf-8') as f:
                pairs = _parse_env_var_file(f.read())
                if 'DISTRIB_DESCRIPTION' in pairs:
                    return pairs['DISTRIB_DESCRIPTION']
                else:
                    raise ValueError('No suitable version info found in /etc/lsb-release')

        else:
            return 'Linux'

    else:
        return '%s %s' % (_plat.system(), _plat.release())
Returns information about the current operating system and version
11,360
def _list_files(root):
    """
    Lists all of the files in a directory, taking into account any .gitignore
    file that is present.

    :param root:
        A unicode filesystem path

    :return:
        A sorted list of unicode paths, relative to root
    """

    dir_patterns, file_patterns = _gitignore(root)
    paths = []
    prefix = os.path.abspath(root) + os.sep
    for base, dirs, files in os.walk(root):
        # Prune ignored directories in place. The original removed entries
        # from "dirs" while iterating over it, which skips the element after
        # each removal and can leave ignored directories in the walk.
        dirs[:] = [
            d
            for d in dirs
            if not any(fnmatch(d, dir_pattern) for dir_pattern in dir_patterns)
        ]
        for f in files:
            if any(fnmatch(f, file_pattern) for file_pattern in file_patterns):
                continue
            full_path = os.path.join(base, f)
            # Make the path relative to root
            if full_path[:len(prefix)] == prefix:
                full_path = full_path[len(prefix):]
            paths.append(full_path)
    return sorted(paths)
Lists all of the files in a directory taking into account any . gitignore file that is present
11,361
def _execute ( params , cwd ) : proc = subprocess . Popen ( params , stdout = subprocess . PIPE , stderr = subprocess . PIPE , cwd = cwd ) stdout , stderr = proc . communicate ( ) code = proc . wait ( ) if code != 0 : e = OSError ( 'subprocess exit code for %r was %d: %s' % ( params , code , stderr ) ) e . stdout = stdout e . stderr = stderr raise e return ( stdout , stderr )
Executes a subprocess
11,362
def run():
    """
    Installs required development dependencies. Uses git to checkout other
    modularcrypto repos for more accurate coverage data.

    :return:
        A bool - if the dependencies were staged successfully
    """

    deps_dir = os.path.join(build_root, 'modularcrypto-deps')
    if os.path.exists(deps_dir):
        shutil.rmtree(deps_dir, ignore_errors=True)
    os.mkdir(deps_dir)

    try:
        print("Staging ci dependencies")
        _stage_requirements(deps_dir, os.path.join(package_root, 'requires', 'ci'))

        print("Checking out modularcrypto packages for coverage")
        for other_package in other_packages:
            pkg_url = 'https://github.com/wbond/%s.git' % other_package
            pkg_dir = os.path.join(build_root, other_package)
            if os.path.exists(pkg_dir):
                print("%s is already present" % other_package)
                continue
            print("Cloning %s" % pkg_url)
            _execute(['git', 'clone', pkg_url], build_root)
        print()

    except Exception:
        # Clean up the partially-staged directory before re-raising
        if os.path.exists(deps_dir):
            shutil.rmtree(deps_dir, ignore_errors=True)
        raise

    return True
Installs required development dependencies . Uses git to checkout other modularcrypto repos for more accurate coverage data .
11,363
def _download(url, dest):
    """
    Downloads a URL to a directory.

    :param url:
        The URL to download

    :param dest:
        The path to the directory to save the file in

    :return:
        The filesystem path to the saved file
    """

    print('Downloading %s' % url)
    filename = os.path.basename(url)
    dest_path = os.path.join(dest, filename)

    if sys.platform == 'win32':
        # Use PowerShell so no third-party download tool is required
        powershell_exe = os.path.join('system32\\WindowsPowerShell\\v1.0\\powershell.exe')
        code = "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;"
        code += "(New-Object Net.WebClient).DownloadFile('%s', '%s');" % (url, dest_path)
        _execute([powershell_exe, '-Command', code], dest)
    else:
        _execute(['curl', '-L', '--silent', '--show-error', '-O', url], dest)

    return dest_path
Downloads a URL to a directory
11,364
def _archive_single_dir(archive):
    """
    Checks if all members of the archive are contained in a single top-level
    directory.

    :param archive:
        An archive object to inspect

    :return:
        None if the members are not under a single top-level directory,
        otherwise a unicode string of that directory name
    """

    common_root = None
    for info in _list_archive_members(archive):
        fn = _info_name(info)
        if fn in set(['.', '/']):
            continue
        sep = None
        if '/' in fn:
            sep = '/'
        elif '\\' in fn:
            sep = '\\'
        if sep is None:
            root_dir = fn
        else:
            root_dir, _ = fn.split(sep, 1)
        if common_root is None:
            common_root = root_dir
        elif common_root != root_dir:
            return None
    return common_root
Check if all members of the archive are in a single top - level directory
11,365
def _info_name ( info ) : if isinstance ( info , zipfile . ZipInfo ) : return info . filename . replace ( '\\' , '/' ) return info . name . replace ( '\\' , '/' )
Returns a normalized file path for an archive info object
11,366
def _extract_info ( archive , info ) : if isinstance ( archive , zipfile . ZipFile ) : fn = info . filename is_dir = fn . endswith ( '/' ) or fn . endswith ( '\\' ) out = archive . read ( info ) if is_dir and out == b'' : return None return out info_file = archive . extractfile ( info ) if info_file : return info_file . read ( ) return None
Extracts the contents of an archive info object
11,367
def _extract_package(deps_dir, pkg_path):
    """
    Extracts a .whl, .zip, .tar.gz, .tar.bz2 or .exe installer into a package
    path to use when running CI tasks.

    :param deps_dir:
        The directory to extract the package into

    :param pkg_path:
        The filesystem path to the downloaded package file
    """

    if pkg_path.endswith('.exe'):
        # Windows bdist installers are zips with importable files under PLATLIB
        try:
            zf = None
            zf = zipfile.ZipFile(pkg_path, 'r')
            for zi in zf.infolist():
                if not zi.filename.startswith('PLATLIB'):
                    continue
                data = _extract_info(zf, zi)
                if data is not None:
                    dst_path = os.path.join(deps_dir, zi.filename[8:])
                    dst_dir = os.path.dirname(dst_path)
                    if not os.path.exists(dst_dir):
                        os.makedirs(dst_dir)
                    with open(dst_path, 'wb') as f:
                        f.write(data)
        finally:
            if zf:
                zf.close()
        return

    if pkg_path.endswith('.whl'):
        # Wheels are plain zips that can be extracted as-is
        try:
            zf = None
            zf = zipfile.ZipFile(pkg_path, 'r')
            zf.extractall(deps_dir)
        finally:
            if zf:
                zf.close()
        return

    try:
        ar = None
        ar = _open_archive(pkg_path)

        pkg_name = None
        base_path = _archive_single_dir(ar) or ''
        if len(base_path):
            if '-' in base_path:
                pkg_name, _ = base_path.split('-', 1)
            base_path += '/'

        base_pkg_path = None
        if pkg_name is not None:
            base_pkg_path = base_path + pkg_name + '/'
        src_path = base_path + 'src/'

        # Collect (info, destination path) pairs for the importable files
        members = []
        for info in _list_archive_members(ar):
            fn = _info_name(info)
            if base_pkg_path is not None and fn.startswith(base_pkg_path):
                dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
                members.append((info, dst_path))
                continue
            if fn.startswith(src_path):
                members.append((info, fn[len(src_path):]))
                continue

        for info, path in members:
            info_data = _extract_info(ar, info)
            # None indicates a directory entry
            if info_data is not None:
                dst_path = os.path.join(deps_dir, path)
                dst_dir = os.path.dirname(dst_path)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                with open(dst_path, 'wb') as f:
                    f.write(info_data)
    finally:
        if ar:
            ar.close()
Extract a . whl . zip . tar . gz or . tar . bz2 into a package path to use when running CI tasks
11,368
def _parse_requires(path):
    """
    Does basic parsing of pip requirements files, to allow for using something
    other than Python to do actual TLS requests.

    :param path:
        A path to a requirements file

    :return:
        A list of dicts with the keys 'type' ('any', 'url', '==' or '>='),
        'pkg' and, for version constraints, 'ver'
    """

    python_version = '.'.join(map(str_cls, sys.version_info[0:2]))
    sys_platform = sys.platform

    packages = []

    with open(path, 'rb') as f:
        contents = f.read().decode('utf-8')

    for line in re.split(r'\r?\n', contents):
        line = line.strip()
        if not len(line):
            continue
        if re.match(r'^\s*#', line):
            continue
        if ';' in line:
            # Evaluate an environment marker such as python_version < '3.0'
            package, cond = line.split(';', 1)
            package = package.strip()
            cond = cond.strip()
            cond = cond.replace('sys_platform', repr(sys_platform))
            cond = cond.replace('python_version', repr(python_version))
            # NOTE(review): eval() of the marker is only acceptable because
            # these requirements files ship with the repository - never feed
            # untrusted input through this function
            if not eval(cond):
                continue
        else:
            package = line.strip()

        if re.match(r'^\s*-r\s*', package):
            # Recurse into included requirements files
            sub_req_file = re.sub(r'^\s*-r\s*', '', package)
            sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))
            packages.extend(_parse_requires(sub_req_file))
            continue

        if re.match(r'https?://', package):
            packages.append({'type': 'url', 'pkg': package})
            continue

        if '>=' in package:
            parts = package.split('>=')
            package = parts[0].strip()
            ver = parts[1].strip()
            packages.append({'type': '>=', 'pkg': package, 'ver': ver})
            continue

        if '==' in package:
            parts = package.split('==')
            package = parts[0].strip()
            ver = parts[1].strip()
            packages.append({'type': '==', 'pkg': package, 'ver': ver})
            continue

        if re.search(r'[^ a-zA-Z0-9\-]', package):
            raise Exception('Unsupported requirements format version constraint: %s' % package)

        packages.append({'type': 'any', 'pkg': package})

    return packages
Does basic parsing of pip requirements files to allow for using something other than Python to do actual TLS requests
11,369
def unarmor(pem_bytes, multiple=False):
    """
    Convert a PEM-encoded byte string into one or more DER-encoded results.

    :param pem_bytes:
        A byte string of the PEM-encoded data

    :param multiple:
        If True, a generator of all results is returned instead of only the
        first one

    :return:
        The first unarmored result, or a generator when multiple is True
    """

    results = _unarmor(pem_bytes)
    if multiple:
        return results
    return next(results)
Convert a PEM - encoded byte string into a DER - encoded byte string
11,370
def preferred_ordinal(cls, attr_name):
    """
    Returns an ordering value for a particular attribute key.

    Attributes not in cls.preferred_order sort after all preferred ones.

    :param attr_name:
        The attribute name or OID string to compute the ordinal for

    :return:
        A 2-element tuple of (ordinal, mapped attribute name), suitable for
        use as a sort key
    """

    attr_name = cls.map(attr_name)
    try:
        ordinal = cls.preferred_order.index(attr_name)
    except ValueError:
        ordinal = len(cls.preferred_order)
    return (ordinal, attr_name)
Returns an ordering value for a particular attribute key .
11,371
def prepped_value(self):
    """
    Returns the value after being processed by the internationalized string
    preparation as specified by RFC 5280.

    :return:
        A prepared unicode string, cached after the first computation
    """

    if self._prepped is None:
        self._prepped = self._ldap_string_prep(self['value'].native)
    return self._prepped
Returns the value after being processed by the internationalized string preparation as specified by RFC 5280
11,372
def _get_values ( self , rdn ) : output = { } [ output . update ( [ ( ntv [ 'type' ] . native , ntv . prepped_value ) ] ) for ntv in rdn ] return output
Returns a dict of prepped values contained in an RDN
11,373
def build(cls, name_dict, use_printable=False):
    """
    Creates a Name object from a dict of unicode string keys and values. The
    keys should be from NameType._map, or a dotted-integer OID unicode string.

    :param name_dict:
        A dict of name information, e.g. {"common_name": "Will Bond"}

    :param use_printable:
        A bool - if PrintableString should be used for the general encoding
        instead of UTF8String (for compatibility with old software)

    :return:
        A Name object
    """

    if use_printable:
        encoding_name = 'printable_string'
        encoding_class = PrintableString
    else:
        encoding_name = 'utf8_string'
        encoding_class = UTF8String

    # Order the attributes according to NameType.preferred_order
    name_dict = OrderedDict(
        sorted(
            name_dict.items(),
            key=lambda item: NameType.preferred_ordinal(item[0])
        )
    )

    rdns = []
    for attribute_name, attribute_value in name_dict.items():
        attribute_name = NameType.map(attribute_name)
        if attribute_name == 'email_address':
            value = EmailAddress(attribute_value)
        elif attribute_name == 'domain_component':
            value = DNSName(attribute_value)
        elif attribute_name in set(['dn_qualifier', 'country_name', 'serial_number']):
            # These attributes are always PrintableString per the ASN.1 spec
            value = DirectoryString(
                name='printable_string',
                value=PrintableString(attribute_value)
            )
        else:
            value = DirectoryString(
                name=encoding_name,
                value=encoding_class(attribute_value)
            )

        rdns.append(RelativeDistinguishedName([
            NameTypeAndValue({
                'type': attribute_name,
                'value': value
            })
        ]))

    return cls(name='', value=RDNSequence(rdns))
Creates a Name object from a dict of unicode string keys and values . The keys should be from NameType . _map or a dotted - integer OID unicode string .
11,374
def _recursive_humanize ( self , value ) : if isinstance ( value , list ) : return ', ' . join ( reversed ( [ self . _recursive_humanize ( sub_value ) for sub_value in value ] ) ) return value . native
Recursively serializes data compiled from the RDNSequence
11,375
def crl_distribution_points(self):
    """
    Returns complete CRL URLs - does not include delta CRLs.

    :return:
        A list of zero or more DistributionPoint objects, cached after the
        first computation
    """

    if self._crl_distribution_points is None:
        self._crl_distribution_points = self._get_http_crl_distribution_points(
            self.crl_distribution_points_value
        )
    return self._crl_distribution_points
Returns complete CRL URLs - does not include delta CRLs
11,376
def delta_crl_distribution_points(self):
    """
    Returns delta CRL URLs - does not include complete CRLs.

    :return:
        A list of zero or more DistributionPoint objects, cached after the
        first computation
    """

    if self._delta_crl_distribution_points is None:
        self._delta_crl_distribution_points = self._get_http_crl_distribution_points(
            self.freshest_crl_value
        )
    return self._delta_crl_distribution_points
Returns delta CRL URLs - does not include complete CRLs
11,377
def _get_http_crl_distribution_points(self, crl_distribution_points):
    """
    Fetches the DistributionPoint objects for non-relative, URI-based CRLs
    referenced by the certificate.

    :param crl_distribution_points:
        A CRLDistributionPoints object, or None

    :return:
        A list of zero or more DistributionPoint objects
    """

    if crl_distribution_points is None:
        return []

    output = []
    for distribution_point in crl_distribution_points:
        distribution_point_name = distribution_point['distribution_point']
        # VOID indicates the optional field was absent
        if distribution_point_name is VOID:
            continue
        # Relative names are relative to the CRL issuer, which can not be
        # resolved here
        if distribution_point_name.name == 'name_relative_to_crl_issuer':
            continue
        for general_name in distribution_point_name.chosen:
            if general_name.name == 'uniform_resource_identifier':
                output.append(distribution_point)
    return output
Fetches the DistributionPoint object for non - relative HTTP CRLs referenced by the certificate
11,378
def _is_wildcard_match ( self , domain_labels , valid_domain_labels ) : first_domain_label = domain_labels [ 0 ] other_domain_labels = domain_labels [ 1 : ] wildcard_label = valid_domain_labels [ 0 ] other_valid_domain_labels = valid_domain_labels [ 1 : ] if other_domain_labels != other_valid_domain_labels : return False if wildcard_label == '*' : return True wildcard_regex = re . compile ( '^' + wildcard_label . replace ( '*' , '.*' ) + '$' ) if wildcard_regex . match ( first_domain_label ) : return True return False
Determines if the labels in a domain are a match for labels from a wildcard valid domain name
11,379
def run():
    """
    Runs flake8 lint over the package, dev and tests source files.

    :return:
        A bool - if flake8 found no errors
    """

    print('Running flake8 %s' % flake8.__version__)

    flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))

    paths = []
    for _dir in [package_name, 'dev', 'tests']:
        for root, _, filenames in os.walk(_dir):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                paths.append(os.path.join(root, filename))

    report = flake8_style.check_files(paths)
    success = report.total_errors == 0
    if success:
        print('OK')
    return success
Runs flake8 lint
11,380
def run():
    """
    Prints environment information, then runs the linter (when available)
    and the test suite.

    :return:
        A bool - if the lint and tests all succeeded
    """

    print('Python ' + sys.version.replace('\n', ''))

    # Report the oscrypto backend when the sibling checkout is available
    try:
        oscrypto_tests_module_info = imp.find_module('tests', [os.path.join(build_root, 'oscrypto')])
        oscrypto_tests = imp.load_module('oscrypto.tests', *oscrypto_tests_module_info)
        oscrypto = oscrypto_tests.local_oscrypto()
        print('\noscrypto backend: %s' % oscrypto.backend())
    except ImportError:
        pass

    if run_lint:
        print('')
        lint_result = run_lint()
    else:
        lint_result = True

    if run_coverage:
        print('\nRunning tests (via coverage.py)')
        sys.stdout.flush()
        tests_result = run_coverage(ci=True)
    else:
        print('\nRunning tests')
        sys.stdout.flush()
        tests_result = run_tests()
    sys.stdout.flush()

    return lint_result and tests_result
Runs the linter and tests
11,381
def replace(self, year=None, month=None, day=None):
    """
    Returns a new datetime.date or asn1crypto.util.extended_date object with
    the specified components replaced.

    :param year:
        The new year, or None to keep the current value

    :param month:
        The new month, or None to keep the current value

    :param day:
        The new day, or None to keep the current value

    :return:
        A datetime.date when the resulting year is positive, otherwise an
        extended_date (which can represent year 0)
    """

    year = self.year if year is None else year
    month = self.month if month is None else month
    day = self.day if day is None else day

    cls = date if year > 0 else extended_date
    return cls(year, month, day)
Returns a new datetime . date or asn1crypto . util . extended_date object with the specified components replaced
11,382
def replace(self, year=None, month=None, day=None, hour=None, minute=None,
            second=None, microsecond=None, tzinfo=None):
    """
    Returns a new datetime.datetime or asn1crypto.util.extended_datetime
    object with the specified components replaced.

    Passing None for any component keeps the current value - note this means
    tzinfo can not be explicitly cleared via this method.

    :return:
        A datetime.datetime when the resulting year is positive, otherwise
        an extended_datetime (which can represent year 0)
    """

    year = self.year if year is None else year
    month = self.month if month is None else month
    day = self.day if day is None else day
    hour = self.hour if hour is None else hour
    minute = self.minute if minute is None else minute
    second = self.second if second is None else second
    microsecond = self.microsecond if microsecond is None else microsecond
    tzinfo = self.tzinfo if tzinfo is None else tzinfo

    cls = datetime if year > 0 else extended_datetime
    return cls(year, month, day, hour, minute, second, microsecond, tzinfo)
Returns a new datetime . datetime or asn1crypto . util . extended_datetime object with the specified components replaced
11,383
def delta_crl_distribution_points(self):
    """
    Returns delta CRL URLs - only applies to complete CRLs.

    :return:
        A list of zero or more DistributionPoint objects, cached after the
        first computation
    """

    if self._delta_crl_distribution_points is None:
        collected = []
        if self.freshest_crl_value is not None:
            for distribution_point in self.freshest_crl_value:
                distribution_point_name = distribution_point['distribution_point']
                # Relative names are relative to the CRL issuer and can not
                # be fetched directly
                if distribution_point_name.name == 'name_relative_to_crl_issuer':
                    continue
                for general_name in distribution_point_name.chosen:
                    if general_name.name == 'uniform_resource_identifier':
                        collected.append(distribution_point)
        self._delta_crl_distribution_points = collected
    return self._delta_crl_distribution_points
Returns delta CRL URLs - only applies to complete CRLs
11,384
def _set_extensions(self):
    """
    Sets common named extensions to private attributes and creates a set of
    critical extensions.
    """

    self._critical_extensions = set()

    for extension in self['single_extensions']:
        name = extension['extn_id'].native
        attribute_name = '_%s_value' % name
        # Only store extensions this class has a slot for
        if hasattr(self, attribute_name):
            setattr(self, attribute_name, extension['extn_value'].parsed)
        if extension['critical'].native:
            self._critical_extensions.add(name)

    self._processed_extensions = True
Sets common named extensions to private attributes and creates a list of critical extensions
11,385
# Prints out basic information about an Asn1Value object: its type and id(),
# the raw header bytes, any explicit/implicit tagging details, and the raw
# contents bytes. Extracted for reuse among classes that customize debugging.
# NOTE(review): the flattened single-line form makes the nesting of the
# tagging conditionals ambiguous, so the code is left byte-identical rather
# than risk reformatting it incorrectly.
def _basic_debug ( prefix , self ) : print ( '%s%s Object #%s' % ( prefix , type_name ( self ) , id ( self ) ) ) if self . _header : print ( '%s Header: 0x%s' % ( prefix , binascii . hexlify ( self . _header or b'' ) . decode ( 'utf-8' ) ) ) has_header = self . method is not None and self . class_ is not None and self . tag is not None if has_header : method_name = METHOD_NUM_TO_NAME_MAP . get ( self . method ) class_name = CLASS_NUM_TO_NAME_MAP . get ( self . class_ ) if self . explicit is not None : for class_ , tag in self . explicit : print ( '%s %s tag %s (explicitly tagged)' % ( prefix , CLASS_NUM_TO_NAME_MAP . get ( class_ ) , tag ) ) if has_header : print ( '%s %s %s %s' % ( prefix , method_name , class_name , self . tag ) ) elif self . implicit : if has_header : print ( '%s %s %s tag %s (implicitly tagged)' % ( prefix , method_name , class_name , self . tag ) ) elif has_header : print ( '%s %s %s tag %s' % ( prefix , method_name , class_name , self . tag ) ) print ( '%s Data: 0x%s' % ( prefix , binascii . hexlify ( self . contents or b'' ) . decode ( 'utf-8' ) ) )
Prints out basic information about an Asn1Value object . Extracted for reuse among different classes that customize the debug information .
11,386
def _tag_type_to_explicit_implicit ( params ) : if 'tag_type' in params : if params [ 'tag_type' ] == 'explicit' : params [ 'explicit' ] = ( params . get ( 'class' , 2 ) , params [ 'tag' ] ) elif params [ 'tag_type' ] == 'implicit' : params [ 'implicit' ] = ( params . get ( 'class' , 2 ) , params [ 'tag' ] ) del params [ 'tag_type' ] del params [ 'tag' ] if 'class' in params : del params [ 'class' ]
Converts old - style tag_type and tag params to explicit and implicit
11,387
def _build_id_tuple(params, spec):
    """
    Builds a 2-element tuple used to identify fields by grabbing the class_
    and tag from an Asn1Value class, and the params dict being passed to it.

    :param params:
        A dict of params to pass to spec

    :param spec:
        An Asn1Value class

    :return:
        A 2-element tuple of (class_, tag)
    """

    # Handle situations where the spec is not known at setup time
    if spec is None:
        return (None, None)

    required_class = spec.class_
    required_tag = spec.tag

    _tag_type_to_explicit_implicit(params)

    if 'explicit' in params:
        if isinstance(params['explicit'], tuple):
            required_class, required_tag = params['explicit']
        else:
            required_class = 2
            required_tag = params['explicit']
    elif 'implicit' in params:
        if isinstance(params['implicit'], tuple):
            required_class, required_tag = params['implicit']
        else:
            required_class = 2
            required_tag = params['implicit']

    # Translate a class name string into its numeric form
    if required_class is not None and not isinstance(required_class, int_types):
        required_class = CLASS_NAME_TO_NUM_MAP[required_class]

    required_class = params.get('class_', required_class)
    required_tag = params.get('tag', required_tag)

    return (required_class, required_tag)
Builds a 2 - element tuple used to identify fields by grabbing the class_ and tag from an Asn1Value class and the params dict being passed to it
11,388
def _parse_build(encoded_data, pointer=0, spec=None, spec_params=None, strict=False):
    """
    Parses a byte string generically, or using a spec with optional params.

    :param encoded_data:
        A byte string of BER/DER-encoded data

    :param pointer:
        The index in the byte string to parse from

    :param spec:
        An Asn1Value class to parse the data as, or None for a generic parse

    :param spec_params:
        A dict of params to pass when constructing the spec object

    :param strict:
        A bool - if trailing data should raise a ValueError

    :return:
        A 2-element tuple of (Asn1Value object, updated pointer)
    """

    encoded_len = len(encoded_data)
    info, new_pointer = _parse(encoded_data, encoded_len, pointer)
    if strict and new_pointer != pointer + encoded_len:
        extra_bytes = pointer + encoded_len - new_pointer
        raise ValueError('Extra data - %d bytes of trailing data were provided' % extra_bytes)
    return (_build(*info, spec=spec, spec_params=spec_params), new_pointer)
Parses a byte string generically or using a spec with optional params
11,389
def _new_instance ( self ) : new_obj = self . __class__ ( ) new_obj . class_ = self . class_ new_obj . tag = self . tag new_obj . implicit = self . implicit new_obj . explicit = self . explicit return new_obj
Constructs a new copy of the current object preserving any tagging
11,390
def retag(self, tagging, tag=None):
    """
    Copies the object, applying a new tagging to it.

    :param tagging:
        A dict with the keys "explicit" and/or "implicit", or a unicode
        string of "explicit" or "implicit" (with the tag passed separately)

    :param tag:
        An integer tag number - only used when tagging is a unicode string

    :return:
        An Asn1Value object of the same class as self
    """

    # Allow retag("explicit", 5) as shorthand for retag({"explicit": 5})
    if not isinstance(tagging, dict):
        tagging = {tagging: tag}
    new_obj = self.__class__(
        explicit=tagging.get('explicit'),
        implicit=tagging.get('implicit')
    )
    new_obj._copy(self, copy.deepcopy)
    return new_obj
Copies the object applying a new tagging to it
11,391
def untag(self):
    """
    Copies the object, removing any special tagging from it.

    :return:
        An Asn1Value object of the same class as self, without tagging
    """

    plain = self.__class__()
    plain._copy(self, copy.deepcopy)
    return plain
Copies the object removing any special tagging from it
11,392
def _as_chunk ( self ) : if self . _chunks_offset == 0 : return self . contents return self . contents [ self . _chunks_offset : ]
A method to return a chunk of data that can be combined for constructed method values
11,393
def _copy(self, other, copy_func):
    """
    Copies the contents of another Constructable object to itself.

    :param other:
        The object to copy from

    :param copy_func:
        Either copy.copy() or copy.deepcopy()
    """

    super(Constructable, self)._copy(other, copy_func)
    # The encoding method and indefinite-length flag are not part of the
    # base-class copy
    self.method = other.method
    self._indefinite = other._indefinite
Copies the contents of another Constructable object to itself
11,394
def _copy(self, other, copy_func):
    """
    Copies the contents of another Any object to itself.

    :param other:
        The object to copy from

    :param copy_func:
        Either copy.copy() or copy.deepcopy()
    """

    super(Any, self)._copy(other, copy_func)
    self._parsed = copy_func(other._parsed)
Copies the contents of another Any object to itself
11,395
def _setup(self):
    """
    Generates _id_map from _alternatives to allow validating contents.
    """

    cls = self.__class__
    cls._id_map = {}
    cls._name_map = {}
    for index, info in enumerate(cls._alternatives):
        # Normalize 2-element alternatives to include an empty params dict
        if len(info) < 3:
            info = info + ({},)
            cls._alternatives[index] = info
        id_ = _build_id_tuple(info[2], info[1])
        cls._id_map[id_] = index
        cls._name_map[info[0]] = index
Generates _id_map from _alternatives to allow validating contents
11,396
def parse(self):
    """
    Parses the detected alternative.

    :return:
        An Asn1Value object of the chosen alternative
    """

    if self._parsed is not None:
        return self._parsed

    try:
        _, spec, params = self._alternatives[self._choice]
        self._parsed, _ = _parse_build(self._contents, spec=spec, spec_params=params)
    except (ValueError, TypeError) as e:
        args = e.args[1:]
        e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
        raise e

    # BUG FIX: the original fell off the end here, so the first call cached
    # the parsed value but returned None, while subsequent calls returned
    # the value via the cache check above. Return it consistently.
    return self._parsed
Parses the detected alternative
11,397
def _copy(self, other, copy_func):
    """
    Copies the contents of another Choice object to itself.

    :param other:
        The object to copy from

    :param copy_func:
        Either copy.copy() or copy.deepcopy()
    """

    super(Choice, self)._copy(other, copy_func)
    self._choice = other._choice
    self._name = other._name
    self._parsed = copy_func(other._parsed)
Copies the contents of another Choice object to itself
11,398
def _copy(self, other, copy_func):
    """
    Copies the contents of another AbstractString object to itself.

    :param other:
        The object to copy from

    :param copy_func:
        Either copy.copy() or copy.deepcopy()
    """

    super(AbstractString, self)._copy(other, copy_func)
    # Carry over the cached unicode representation
    self._unicode = other._unicode
Copies the contents of another AbstractString object to itself
11,399
def _copy(self, other, copy_func):
    """
    Copies the contents of another OctetBitString object to itself.

    :param other:
        The object to copy from

    :param copy_func:
        Either copy.copy() or copy.deepcopy()
    """

    super(OctetBitString, self)._copy(other, copy_func)
    # Carry over the cached byte-string representation
    self._bytes = other._bytes
Copies the contents of another OctetBitString object to itself