idx | question | target |
|---|---|---|
50,600 | def port ( self , value ) : self . _port = value if value is None : try : del self . _connectionXML . attrib [ 'port' ] except KeyError : pass else : self . _connectionXML . set ( 'port' , value ) | Set the connection's port property . |
50,601 | def query_band ( self , value ) : self . _query_band = value if value is None : try : del self . _connectionXML . attrib [ 'query-band-spec' ] except KeyError : pass else : self . _connectionXML . set ( 'query-band-spec' , value ) | Set the connection's query_band property . |
50,602 | def initial_sql ( self , value ) : self . _initial_sql = value if value is None : try : del self . _connectionXML . attrib [ 'one-time-sql' ] except KeyError : pass else : self . _connectionXML . set ( 'one-time-sql' , value ) | Set the connection's initial_sql property . |
50,603 | def base36encode ( number ) : ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz" base36 = '' sign = '' if number < 0 : sign = '-' number = - number if 0 <= number < len ( ALPHABET ) : return sign + ALPHABET [ number ] while number != 0 : number , i = divmod ( number , len ( ALPHABET ) ) base36 = ALPHABET [ i ] + base36 return sign + base36 | Converts an integer into a base36 string . |
50,604 | def get_connections ( self ) : if float ( self . _dsversion ) < 10 : connections = self . _extract_legacy_connection ( ) else : connections = self . _extract_federated_connections ( ) return connections | Find and return all connections based on file format version . |
50,605 | def from_connections ( cls , caption , connections ) : root = ET . Element ( 'datasource' , caption = caption , version = '10.0' , inline = 'true' ) outer_connection = ET . SubElement ( root , 'connection' ) outer_connection . set ( 'class' , 'federated' ) named_conns = ET . SubElement ( outer_connection , 'named-connections' ) for conn in connections : nc = ET . SubElement ( named_conns , 'named-connection' , name = _make_unique_name ( conn . dbclass ) , caption = conn . server ) nc . append ( conn . _connectionXML ) return cls ( root ) | Create a new Data Source given a list of Connections . |
50,606 | def name ( self ) : alias = getattr ( self , 'alias' , None ) if alias : return alias caption = getattr ( self , 'caption' , None ) if caption : return caption return self . id | Provides a nice name for the field which is derived from the alias , caption , or the id . |
50,607 | def _check_configuration ( self , * attrs ) : for attr in attrs : if getattr ( self , attr , None ) is None : raise ConfigurationError ( "{} not configured" . format ( attr ) ) | Check that each named attr has been configured |
50,608 | def _make_request ( self , url , ** kwargs ) : response = requests . post ( url , ** kwargs ) try : return response . json ( ) except ValueError : pass return parse_qs ( response . content ) | Make a request to an OAuth2 endpoint |
50,609 | def get_token ( self , code , headers = None , ** kwargs ) : self . _check_configuration ( "site" , "token_url" , "redirect_uri" , "client_id" , "client_secret" ) url = "%s%s" % ( self . site , quote ( self . token_url ) ) data = { 'redirect_uri' : self . redirect_uri , 'client_id' : self . client_id , 'client_secret' : self . client_secret , 'code' : code , } data . update ( kwargs ) return self . _make_request ( url , data = data , headers = headers ) | Requests an access token |
50,610 | def refresh_token ( self , headers = None , ** kwargs ) : self . _check_configuration ( "site" , "token_url" , "client_id" , "client_secret" ) url = "%s%s" % ( self . site , quote ( self . token_url ) ) data = { 'client_id' : self . client_id , 'client_secret' : self . client_secret , } data . update ( kwargs ) return self . _make_request ( url , data = data , headers = headers ) | Request a refreshed token |
50,611 | def revoke_token ( self , token , headers = None , ** kwargs ) : self . _check_configuration ( "site" , "revoke_url" ) url = "%s%s" % ( self . site , quote ( self . revoke_url ) ) data = { 'token' : token } data . update ( kwargs ) return self . _make_request ( url , data = data , headers = headers ) | Revoke an access token |
50,612 | def save_network_to_file ( self , filename = "network0.pkl" ) : import cPickle , os , re if filename == "network0.pkl" : while os . path . exists ( os . path . join ( os . getcwd ( ) , filename ) ) : filename = re . sub ( '\d(?!\d)' , lambda x : str ( int ( x . group ( 0 ) ) + 1 ) , filename ) with open ( filename , 'wb' ) as file : store_dict = { "n_inputs" : self . n_inputs , "layers" : self . layers , "n_weights" : self . n_weights , "weights" : self . weights , } cPickle . dump ( store_dict , file , 2 ) | This save method pickles the parameters of the current network into a binary file for persistent storage . |
50,613 | def load_network_from_file ( filename ) : import cPickle network = NeuralNet ( { "n_inputs" : 1 , "layers" : [ [ 0 , None ] ] } ) with open ( filename , 'rb' ) as file : store_dict = cPickle . load ( file ) network . n_inputs = store_dict [ "n_inputs" ] network . n_weights = store_dict [ "n_weights" ] network . layers = store_dict [ "layers" ] network . weights = store_dict [ "weights" ] return network | Load the complete configuration of a previously stored network . |
50,614 | def replace_nan ( trainingset , replace_with = None ) : training_data = np . array ( [ instance . features for instance in trainingset ] ) . astype ( np . float64 ) def encoder ( dataset ) : for instance in dataset : instance . features = instance . features . astype ( np . float64 ) if np . sum ( np . isnan ( instance . features ) ) : if replace_with == None : instance . features [ np . isnan ( instance . features ) ] = means [ np . isnan ( instance . features ) ] else : instance . features [ np . isnan ( instance . features ) ] = replace_with return dataset if replace_with == None : means = np . mean ( np . nan_to_num ( training_data ) , axis = 0 ) return encoder | Replace instances of NaN ( not a number ) with either the mean of the single feature or a specific value assigned by replace_with |
50,615 | def elliot_function ( signal , derivative = False ) : s = 1 abs_signal = ( 1 + np . abs ( signal * s ) ) if derivative : return 0.5 * s / abs_signal ** 2 else : return 0.5 * ( signal * s ) / abs_signal + 0.5 | A fast approximation of sigmoid |
50,616 | def symmetric_elliot_function ( signal , derivative = False ) : s = 1.0 abs_signal = ( 1 + np . abs ( signal * s ) ) if derivative : return s / abs_signal ** 2 else : return ( signal * s ) / abs_signal | A fast approximation of tanh |
50,617 | def LReLU_function ( signal , derivative = False , leakage = 0.01 ) : if derivative : return np . clip ( signal > 0 , leakage , 1.0 ) else : output = np . copy ( signal ) output [ output < 0 ] *= leakage return output | Leaky Rectified Linear Unit |
50,618 | def is_zipfile ( filename ) : result = False try : if hasattr ( filename , "read" ) : result = _check_zipfile ( fp = filename ) else : with open ( filename , "rb" ) as fp : result = _check_zipfile ( fp ) except OSError : pass return result | Quickly see if a file is a ZIP file by checking the magic number . |
50,619 | def readline ( self , limit = - 1 ) : if not self . _universal and limit < 0 : i = self . _readbuffer . find ( b'\n' , self . _offset ) + 1 if i > 0 : line = self . _readbuffer [ self . _offset : i ] self . _offset = i return line if not self . _universal : return io . BufferedIOBase . readline ( self , limit ) line = b'' while limit < 0 or len ( line ) < limit : readahead = self . peek ( 2 ) if readahead == b'' : return line match = self . PATTERN . search ( readahead ) newline = match . group ( 'newline' ) if newline is not None : if self . newlines is None : self . newlines = [ ] if newline not in self . newlines : self . newlines . append ( newline ) self . _offset += len ( newline ) return line + b'\n' chunk = match . group ( 'chunk' ) if limit >= 0 : chunk = chunk [ : limit - len ( line ) ] self . _offset += len ( chunk ) line += chunk return line | Read and return a line from the stream . |
50,620 | def setpassword ( self , pwd ) : if pwd and not isinstance ( pwd , bytes ) : raise TypeError ( "pwd: expected bytes, got %s" % type ( pwd ) ) if pwd : self . pwd = pwd else : self . pwd = None | Set default password for encrypted files . |
50,621 | def _sanitize_windows_name ( cls , arcname , pathsep ) : table = cls . _windows_illegal_name_trans_table if not table : illegal = ':<>|"?*' table = str . maketrans ( illegal , '_' * len ( illegal ) ) cls . _windows_illegal_name_trans_table = table arcname = arcname . translate ( table ) arcname = ( x . rstrip ( '.' ) for x in arcname . split ( pathsep ) ) arcname = pathsep . join ( x for x in arcname if x ) return arcname | Replace bad characters and remove trailing dots from parts . |
50,622 | def close ( self ) : if self . fp is None : return try : if self . mode in ( 'w' , 'x' , 'a' ) and self . _didModify : with self . _lock : if self . _seekable : self . fp . seek ( self . start_dir ) self . _write_end_record ( ) finally : fp = self . fp self . fp = None self . _fpclose ( fp ) | Close the file and for mode w x and a write the ending records . |
50,623 | def display_string_dump ( self , section_spec ) : section = _section_from_spec ( self . elf_file , section_spec ) if section is None : print ( "Section '%s' does not exist in the file!" % section_spec ) return None data = section . data ( ) dataptr = 0 strs = [ ] while dataptr < len ( data ) : while dataptr < len ( data ) and not 32 <= byte2int ( data [ dataptr ] ) <= 127 : dataptr += 1 if dataptr >= len ( data ) : break endptr = dataptr while endptr < len ( data ) and byte2int ( data [ endptr ] ) != 0 : endptr += 1 strs . append ( binascii . b2a_hex ( data [ dataptr : endptr ] ) . decode ( ) . upper ( ) ) dataptr = endptr return strs | Display a strings dump of a section . section_spec is either a section number or a name . |
50,624 | def _EnvOpen ( var , mode ) : value = os . getenv ( var ) if value is None : raise ValueError ( "%s is not set" % var ) fd = int ( value ) if _WINDOWS : fd = msvcrt . open_osfhandle ( fd , 0 ) return os . fdopen ( fd , mode ) | Open a file descriptor identified by an environment variable . |
50,625 | def Send ( self , message ) : if not isinstance ( message , common_pb2 . Message ) : raise ValueError ( "Send requires a fleetspeak.Message" ) if message . destination . service_name == "system" : raise ValueError ( "Only predefined messages can have destination.service_name == \"system\"" ) return self . _SendImpl ( message ) | Send a message through Fleetspeak . |
50,626 | def Recv ( self ) : size = struct . unpack ( _STRUCT_FMT , self . _ReadN ( _STRUCT_LEN ) ) [ 0 ] if size > MAX_SIZE : raise ProtocolError ( "Expected size to be at most %d, got %d" % ( MAX_SIZE , size ) ) with self . _read_lock : buf = self . _ReadN ( size ) self . _ReadMagic ( ) res = common_pb2 . Message ( ) res . ParseFromString ( buf ) return res , len ( buf ) | Accept a message from Fleetspeak . |
50,627 | def Heartbeat ( self ) : heartbeat_msg = common_pb2 . Message ( message_type = "Heartbeat" , destination = common_pb2 . Address ( service_name = "system" ) ) self . _SendImpl ( heartbeat_msg ) | Sends a heartbeat to the Fleetspeak client . |
50,628 | def _ReadN ( self , n ) : ret = "" while True : chunk = self . _read_file . read ( n - len ( ret ) ) ret += chunk if len ( ret ) == n or not chunk : return ret | Reads n characters from the input stream or until EOF . |
50,629 | def _CompileProtos ( ) : proto_files = [ ] for dir_path , _ , filenames in os . walk ( THIS_DIRECTORY ) : for filename in filenames : if filename . endswith ( ".proto" ) : proto_files . append ( os . path . join ( dir_path , filename ) ) if not proto_files : return protoc_command = [ "python" , "-m" , "grpc_tools.protoc" , "--python_out" , THIS_DIRECTORY , "--grpc_python_out" , THIS_DIRECTORY , "--proto_path" , THIS_DIRECTORY , ] protoc_command . extend ( proto_files ) subprocess . check_output ( protoc_command ) | Compiles all Fleetspeak protos . |
50,630 | def _RetryLoop ( self , func , timeout = None ) : timeout = timeout or self . DEFAULT_TIMEOUT deadline = time . time ( ) + timeout sleep = 1 while True : try : return func ( timeout ) except grpc . RpcError : if time . time ( ) + sleep > deadline : raise time . sleep ( sleep ) sleep *= 2 timeout = deadline - time . time ( ) | Retries an operation until success or deadline . |
50,631 | def InsertMessage ( self , message , timeout = None ) : if not isinstance ( message , common_pb2 . Message ) : raise InvalidArgument ( "Attempt to send unexpected message type: %s" % message . __class__ . __name__ ) if not message . HasField ( "source" ) : message . source . service_name = self . _service_name if not message . message_id : message . message_id = os . urandom ( 32 ) return self . _RetryLoop ( lambda t : self . _stub . InsertMessage ( message , timeout = t ) ) | Inserts a message into the Fleetspeak server . |
50,632 | def ListClients ( self , request , timeout = None ) : return self . _RetryLoop ( lambda t : self . _stub . ListClients ( request , timeout = t ) ) | Provides basic information about Fleetspeak clients . |
50,633 | def Send ( self , message ) : if not self . outgoing : raise NotConfigured ( "Send address not provided." ) self . outgoing . InsertMessage ( message ) | Send one message . |
50,634 | def start_naive_bayes ( automated_run , session , path ) : module = functions . import_string_code_as_module ( automated_run . source ) random_state = 8 if not hasattr ( module , 'random_state' ) else module . random_state assert module . metric_to_optimize in automated_run . base_learner_origin . metric_generators base_estimator = automated_run . base_learner_origin . return_estimator ( ) base_estimator . set_params ( ** module . default_params ) default_params = functions . make_serializable ( base_estimator . get_params ( ) ) non_searchable_params = dict ( ( key , val ) for key , val in iteritems ( default_params ) if key not in module . pbounds ) existing_base_learners = [ ] for base_learner in automated_run . base_learner_origin . base_learners : if not base_learner . job_status == 'finished' : continue in_search_space = True for key , val in iteritems ( non_searchable_params ) : if base_learner . hyperparameters [ key ] != val : in_search_space = False break if in_search_space : existing_base_learners . append ( base_learner ) target = [ ] initialization_dict = dict ( ( key , list ( ) ) for key in module . pbounds . keys ( ) ) for base_learner in existing_base_learners : all_numerical = True for key in module . pbounds . keys ( ) : if not isinstance ( base_learner . hyperparameters [ key ] , numbers . Number ) : all_numerical = False break if not all_numerical : continue for key in module . pbounds . keys ( ) : initialization_dict [ key ] . append ( base_learner . hyperparameters [ key ] ) target . append ( base_learner . individual_score [ module . metric_to_optimize ] ) initialization_dict [ 'target' ] = target if not module . invert_metric else list ( map ( lambda x : - x , target ) ) print ( '{} existing in initialization dictionary' . format ( len ( initialization_dict [ 'target' ] ) ) ) func_to_optimize = return_func_to_optimize ( path , session , automated_run . base_learner_origin , module . default_params , module . metric_to_optimize , module . invert_metric , set ( module . integers ) ) bo = BayesianOptimization ( func_to_optimize , module . pbounds ) bo . initialize ( initialization_dict ) np . random . seed ( random_state ) bo . maximize ( ** module . maximize_config ) | Starts naive bayes automated run |
50,635 | def start_tpot ( automated_run , session , path ) : module = functions . import_string_code_as_module ( automated_run . source ) extraction = session . query ( models . Extraction ) . first ( ) X , y = extraction . return_train_dataset ( ) tpot_learner = module . tpot_learner tpot_learner . fit ( X , y ) temp_filename = os . path . join ( path , 'tpot-temp-export-{}' . format ( os . getpid ( ) ) ) tpot_learner . export ( temp_filename ) with open ( temp_filename ) as f : base_learner_source = f . read ( ) base_learner_source = constants . tpot_learner_docstring + base_learner_source try : os . remove ( temp_filename ) except OSError : pass blo = models . BaseLearnerOrigin ( source = base_learner_source , name = 'TPOT Learner' , meta_feature_generator = 'predict' ) session . add ( blo ) session . commit ( ) | Starts a TPOT automated run that exports directly to base learner setup |
50,636 | def start_greedy_ensemble_search ( automated_run , session , path ) : module = functions . import_string_code_as_module ( automated_run . source ) assert module . metric_to_optimize in automated_run . base_learner_origin . metric_generators best_ensemble = [ ] secondary_learner = automated_run . base_learner_origin . return_estimator ( ) secondary_learner . set_params ( ** module . secondary_learner_hyperparameters ) for i in range ( module . max_num_base_learners ) : best_score = - float ( 'inf' ) current_ensemble = best_ensemble [ : ] for base_learner in session . query ( models . BaseLearner ) . filter_by ( job_status = 'finished' ) . all ( ) : if base_learner in current_ensemble : continue current_ensemble . append ( base_learner ) existing_ensemble = session . query ( models . StackedEnsemble ) . filter_by ( base_learner_origin_id = automated_run . base_learner_origin . id , secondary_learner_hyperparameters = secondary_learner . get_params ( ) , base_learner_ids = sorted ( [ bl . id for bl in current_ensemble ] ) ) . first ( ) if existing_ensemble and existing_ensemble . job_status == 'finished' : score = existing_ensemble . individual_score [ module . metric_to_optimize ] elif existing_ensemble and existing_ensemble . job_status != 'finished' : eval_stacked_ensemble ( existing_ensemble , session , path ) score = existing_ensemble . individual_score [ module . metric_to_optimize ] else : stacked_ensemble = models . StackedEnsemble ( secondary_learner_hyperparameters = secondary_learner . get_params ( ) , base_learners = current_ensemble , base_learner_origin = automated_run . base_learner_origin , job_status = 'started' ) session . add ( stacked_ensemble ) session . commit ( ) eval_stacked_ensemble ( stacked_ensemble , session , path ) score = stacked_ensemble . individual_score [ module . metric_to_optimize ] score = - score if module . invert_metric else score if best_score < score : best_score = score best_ensemble = current_ensemble [ : ] current_ensemble . pop ( ) | Starts an automated ensemble search using greedy forward model selection . |
50,637 | def extraction_data_statistics ( path ) : with functions . DBContextManager ( path ) as session : extraction = session . query ( models . Extraction ) . first ( ) X , y = extraction . return_main_dataset ( ) functions . verify_dataset ( X , y ) if extraction . test_dataset [ 'method' ] == 'split_from_main' : X , X_test , y , y_test = train_test_split ( X , y , test_size = extraction . test_dataset [ 'split_ratio' ] , random_state = extraction . test_dataset [ 'split_seed' ] , stratify = y ) elif extraction . test_dataset [ 'method' ] == 'source' : if 'source' not in extraction . test_dataset or not extraction . test_dataset [ 'source' ] : raise exceptions . UserError ( 'Source is empty' ) extraction_code = extraction . test_dataset [ "source" ] extraction_function = functions . import_object_from_string_code ( extraction_code , "extract_test_dataset" ) X_test , y_test = extraction_function ( ) else : X_test , y_test = None , None extraction_code = extraction . meta_feature_generation [ 'source' ] return_splits_iterable = functions . import_object_from_string_code ( extraction_code , 'return_splits_iterable' ) number_of_splits = 0 test_indices = [ ] try : for train_idx , test_idx in return_splits_iterable ( X , y ) : number_of_splits += 1 test_indices . append ( test_idx ) except Exception as e : raise exceptions . UserError ( 'User code exception' , exception_message = str ( e ) ) test_indices = np . concatenate ( test_indices ) X , y = X [ test_indices ] , y [ test_indices ] extraction_code = extraction . stacked_ensemble_cv [ 'source' ] return_splits_iterable = functions . import_object_from_string_code ( extraction_code , 'return_splits_iterable' ) number_of_splits_stacked_cv = 0 try : for train_idx , test_idx in return_splits_iterable ( X , y ) : number_of_splits_stacked_cv += 1 except Exception as e : raise exceptions . UserError ( 'User code exception' , exception_message = str ( e ) ) data_stats = dict ( ) data_stats [ 'train_data_stats' ] = functions . verify_dataset ( X , y ) if X_test is not None : data_stats [ 'test_data_stats' ] = functions . verify_dataset ( X_test , y_test ) else : data_stats [ 'test_data_stats' ] = None data_stats [ 'holdout_data_stats' ] = { 'number_of_splits' : number_of_splits } data_stats [ 'stacked_ensemble_cv_stats' ] = { 'number_of_splits' : number_of_splits_stacked_cv } extraction . data_statistics = data_stats session . add ( extraction ) session . commit ( ) | Generates data statistics for the given data extraction setup stored in Xcessiv notebook . |
50,638 | def generate_meta_features ( path , base_learner_id ) : with functions . DBContextManager ( path ) as session : base_learner = session . query ( models . BaseLearner ) . filter_by ( id = base_learner_id ) . first ( ) if not base_learner : raise exceptions . UserError ( 'Base learner {} ' 'does not exist' . format ( base_learner_id ) ) base_learner . job_id = get_current_job ( ) . id base_learner . job_status = 'started' session . add ( base_learner ) session . commit ( ) try : est = base_learner . return_estimator ( ) extraction = session . query ( models . Extraction ) . first ( ) X , y = extraction . return_train_dataset ( ) return_splits_iterable = functions . import_object_from_string_code ( extraction . meta_feature_generation [ 'source' ] , 'return_splits_iterable' ) meta_features_list = [ ] trues_list = [ ] for train_index , test_index in return_splits_iterable ( X , y ) : X_train , X_test = X [ train_index ] , X [ test_index ] y_train , y_test = y [ train_index ] , y [ test_index ] est = est . fit ( X_train , y_train ) meta_features_list . append ( getattr ( est , base_learner . base_learner_origin . meta_feature_generator ) ( X_test ) ) trues_list . append ( y_test ) meta_features = np . concatenate ( meta_features_list , axis = 0 ) y_true = np . concatenate ( trues_list ) for key in base_learner . base_learner_origin . metric_generators : metric_generator = functions . import_object_from_string_code ( base_learner . base_learner_origin . metric_generators [ key ] , 'metric_generator' ) base_learner . individual_score [ key ] = metric_generator ( y_true , meta_features ) meta_features_path = base_learner . meta_features_path ( path ) if not os . path . exists ( os . path . dirname ( meta_features_path ) ) : os . makedirs ( os . path . dirname ( meta_features_path ) ) np . save ( meta_features_path , meta_features , allow_pickle = False ) base_learner . job_status = 'finished' base_learner . meta_features_exists = True session . add ( base_learner ) session . commit ( ) except : session . rollback ( ) base_learner . job_status = 'errored' base_learner . description [ 'error_type' ] = repr ( sys . exc_info ( ) [ 0 ] ) base_learner . description [ 'error_value' ] = repr ( sys . exc_info ( ) [ 1 ] ) base_learner . description [ 'error_traceback' ] = traceback . format_exception ( * sys . exc_info ( ) ) session . add ( base_learner ) session . commit ( ) raise | Generates meta - features for specified base learner |
50,639 | def start_automated_run ( path , automated_run_id ) : with functions . DBContextManager ( path ) as session : automated_run = session . query ( models . AutomatedRun ) . filter_by ( id = automated_run_id ) . first ( ) if not automated_run : raise exceptions . UserError ( 'Automated run {} ' 'does not exist' . format ( automated_run_id ) ) automated_run . job_id = get_current_job ( ) . id automated_run . job_status = 'started' session . add ( automated_run ) session . commit ( ) try : if automated_run . category == 'bayes' : automatedruns . start_naive_bayes ( automated_run , session , path ) elif automated_run . category == 'tpot' : automatedruns . start_tpot ( automated_run , session , path ) elif automated_run . category == 'greedy_ensemble_search' : automatedruns . start_greedy_ensemble_search ( automated_run , session , path ) else : raise Exception ( 'Something went wrong. Invalid category for automated run' ) automated_run . job_status = 'finished' session . add ( automated_run ) session . commit ( ) except : session . rollback ( ) automated_run . job_status = 'errored' automated_run . description [ 'error_type' ] = repr ( sys . exc_info ( ) [ 0 ] ) automated_run . description [ 'error_value' ] = repr ( sys . exc_info ( ) [ 1 ] ) automated_run . description [ 'error_traceback' ] = traceback . format_exception ( * sys . exc_info ( ) ) session . add ( automated_run ) session . commit ( ) raise | Starts automated run . This will automatically create base learners until the run finishes or errors out . |
50,640 | def hash_file ( path , block_size = 65536 ) : sha256 = hashlib . sha256 ( ) with open ( path , 'rb' ) as f : for block in iter ( lambda : f . read ( block_size ) , b'' ) : sha256 . update ( block ) return sha256 . hexdigest ( ) | Returns SHA256 checksum of a file |
50,641 | def import_object_from_path ( path , object ) : with open ( path ) as f : return import_object_from_string_code ( f . read ( ) , object ) | Used to import an object from an absolute path . |
50,642 | def import_object_from_string_code ( code , object ) : sha256 = hashlib . sha256 ( code . encode ( 'UTF-8' ) ) . hexdigest ( ) module = imp . new_module ( sha256 ) try : exec_ ( code , module . __dict__ ) except Exception as e : raise exceptions . UserError ( 'User code exception' , exception_message = str ( e ) ) sys . modules [ sha256 ] = module try : return getattr ( module , object ) except AttributeError : raise exceptions . UserError ( "{} not found in code" . format ( object ) ) | Used to import an object from arbitrary passed code . |
50,643 | def import_string_code_as_module ( code ) : sha256 = hashlib . sha256 ( code . encode ( 'UTF-8' ) ) . hexdigest ( ) module = imp . new_module ( sha256 ) try : exec_ ( code , module . __dict__ ) except Exception as e : raise exceptions . UserError ( 'User code exception' , exception_message = str ( e ) ) sys . modules [ sha256 ] = module return module | Used to run arbitrary passed code as a module |
50,644 | def verify_dataset ( X , y ) : X_shape , y_shape = np . array ( X ) . shape , np . array ( y ) . shape if len ( X_shape ) != 2 : raise exceptions . UserError ( "X must be 2-dimensional array" ) if len ( y_shape ) != 1 : raise exceptions . UserError ( "y must be 1-dimensional array" ) if X_shape [ 0 ] != y_shape [ 0 ] : raise exceptions . UserError ( "X must have same number of elements as y" ) return dict ( features_shape = X_shape , labels_shape = y_shape ) | Verifies if a dataset is valid for use i . e . scikit - learn format |
50,645 | def make_serializable ( json ) : new_dict = dict ( ) for key , value in iteritems ( json ) : if is_valid_json ( value ) : new_dict [ key ] = value return new_dict | This function ensures that the dictionary is JSON serializable . If not , keys with non - serializable values are removed from the return value . |
50,646 | def get_sample_dataset ( dataset_properties ) : kwargs = dataset_properties . copy ( ) data_type = kwargs . pop ( 'type' ) if data_type == 'multiclass' : try : X , y = datasets . make_classification ( random_state = 8 , ** kwargs ) splits = model_selection . StratifiedKFold ( n_splits = 2 , random_state = 8 ) . split ( X , y ) except Exception as e : raise exceptions . UserError ( repr ( e ) ) elif data_type == 'iris' : X , y = datasets . load_iris ( return_X_y = True ) splits = model_selection . StratifiedKFold ( n_splits = 2 , random_state = 8 ) . split ( X , y ) elif data_type == 'mnist' : X , y = datasets . load_digits ( return_X_y = True ) splits = model_selection . StratifiedKFold ( n_splits = 2 , random_state = 8 ) . split ( X , y ) elif data_type == 'breast_cancer' : X , y = datasets . load_breast_cancer ( return_X_y = True ) splits = model_selection . StratifiedKFold ( n_splits = 2 , random_state = 8 ) . split ( X , y ) elif data_type == 'boston' : X , y = datasets . load_boston ( return_X_y = True ) splits = model_selection . KFold ( n_splits = 2 , random_state = 8 ) . split ( X ) elif data_type == 'diabetes' : X , y = datasets . load_diabetes ( return_X_y = True ) splits = model_selection . KFold ( n_splits = 2 , random_state = 8 ) . split ( X ) else : raise exceptions . UserError ( 'Unknown dataset type {}' . format ( dataset_properties [ 'type' ] ) ) return X , y , splits | Returns sample dataset |
50,647 | def verify_estimator_class ( est , meta_feature_generator , metric_generators , dataset_properties ) : X , y , splits = get_sample_dataset ( dataset_properties ) if not hasattr ( est , "get_params" ) : raise exceptions . UserError ( 'Estimator does not have get_params method' ) if not hasattr ( est , "set_params" ) : raise exceptions . UserError ( 'Estimator does not have set_params method' ) if not hasattr ( est , meta_feature_generator ) : raise exceptions . UserError ( 'Estimator does not have meta-feature generator' ' {}' . format ( meta_feature_generator ) ) performance_dict = dict ( ) true_labels = [ ] preds = [ ] try : for train_index , test_index in splits : X_train , X_test = X [ train_index ] , X [ test_index ] y_train , y_test = y [ train_index ] , y [ test_index ] est . fit ( X_train , y_train ) true_labels . append ( y_test ) preds . append ( getattr ( est , meta_feature_generator ) ( X_test ) ) true_labels = np . concatenate ( true_labels ) preds = np . concatenate ( preds , axis = 0 ) except Exception as e : raise exceptions . UserError ( repr ( e ) ) if preds . shape [ 0 ] != true_labels . shape [ 0 ] : raise exceptions . UserError ( 'Estimator\'s meta-feature generator ' 'does not produce valid shape' ) for key in metric_generators : metric_generator = import_object_from_string_code ( metric_generators [ key ] , 'metric_generator' ) try : performance_dict [ key ] = metric_generator ( true_labels , preds ) except Exception as e : raise exceptions . UserError ( repr ( e ) ) return performance_dict , make_serializable ( est . get_params ( ) ) | Verify if estimator object is valid for use i . e . scikit - learn format |
50,648 | def get_path_from_query_string ( req ) : if req . args . get ( 'path' ) is None : raise exceptions . UserError ( 'Path not found in query string' ) return req . args . get ( 'path' ) | Gets path from query string |
50,649 | def return_main_dataset ( self ) : if not self . main_dataset [ 'source' ] : raise exceptions . UserError ( 'Source is empty' ) extraction_code = self . main_dataset [ "source" ] extraction_function = functions . import_object_from_string_code ( extraction_code , "extract_main_dataset" ) try : X , y = extraction_function ( ) except Exception as e : raise exceptions . UserError ( 'User code exception' , exception_message = str ( e ) ) X , y = np . array ( X ) , np . array ( y ) return X , y | Returns main data set from self |
50,650 | def return_train_dataset ( self ) : X , y = self . return_main_dataset ( ) if self . test_dataset [ 'method' ] == 'split_from_main' : X , X_test , y , y_test = train_test_split ( X , y , test_size = self . test_dataset [ 'split_ratio' ] , random_state = self . test_dataset [ 'split_seed' ] , stratify = y ) return X , y | Returns train data set |
50,651 | def return_estimator ( self ) : extraction_code = self . source estimator = functions . import_object_from_string_code ( extraction_code , "base_learner" ) return estimator | Returns estimator from base learner origin |
50,652 | def export_as_file ( self , filepath , hyperparameters ) : if not filepath . endswith ( '.py' ) : filepath += '.py' file_contents = '' file_contents += self . source file_contents += '\n\nbase_learner.set_params(**{})\n' . format ( hyperparameters ) file_contents += '\nmeta_feature_generator = "{}"\n' . format ( self . meta_feature_generator ) with open ( filepath , 'wb' ) as f : f . write ( file_contents . encode ( 'utf8' ) ) | Generates a Python file with the importable base learner set to hyperparameters |
50,653 | def return_estimator ( self ) : estimator = self . base_learner_origin . return_estimator ( ) estimator = estimator . set_params ( ** self . hyperparameters ) return estimator | Returns base learner using its origin and the given hyperparameters |
50,654 | def meta_features_path ( self , path ) : return os . path . join ( path , app . config [ 'XCESSIV_META_FEATURES_FOLDER' ] , str ( self . id ) ) + '.npy' | Returns path for meta - features |
50,655 | def delete_meta_features ( self , path ) : if os . path . exists ( self . meta_features_path ( path ) ) : os . remove ( self . meta_features_path ( path ) ) | Deletes meta - features of base learner if it exists |
50,656 | def return_secondary_learner ( self ) : estimator = self . base_learner_origin . return_estimator ( ) estimator = estimator . set_params ( ** self . secondary_learner_hyperparameters ) return estimator | Returns secondary learner using its origin and the given hyperparameters |
50,657 | def export_as_code ( self , cv_source ) : rand_value = '' . join ( random . choice ( string . ascii_uppercase + string . digits ) for _ in range ( 25 ) ) base_learner_code = '' base_learner_code += 'base_learner_list_{} = []\n' . format ( rand_value ) base_learner_code += 'meta_feature_generators_list_{} = []\n\n' . format ( rand_value ) for idx , base_learner in enumerate ( self . base_learners ) : base_learner_code += '################################################\n' base_learner_code += '###### Code for building base learner {} ########\n' . format ( idx + 1 ) base_learner_code += '################################################\n' base_learner_code += base_learner . base_learner_origin . source base_learner_code += '\n\n' base_learner_code += 'base_learner' '.set_params(**{})\n' . format ( base_learner . hyperparameters ) base_learner_code += 'base_learner_list_{}.append(base_learner)\n' . format ( rand_value ) base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n' . format ( rand_value , base_learner . base_learner_origin . meta_feature_generator ) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '##### Code for building secondary learner ######\n' base_learner_code += '################################################\n' base_learner_code += self . base_learner_origin . source base_learner_code += '\n\n' base_learner_code += 'base_learner' '.set_params(**{})\n' . format ( self . secondary_learner_hyperparameters ) base_learner_code += 'secondary_learner_{} = base_learner\n' . format ( rand_value ) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '############## Code for CV method ##############\n' base_learner_code += '################################################\n' base_learner_code += cv_source base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '######## Code for Xcessiv stacker class ########\n' base_learner_code += '################################################\n' stacker_file_loc = os . path . join ( os . path . abspath ( os . path . dirname ( __file__ ) ) , 'stacker.py' ) with open ( stacker_file_loc ) as f2 : base_learner_code += f2 . read ( ) base_learner_code += '\n\n' ' def {}(self, X):\n' ' return self._process_using_' 'meta_feature_generator(X, "{}")\n\n' . format ( self . base_learner_origin . meta_feature_generator , self . base_learner_origin . meta_feature_generator ) base_learner_code += '\n\n' base_learner_code += 'base_learner = XcessivStackedEnsemble' '(base_learners=base_learner_list_{},' ' meta_feature_generators=meta_feature_generators_list_{},' ' secondary_learner=secondary_learner_{},' ' cv_function=return_splits_iterable)\n' . format ( rand_value , rand_value , rand_value ) return base_learner_code | Returns a string value that contains the Python code for the ensemble |
50,658 | def export_as_file ( self , file_path , cv_source ) : if os . path . exists ( file_path ) : raise exceptions . UserError ( '{} already exists' . format ( file_path ) ) with open ( file_path , 'wb' ) as f : f . write ( self . export_as_code ( cv_source ) . encode ( 'utf8' ) ) | Export the ensemble as a single Python file and saves it to file_path . |
50,659 | def export_as_package ( self , package_path , cv_source ) : if os . path . exists ( package_path ) : raise exceptions . UserError ( '{} already exists' . format ( package_path ) ) package_name = os . path . basename ( os . path . normpath ( package_path ) ) os . makedirs ( package_path ) with open ( os . path . join ( package_path , '__init__.py' ) , 'wb' ) as f : f . write ( 'from {}.builder import xcessiv_ensemble' . format ( package_name ) . encode ( 'utf8' ) ) os . makedirs ( os . path . join ( package_path , 'baselearners' ) ) open ( os . path . join ( package_path , 'baselearners' , '__init__.py' ) , 'a' ) . close ( ) for idx , base_learner in enumerate ( self . base_learners ) : base_learner . export_as_file ( os . path . join ( package_path , 'baselearners' , 'baselearner' + str ( idx ) ) ) self . base_learner_origin . export_as_file ( os . path . join ( package_path , 'metalearner' ) , self . secondary_learner_hyperparameters ) with open ( os . path . join ( package_path , 'cv.py' ) , 'wb' ) as f : f . write ( cv_source . encode ( 'utf8' ) ) ensemble_source = '' stacker_file_loc = os . path . join ( os . path . abspath ( os . path . dirname ( __file__ ) ) , 'stacker.py' ) with open ( stacker_file_loc ) as f : ensemble_source += f . read ( ) ensemble_source += '\n\n' ' def {}(self, X):\n' ' return self._process_using_' 'meta_feature_generator(X, "{}")\n\n' . format ( self . base_learner_origin . meta_feature_generator , self . base_learner_origin . meta_feature_generator ) with open ( os . path . join ( package_path , 'stacker.py' ) , 'wb' ) as f : f . write ( ensemble_source . encode ( 'utf8' ) ) builder_source = '' for idx , base_learner in enumerate ( self . base_learners ) : builder_source += 'from {}.baselearners import baselearner{}\n' . format ( package_name , idx ) builder_source += 'from {}.cv import return_splits_iterable\n' . format ( package_name ) builder_source += 'from {} import metalearner\n' . format ( package_name ) builder_source += 'from {}.stacker import XcessivStackedEnsemble\n' . format ( package_name ) builder_source += '\nbase_learners = [\n' for idx , base_learner in enumerate ( self . base_learners ) : builder_source += ' baselearner{}.base_learner,\n' . format ( idx ) builder_source += ']\n' builder_source += '\nmeta_feature_generators = [\n' for idx , base_learner in enumerate ( self . base_learners ) : builder_source += ' baselearner{}.meta_feature_generator,\n' . format ( idx ) builder_source += ']\n' builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' ' meta_feature_generators=meta_feature_generators,' ' secondary_learner=metalearner.base_learner,' ' cv_function=return_splits_iterable)\n' with open ( os . path . join ( package_path , 'builder.py' ) , 'wb' ) as f : f . write ( builder_source . encode ( 'utf8' ) ) | Exports the ensemble as a Python package and saves it to package_path . |
50,660 | def verify_full_extraction ( ) : path = functions . get_path_from_query_string ( request ) if request . method == 'POST' : rqtasks . extraction_data_statistics ( path ) with functions . DBContextManager ( path ) as session : extraction = session . query ( models . Extraction ) . first ( ) return jsonify ( extraction . data_statistics ) | This is an experimental endpoint to simultaneously verify data statistics and extraction for training test and holdout datasets . With this the other three verification methods will no longer be necessary . |
50,661 | def create_base_learner ( id ) : path = functions . get_path_from_query_string ( request ) with functions . DBContextManager ( path ) as session : base_learner_origin = session . query ( models . BaseLearnerOrigin ) . filter_by ( id = id ) . first ( ) if base_learner_origin is None : raise exceptions . UserError ( 'Base learner origin {} not found' . format ( id ) , 404 ) if not base_learner_origin . final : raise exceptions . UserError ( 'Base learner origin {} is not final' . format ( id ) ) req_body = request . get_json ( ) est = base_learner_origin . return_estimator ( ) hyperparameters = functions . import_object_from_string_code ( req_body [ 'source' ] , 'params' ) est . set_params ( ** hyperparameters ) hyperparameters = functions . make_serializable ( est . get_params ( ) ) base_learners = session . query ( models . BaseLearner ) . filter_by ( base_learner_origin_id = id , hyperparameters = hyperparameters ) . all ( ) if base_learners : raise exceptions . UserError ( 'Base learner exists with given hyperparameters' ) base_learner = models . BaseLearner ( hyperparameters , 'queued' , base_learner_origin ) if 'single_searches' not in base_learner_origin . description : base_learner_origin . description [ 'single_searches' ] = [ ] base_learner_origin . description [ 'single_searches' ] += ( [ req_body [ 'source' ] ] ) session . add ( base_learner ) session . add ( base_learner_origin ) session . commit ( ) with Connection ( get_redis_connection ( ) ) : rqtasks . generate_meta_features . delay ( path , base_learner . id ) return jsonify ( base_learner . serialize ) | This creates a single base learner from a base learner origin and queues it up |
50,662 | def search_base_learner ( id ) : path = functions . get_path_from_query_string ( request ) req_body = request . get_json ( ) if req_body [ 'method' ] == 'grid' : param_grid = functions . import_object_from_string_code ( req_body [ 'source' ] , 'param_grid' ) iterator = ParameterGrid ( param_grid ) elif req_body [ 'method' ] == 'random' : param_distributions = functions . import_object_from_string_code ( req_body [ 'source' ] , 'param_distributions' ) iterator = ParameterSampler ( param_distributions , n_iter = req_body [ 'n_iter' ] ) else : raise exceptions . UserError ( '{} not a valid search method' . format ( req_body [ 'method' ] ) ) with functions . DBContextManager ( path ) as session : base_learner_origin = session . query ( models . BaseLearnerOrigin ) . filter_by ( id = id ) . first ( ) if base_learner_origin is None : raise exceptions . UserError ( 'Base learner origin {} not found' . format ( id ) , 404 ) if not base_learner_origin . final : raise exceptions . UserError ( 'Base learner origin {} is not final' . format ( id ) ) learners = [ ] for params in iterator : est = base_learner_origin . return_estimator ( ) try : est . set_params ( ** params ) except Exception as e : print ( repr ( e ) ) continue hyperparameters = functions . make_serializable ( est . get_params ( ) ) base_learners = session . query ( models . BaseLearner ) . filter_by ( base_learner_origin_id = id , hyperparameters = hyperparameters ) . all ( ) if base_learners : continue base_learner = models . BaseLearner ( hyperparameters , 'queued' , base_learner_origin ) session . add ( base_learner ) session . commit ( ) with Connection ( get_redis_connection ( ) ) : rqtasks . generate_meta_features . delay ( path , base_learner . id ) learners . append ( base_learner ) if not learners : raise exceptions . UserError ( 'Created 0 new base learners' ) if req_body [ 'method' ] == 'grid' : if 'grid_searches' not in base_learner_origin . description : base_learner_origin . description [ 'grid_searches' ] = [ ] base_learner_origin . description [ 'grid_searches' ] += ( [ req_body [ 'source' ] ] ) elif req_body [ 'method' ] == 'random' : if 'random_searches' not in base_learner_origin . description : base_learner_origin . description [ 'random_searches' ] = [ ] base_learner_origin . description [ 'random_searches' ] += ( [ req_body [ 'source' ] ] ) session . add ( base_learner_origin ) session . commit ( ) return jsonify ( list ( map ( lambda x : x . serialize , learners ) ) ) | Creates a set of base learners from base learner origin using grid search and queues them up |
50,663 | def get_automated_runs ( ) : path = functions . get_path_from_query_string ( request ) if request . method == 'GET' : with functions . DBContextManager ( path ) as session : automated_runs = session . query ( models . AutomatedRun ) . all ( ) return jsonify ( list ( map ( lambda x : x . serialize , automated_runs ) ) ) if request . method == 'POST' : req_body = request . get_json ( ) with functions . DBContextManager ( path ) as session : base_learner_origin = None if req_body [ 'category' ] == 'bayes' or req_body [ 'category' ] == 'greedy_ensemble_search' : base_learner_origin = session . query ( models . BaseLearnerOrigin ) . filter_by ( id = req_body [ 'base_learner_origin_id' ] ) . first ( ) if base_learner_origin is None : raise exceptions . UserError ( 'Base learner origin {} not found' . format ( req_body [ 'base_learner_origin_id' ] ) , 404 ) if not base_learner_origin . final : raise exceptions . UserError ( 'Base learner origin {} is not final' . format ( req_body [ 'base_learner_origin_id' ] ) ) elif req_body [ 'category' ] == 'tpot' : pass else : raise exceptions . UserError ( 'Automated run category' ' {} not recognized' . format ( req_body [ 'category' ] ) ) module = functions . import_string_code_as_module ( req_body [ 'source' ] ) del module automated_run = models . AutomatedRun ( req_body [ 'source' ] , 'queued' , req_body [ 'category' ] , base_learner_origin ) session . add ( automated_run ) session . commit ( ) with Connection ( get_redis_connection ( ) ) : rqtasks . start_automated_run . delay ( path , automated_run . id ) return jsonify ( automated_run . serialize ) | Return all automated runs |
50,664 | def _process_using_meta_feature_generator ( self , X , meta_feature_generator ) : all_learner_meta_features = [ ] for idx , base_learner in enumerate ( self . base_learners ) : single_learner_meta_features = getattr ( base_learner , self . meta_feature_generators [ idx ] ) ( X ) if len ( single_learner_meta_features . shape ) == 1 : single_learner_meta_features = single_learner_meta_features . reshape ( - 1 , 1 ) all_learner_meta_features . append ( single_learner_meta_features ) all_learner_meta_features = np . concatenate ( all_learner_meta_features , axis = 1 ) out = getattr ( self . secondary_learner , meta_feature_generator ) ( all_learner_meta_features ) return out | Process using secondary learner meta - feature generator |
50,665 | def NewEvent ( type : str , id : UUID = None , data : JsonDict = None , metadata : JsonDict = None ) -> NewEventData : return NewEventData ( id or uuid4 ( ) , type , data , metadata ) | Build the data structure for a new event . |
50,666 | def from_bytes ( cls , data ) : len_username = int . from_bytes ( data [ 0 : 2 ] , byteorder = "big" ) offset_username = 2 + len_username username = data [ 2 : offset_username ] . decode ( "UTF-8" ) offset_password = 2 + offset_username len_password = int . from_bytes ( data [ offset_username : offset_password ] , byteorder = "big" ) pass_begin = offset_password pass_end = offset_password + len_password password = data [ pass_begin : pass_end ] . decode ( "UTF-8" ) return cls ( username , password ) | I am so sorry . |
50,667 | def connect ( host = "localhost" , port = 1113 , discovery_host = None , discovery_port = 2113 , username = None , password = None , loop = None , name = None , selector = select_random , ) -> Client : discovery = get_discoverer ( host , port , discovery_host , discovery_port , selector ) dispatcher = MessageDispatcher ( name = name , loop = loop ) connector = Connector ( discovery , dispatcher , name = name ) credential = msg . Credential ( username , password ) if username and password else None return Client ( connector , dispatcher , credential = credential ) | Create a new client . |
50,668 | async def start ( self ) : while True : try : data = await self . reader . read ( 8192 ) if self . _trace_enabled : self . _logger . trace ( "Received %d bytes from remote server:\n%s" , len ( data ) , msg . dump ( data ) , ) await self . process ( data ) except asyncio . CancelledError : return except : logging . exception ( "Unhandled error in Message Reader" ) raise | Loop forever reading messages and invoking the operation that caused them |
50,669 | async def ping ( self , conversation_id : uuid . UUID = None ) -> float : cmd = convo . Ping ( conversation_id = conversation_id or uuid . uuid4 ( ) ) result = await self . dispatcher . start_conversation ( cmd ) return await result | Send a message to the remote server to check liveness . |
50,670 | async def publish_event ( self , stream : str , type : str , body : Optional [ Any ] = None , id : Optional [ uuid . UUID ] = None , metadata : Optional [ Any ] = None , expected_version : int = - 2 , require_master : bool = False , ) -> None : event = msg . NewEvent ( type , id or uuid . uuid4 ( ) , body , metadata ) conversation = convo . WriteEvents ( stream , [ event ] , expected_version = expected_version , require_master = require_master , ) result = await self . dispatcher . start_conversation ( conversation ) return await result | Publish a single event to the EventStore . |
50,671 | async def get_event ( self , stream : str , event_number : int , resolve_links = True , require_master = False , correlation_id : uuid . UUID = None , ) -> msg . Event : correlation_id = correlation_id or uuid . uuid4 ( ) cmd = convo . ReadEvent ( stream , event_number , resolve_links , require_master , conversation_id = correlation_id , ) result = await self . dispatcher . start_conversation ( cmd ) return await result | Get a single event by stream and event number . |
50,672 | async def get ( self , stream : str , direction : msg . StreamDirection = msg . StreamDirection . Forward , from_event : int = 0 , max_count : int = 100 , resolve_links : bool = True , require_master : bool = False , correlation_id : uuid . UUID = None , ) : correlation_id = correlation_id cmd = convo . ReadStreamEvents ( stream , from_event , max_count , resolve_links , require_master , direction = direction , ) result = await self . dispatcher . start_conversation ( cmd ) return await result | Read a range of events from a stream . |
50,673 | async def get_all ( self , direction : msg . StreamDirection = msg . StreamDirection . Forward , from_position : Optional [ Union [ msg . Position , msg . _PositionSentinel ] ] = None , max_count : int = 100 , resolve_links : bool = True , require_master : bool = False , correlation_id : uuid . UUID = None , ) : correlation_id = correlation_id cmd = convo . ReadAllEvents ( msg . Position . for_direction ( direction , from_position ) , max_count , resolve_links , require_master , direction = direction , credentials = self . credential , ) result = await self . dispatcher . start_conversation ( cmd ) return await result | Read a range of events from the whole database . |
50,674 | async def iter ( self , stream : str , direction : msg . StreamDirection = msg . StreamDirection . Forward , from_event : int = None , batch_size : int = 100 , resolve_links : bool = True , require_master : bool = False , correlation_id : uuid . UUID = None , ) : correlation_id = correlation_id or uuid . uuid4 ( ) cmd = convo . IterStreamEvents ( stream , from_event , batch_size , resolve_links , direction = direction , credentials = self . credential , ) result = await self . dispatcher . start_conversation ( cmd ) iterator = await result async for event in iterator : yield event | Read through a stream of events until the end and then stop . |
50,675 | async def iter_all ( self , direction : msg . StreamDirection = msg . StreamDirection . Forward , from_position : Optional [ Union [ msg . Position , msg . _PositionSentinel ] ] = None , batch_size : int = 100 , resolve_links : bool = True , require_master : bool = False , correlation_id : Optional [ uuid . UUID ] = None , ) : correlation_id = correlation_id cmd = convo . IterAllEvents ( msg . Position . for_direction ( direction , from_position ) , batch_size , resolve_links , require_master , direction , self . credential , correlation_id , ) result = await self . dispatcher . start_conversation ( cmd ) iterator = await result async for event in iterator : yield event | Read through all the events in the database . |
50,676 | async def subscribe_to ( self , stream , start_from = - 1 , resolve_link_tos = True , batch_size : int = 100 ) : if start_from == - 1 : cmd : convo . Conversation = convo . SubscribeToStream ( stream , resolve_link_tos , credentials = self . credential ) else : cmd = convo . CatchupSubscription ( stream , start_from , batch_size , credential = self . credential ) future = await self . dispatcher . start_conversation ( cmd ) return await future | Subscribe to receive notifications when a new event is published to a stream . |
50,677 | def prefer_master ( nodes : List [ DiscoveredNode ] ) -> Optional [ DiscoveredNode ] : return max ( nodes , key = attrgetter ( "state" ) ) | Select the master if available otherwise fall back to a replica . |
50,678 | def prefer_replica ( nodes : List [ DiscoveredNode ] ) -> Optional [ DiscoveredNode ] : masters = [ node for node in nodes if node . state == NodeState . Master ] replicas = [ node for node in nodes if node . state != NodeState . Master ] if replicas : return random . choice ( replicas ) else : return masters [ 0 ] | Select a random replica if any are available or fall back to the master . |
50,679 | def create_event_handler ( event_type , handler ) : target_name = '{hash}_{event_type}' . format ( hash = hash ( handler ) , event_type = event_type ) def handle_comm_opened ( comm , msg ) : @ comm . on_msg def _handle_msg ( msg ) : data = msg [ 'content' ] [ 'data' ] event = json . loads ( data ) return_value = handler ( event ) if return_value : comm . send ( return_value ) comm . send ( 'Comm target "{target_name}" registered by vdom' . format ( target_name = target_name ) ) if get_ipython ( ) : get_ipython ( ) . kernel . comm_manager . register_target ( target_name , handle_comm_opened ) return target_name | Register a comm and return a serializable object with target name |
50,680 | def to_json ( el , schema = None ) : if type ( el ) is str : json_el = el elif type ( el ) is list : json_el = list ( map ( to_json , el ) ) elif type ( el ) is dict : assert 'tagName' in el json_el = el . copy ( ) if 'attributes' not in el : json_el [ 'attributes' ] = { } if 'children' not in el : json_el [ 'children' ] = [ ] elif isinstance ( el , VDOM ) : json_el = el . to_dict ( ) else : json_el = el if schema : try : validate ( instance = json_el , schema = schema , cls = Draft4Validator ) except ValidationError as e : raise ValidationError ( _validate_err_template . format ( schema , e ) ) return json_el | Convert an element to VDOM JSON |
50,681 | def create_component ( tag_name , allow_children = True ) : def _component ( * children , ** kwargs ) : if 'children' in kwargs : children = kwargs . pop ( 'children' ) else : if len ( children ) == 1 and isinstance ( children [ 0 ] , list ) : children = tuple ( children [ 0 ] ) style = None event_handlers = None if 'style' in kwargs : style = kwargs . pop ( 'style' ) attributes = dict ( ** kwargs ) if 'attributes' in kwargs : attributes = kwargs [ 'attributes' ] for key , value in attributes . items ( ) : if callable ( value ) : attributes = attributes . copy ( ) if event_handlers == None : event_handlers = { key : attributes . pop ( key ) } else : event_handlers [ key ] = attributes . pop ( key ) if not allow_children and children : raise ValueError ( '<{tag_name} /> cannot have children' . format ( tag_name = tag_name ) ) v = VDOM ( tag_name , attributes , style , children , None , event_handlers ) return v return _component | Create a component for an HTML Tag |
50,682 | def validate ( self , schema ) : try : validate ( instance = self . to_dict ( ) , schema = schema , cls = Draft4Validator ) except ValidationError as e : raise ValidationError ( _validate_err_template . format ( VDOM_SCHEMA , e ) ) | Validate VDOM against given JSON Schema |
50,683 | def to_dict ( self ) : attributes = dict ( self . attributes . items ( ) ) if self . style : attributes . update ( { "style" : dict ( self . style . items ( ) ) } ) vdom_dict = { 'tagName' : self . tag_name , 'attributes' : attributes } if self . event_handlers : event_handlers = dict ( self . event_handlers . items ( ) ) for key , value in event_handlers . items ( ) : value = create_event_handler ( key , value ) event_handlers [ key ] = value vdom_dict [ 'eventHandlers' ] = event_handlers if self . key : vdom_dict [ 'key' ] = self . key vdom_dict [ 'children' ] = [ c . to_dict ( ) if isinstance ( c , VDOM ) else c for c in self . children ] return vdom_dict | Converts VDOM object to a dictionary that passes our schema |
50,684 | def _guess_type ( mrz_lines ) : try : if len ( mrz_lines ) == 3 : return 'TD1' elif len ( mrz_lines ) == 2 and len ( mrz_lines [ 0 ] ) < 40 and len ( mrz_lines [ 1 ] ) < 40 : return 'MRVB' if mrz_lines [ 0 ] [ 0 ] . upper ( ) == 'V' else 'TD2' elif len ( mrz_lines ) == 2 : return 'MRVA' if mrz_lines [ 0 ] [ 0 ] . upper ( ) == 'V' else 'TD3' else : return None except Exception : return None | Guesses the type of the MRZ from given lines . Returns TD1 TD2 TD3 MRVA MRVB or None . The algorithm is basically just counting lines looking at their length and checking whether the first character is a V |
50,685 | def remove_component ( self , name ) : if name not in self . components : raise Exception ( "No component named %s" % name ) del self . components [ name ] del self . depends [ name ] for p in self . provides [ name ] : del self . whoprovides [ p ] self . invalidate ( p ) del self . provides [ name ] | Removes an existing component with a given name invalidating all the values computed by the previous component . |
50,686 | def replace_component ( self , name , callable , provides = None , depends = None ) : self . remove_component ( name ) self . add_component ( name , callable , provides , depends ) | Changes an existing component with a given name invalidating all the values computed by the previous component and its successors . |
50,687 | def invalidate ( self , key ) : if key not in self . data : return del self . data [ key ] for cname in self . components : if key in self . depends [ cname ] : for downstream_key in self . provides [ cname ] : self . invalidate ( downstream_key ) | Remove the given data item along with all items that depend on it in the graph . |
50,688 | def ocr ( img , mrz_mode = True , extra_cmdline_params = '' ) : input_file_name = '%s.bmp' % _tempnam ( ) output_file_name_base = '%s' % _tempnam ( ) output_file_name = "%s.txt" % output_file_name_base try : if str ( img . dtype ) . startswith ( 'float' ) and np . nanmin ( img ) >= 0 and np . nanmax ( img ) <= 1 : img = img . astype ( np . float64 ) * ( np . power ( 2.0 , 8 ) - 1 ) + 0.499999999 img = img . astype ( np . uint8 ) imwrite ( input_file_name , img ) if mrz_mode : config = ( "--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><" " -c load_system_dawg=F -c load_freq_dawg=F {}" ) . format ( extra_cmdline_params ) else : config = "{}" . format ( extra_cmdline_params ) pytesseract . run_tesseract ( input_file_name , output_file_name_base , 'txt' , lang = None , config = config ) if sys . version_info . major == 3 : f = open ( output_file_name , encoding = 'utf-8' ) else : f = open ( output_file_name ) try : return f . read ( ) . strip ( ) finally : f . close ( ) finally : pytesseract . cleanup ( input_file_name ) pytesseract . cleanup ( output_file_name ) | Runs Tesseract on a given image . Writes an intermediate tempfile and then runs the tesseract command on the image . |
50,689 | def approx_equal ( self , center , width , height , angle , tol = 1e-6 ) : "Method mainly useful for testing" return abs ( self . cx - center [ 0 ] ) < tol and abs ( self . cy - center [ 1 ] ) < tol and abs ( self . width - width ) < tol and abs ( self . height - height ) < tol and abs ( self . angle - angle ) < tol | Method mainly useful for testing |
50,690 | def rotated ( self , rotation_center , angle ) : rot = np . array ( [ [ np . cos ( angle ) , np . sin ( angle ) ] , [ - np . sin ( angle ) , np . cos ( angle ) ] ] ) t = np . asfarray ( rotation_center ) new_c = np . dot ( rot . T , ( self . center - t ) ) + t return RotatedBox ( new_c , self . width , self . height , ( self . angle + angle ) % ( np . pi * 2 ) ) | Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle . |
50,691 | def as_poly ( self , margin_width = 0 , margin_height = 0 ) : v_hor = ( self . width / 2 + margin_width ) * np . array ( [ np . cos ( self . angle ) , np . sin ( self . angle ) ] ) v_vert = ( self . height / 2 + margin_height ) * np . array ( [ - np . sin ( self . angle ) , np . cos ( self . angle ) ] ) c = np . array ( [ self . cx , self . cy ] ) return np . vstack ( [ c - v_hor - v_vert , c + v_hor - v_vert , c + v_hor + v_vert , c - v_hor + v_vert ] ) | Converts this box to a polygon i . e . 4x2 array representing the four corners starting from lower left to upper left counterclockwise . |
50,692 | def extract_from_image ( self , img , scale = 1.0 , margin_width = 5 , margin_height = 5 ) : rotate_by = ( np . pi / 2 - self . angle ) * 180 / np . pi img_rotated = transform . rotate ( img , angle = rotate_by , center = [ self . center [ 1 ] * scale , self . center [ 0 ] * scale ] , resize = True ) shift_c , shift_r = self . _compensate_rotation_shift ( img , scale ) r1 = max ( int ( ( self . center [ 0 ] - self . height / 2 - margin_height ) * scale - shift_r ) , 0 ) r2 = int ( ( self . center [ 0 ] + self . height / 2 + margin_height ) * scale - shift_r ) c1 = max ( int ( ( self . center [ 1 ] - self . width / 2 - margin_width ) * scale - shift_c ) , 0 ) c2 = int ( ( self . center [ 1 ] + self . width / 2 + margin_width ) * scale - shift_c ) return img_rotated [ r1 : r2 , c1 : c2 ] | Extracts the contents of this box from a given image . For that the image is unrotated by the appropriate angle and the corresponding part is extracted from it . |
50,693 | def read_mrz ( file , save_roi = False , extra_cmdline_params = '' ) : p = MRZPipeline ( file , extra_cmdline_params ) mrz = p . result if mrz is not None : mrz . aux [ 'text' ] = p [ 'text' ] if save_roi : mrz . aux [ 'roi' ] = p [ 'roi' ] return mrz | The main interface function to this module encapsulating the recognition pipeline . Given an image filename runs MRZPipeline on it returning the parsed MRZ object . |
50,694 | def _imread ( self , file ) : img = skimage_io . imread ( file , as_gray = self . as_gray , plugin = 'imageio' ) if img is not None and len ( img . shape ) != 2 : img = skimage_io . imread ( file , as_gray = self . as_gray , plugin = 'matplotlib' ) return img | Proxy to skimage . io . imread with some fixes . |
50,695 | def _are_aligned_angles ( self , b1 , b2 ) : "Are two boxes aligned according to their angle?" return abs ( b1 - b2 ) <= self . angle_tol or abs ( np . pi - abs ( b1 - b2 ) ) <= self . angle_tol | Are two boxes aligned according to their angle? |
50,696 | def _are_nearby_parallel_boxes ( self , b1 , b2 ) : "Are two boxes nearby, parallel, and similar in width?" if not self . _are_aligned_angles ( b1 . angle , b2 . angle ) : return False angle = min ( b1 . angle , b2 . angle ) return abs ( np . dot ( b1 . center - b2 . center , [ - np . sin ( angle ) , np . cos ( angle ) ] ) ) < self . lineskip_tol * ( b1 . height + b2 . height ) and ( b1 . width > 0 ) and ( b2 . width > 0 ) and ( 0.5 < b1 . width / b2 . width < 2.0 ) | Are two boxes nearby parallel and similar in width? |
50,697 | def _merge_any_two_boxes ( self , box_list ) : n = len ( box_list ) for i in range ( n ) : for j in range ( i + 1 , n ) : if self . _are_nearby_parallel_boxes ( box_list [ i ] , box_list [ j ] ) : a , b = box_list [ i ] , box_list [ j ] merged_points = np . vstack ( [ a . points , b . points ] ) merged_box = RotatedBox . from_points ( merged_points , self . box_type ) if merged_box . width / merged_box . height >= self . min_box_aspect : box_list . remove ( a ) box_list . remove ( b ) box_list . append ( merged_box ) return True return False | Given a list of boxes finds two nearby parallel ones and merges them . Returns false if none found . |
50,698 | def _try_larger_image ( self , roi , cur_text , cur_mrz , filter_order = 3 ) : if roi . shape [ 1 ] <= 700 : scale_by = int ( 1050.0 / roi . shape [ 1 ] + 0.5 ) roi_lg = transform . rescale ( roi , scale_by , order = filter_order , mode = 'constant' , multichannel = False , anti_aliasing = True ) new_text = ocr ( roi_lg , extra_cmdline_params = self . extra_cmdline_params ) new_mrz = MRZ . from_ocr ( new_text ) new_mrz . aux [ 'method' ] = 'rescaled(%d)' % filter_order if new_mrz . valid_score > cur_mrz . valid_score : cur_mrz = new_mrz cur_text = new_text return cur_text , cur_mrz | Attempts to improve the OCR result by scaling the image . If the new mrz is better returns it otherwise returns the old mrz . |
50,699 | def mrz ( ) : parser = argparse . ArgumentParser ( description = 'Run the MRZ OCR recognition algorithm on the given image.' ) parser . add_argument ( 'filename' ) parser . add_argument ( '--json' , action = 'store_true' , help = 'Produce JSON (rather than tabular) output' ) parser . add_argument ( '--legacy' , action = 'store_true' , help = 'Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better ' 'results. It is not the default option, because it will only work if ' 'your Tesseract installation includes the legacy *.traineddata files. You can download them at ' 'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016' ) parser . add_argument ( '-r' , '--save-roi' , default = None , help = 'Output the region of the image that is detected to contain the MRZ to the given png file' ) parser . add_argument ( '--version' , action = 'version' , version = 'PassportEye MRZ v%s' % passporteye . __version__ ) args = parser . parse_args ( ) try : extra_params = '--oem 0' if args . legacy else '' filename , mrz_ , walltime = process_file ( ( args . filename , args . save_roi is not None , extra_params ) ) except TesseractNotFoundError : sys . stderr . write ( "ERROR: The tesseract executable was not found.\n" "Please, make sure Tesseract is installed and the appropriate directory is included " "in your PATH environment variable.\n" ) sys . exit ( 1 ) except TesseractError as ex : sys . stderr . write ( "ERROR: %s" % ex . message ) sys . exit ( ex . status ) d = mrz_ . to_dict ( ) if mrz_ is not None else { 'mrz_type' : None , 'valid' : False , 'valid_score' : 0 } d [ 'walltime' ] = walltime d [ 'filename' ] = filename if args . save_roi is not None and mrz_ is not None and 'roi' in mrz_ . aux : io . imsave ( args . save_roi , mrz_ . aux [ 'roi' ] ) if not args . json : for k in d : print ( "%s\t%s" % ( k , str ( d [ k ] ) ) ) else : print ( json . dumps ( d , indent = 2 ) ) | Command - line script for extracting MRZ from a given image |
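For illustration, a minimal usage sketch of the `read_mrz` entry point documented in row 50,693, with `to_dict` as used by the CLI in row 50,699. The input path `passport.jpg` is an assumption for the example, not part of the dataset.

```python
# Minimal sketch: run the MRZ pipeline on an image file (row 50,693).
# 'passport.jpg' is a hypothetical input path.
from passporteye import read_mrz

mrz = read_mrz('passport.jpg', save_roi=True)
if mrz is not None:
    d = mrz.to_dict()  # parsed fields, as the CLI in row 50,699 prints them
    print(d['mrz_type'], d['valid_score'])
```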
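Rows 50,685–50,687 together describe a small dependency-graph cache: removing or replacing a component invalidates every value it provided, and `invalidate` cascades to all downstream values. A self-contained sketch of that cascade, with hypothetical component and key names:

```python
# Sketch of the recursive invalidation in row 50,687.
# depends maps component -> input keys; provides maps component -> output keys.
# The concrete graph here ('ocr', 'parse', ...) is illustrative only.
depends  = {'ocr': ['image'], 'parse': ['text']}
provides = {'ocr': ['text'],  'parse': ['fields']}
data = {'image': b'...', 'text': 'P<UTO...', 'fields': {'type': 'TD3'}}

def invalidate(key):
    if key not in data:
        return
    del data[key]
    for cname, inputs in depends.items():
        if key in inputs:
            for downstream in provides[cname]:
                invalidate(downstream)

invalidate('image')
print(data)  # {} -- 'text' and 'fields' were invalidated transitively
```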
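The `as_poly` geometry in row 50,691 can also be checked numerically: the two half-extent vectors point along and perpendicular to the box angle, and the four corners are their signed sums around the center. A worked sketch with made-up box parameters:

```python
import numpy as np

# Corner computation from row 50,691; the box parameters are illustrative.
cx, cy, width, height, angle = 10.0, 5.0, 8.0, 2.0, np.pi / 6
v_hor  = (width / 2)  * np.array([np.cos(angle), np.sin(angle)])   # along the box
v_vert = (height / 2) * np.array([-np.sin(angle), np.cos(angle)])  # perpendicular
c = np.array([cx, cy])
poly = np.vstack([c - v_hor - v_vert, c + v_hor - v_vert,
                  c + v_hor + v_vert, c - v_hor + v_vert])
print(poly)  # 4x2 array: lower-left, lower-right, upper-right, upper-left
```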