idx: int64 (0 to 63k)
question: string (length 53 to 5.28k)
target: string (length 5 to 805)
61,000
def parse_version(output):
    for x in output.splitlines():
        match = VERSION_PATTERN.match(x)
        if match:
            return match.group('version').strip()
    return None
Parses the supplied output and returns the version string.
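A minimal usage sketch (VERSION_PATTERN is not shown in this record; the pattern below is a hypothetical stand-in):

import re
VERSION_PATTERN = re.compile(r'^Version (?P<version>[\d.]+)')  # hypothetical stand-in pattern
assert parse_version("Version 2.9.7.0\nRest of banner") == '2.9.7.0'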
61,001
def parse_alert(output):
    for x in output.splitlines():
        match = ALERT_PATTERN.match(x)
        if match:
            rec = {'timestamp': datetime.strptime(match.group('timestamp'), '%m/%d/%y-%H:%M:%S.%f'),
                   'sid': int(match.group('sid')),
                   'revision': int(match.group('revision')),
                   'priority': ...
Parses the supplied output and yields any alerts.
61,002
def run(self, pcap):
    proc = Popen(self._snort_cmd(pcap), stdout=PIPE, stderr=PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise Exception("\n".join(["Execution failed return code: {0}".format(proc.returncode), stderr or ""])) ...
Runs snort against the supplied pcap.
61,003
def run(self, pcap):
    tmpdir = None
    try:
        tmpdir = tempfile.mkdtemp(prefix='tmpsuri')
        proc = Popen(self._suri_cmd(pcap, tmpdir), stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise Exception("\n".join(["Executi...
Runs suricata against the supplied pcap.
61,004
def analyse_pcap(infile, filename):
    tmp = tempfile.NamedTemporaryFile(suffix=".pcap", delete=False)
    m = hashlib.md5()
    results = {'filename': filename, 'status': 'Failed', 'apiversion': __version__}
    try:
        size = 0
        while True:
            buf = infile.read(16384)
            if not buf:
                break
            tmp.write(buf...
Run IDS across the supplied file.
61,005
def submit_and_render():
    data = request.files.file
    template = env.get_template("results.html")
    if not data:
        pass
    results = analyse_pcap(data.file, data.filename)
    results.update(base)
    return template.render(results)
Blocking POST handler for file submission. Runs snort on supplied file and returns results as rendered html.
61,006
def api_submit():
    data = request.files.file
    response.content_type = 'application/json'
    if not data or not hasattr(data, 'file'):
        return json.dumps({"status": "Failed", "stderr": "Missing form params"})
    return json.dumps(analyse_pcap(data.file, data.filename), default=jsondate, in...
Blocking POST handler for file submission. Runs snort on supplied file and returns results as json text.
61,007
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-H", "--host", help="Web server Host address to bind to",
                        default="0.0.0.0", action="store", required=False)
    parser.add_argument("-p", "--port", help="Web server Port to bind to",
                        default=8080, action="store", re...
Main entrypoint for command-line webserver.
61,008
def is_pcap(pcap):
    with open(pcap, 'rb') as tmp:
        header = tmp.read(4)
        if header == b"\xa1\xb2\xc3\xd4" or header == b"\xd4\xc3\xb2\xa1":
            return True
        return False
Simple test for pcap magic bytes in supplied file.
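A quick sanity check of the magic-byte test (a sketch; writes the little-endian pcap magic to a temp file):

import tempfile
with tempfile.NamedTemporaryFile(suffix=".pcap", delete=False) as f:
    f.write(b"\xd4\xc3\xb2\xa1")  # little-endian pcap magic
assert is_pcap(f.name) is True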
61,009
def _run_ids(runner, pcap):
    run = {'name': runner.conf.get('name'),
           'module': runner.conf.get('module'),
           'ruleset': runner.conf.get('ruleset', 'default'),
           'status': STATUS_FAILED}
    try:
        run_start = datetime.now()
        version, alerts = runner.run(pcap)
        run['version'] = vers...
Runs the specified IDS runner.
61,010
def run(pcap):
    start = datetime.now()
    errors = []
    status = STATUS_FAILED
    analyses = []
    pool = ThreadPool(MAX_THREADS)
    try:
        if not is_pcap(pcap):
            raise Exception("Not a valid pcap file")
        runners = []
        for conf in Config().modules.values():
            runner = registry.get(conf['module'])
            if no...
Runs all configured IDS instances against the supplied pcap.
61,011
def _set_up_pool_config(self):
    self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS'])
    self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns)
    self._test_on_borrow = self.settings_dict["OPTIONS"].get('TES...
Helper to configure pool options during DatabaseWrapper initialization.
61,012
def _create_connection_pool(self, conn_params):
    connection_pools_lock.acquire()
    try:
        if (self.alias not in connection_pools
                or connection_pools[self.alias]['settings'] != self.settings_dict):
            logger.info("Creating connection pool for db alias %s" % self.alias)
            logger.info(" using MI...
Helper to initialize the connection pool.
61,013
def close(self):
    if self._wrapped_connection and self._pool:
        logger.debug("Returning connection %s to pool %s" % (self._wrapped_connection, self._pool))
        self._pool.putconn(self._wrapped_connection)
        self._wrapped_connection = None
Override to return the connection to the pool rather than closing it.
61,014
def b58encode_int(i, default_one=True):
    if not i and default_one:
        return alphabet[0]
    string = ""
    while i:
        i, idx = divmod(i, 58)
        string = alphabet[idx] + string
    return string
Encode an integer using Base58.
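Example use, assuming `alphabet` is the usual 58-character Bitcoin alphabet (an assumption; the record does not show its definition):

alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'  # assumed
assert b58encode_int(0) == '1'    # default_one maps 0 to the first symbol
assert b58encode_int(57) == 'z'   # last symbol of the alphabet
assert b58encode_int(58) == '21'  # one full wrap of the alphabet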
61,015
def breadcrumb_safe(context, label, viewname, *args, **kwargs):
    append_breadcrumb(context, _(label), viewname, args, kwargs)
    return ''
Same as breadcrumb but label is not escaped.
61,016
def breadcrumb_raw(context, label, viewname, *args, **kwargs):
    append_breadcrumb(context, escape(label), viewname, args, kwargs)
    return ''
Same as breadcrumb but label is not translated.
61,017
def breadcrumb_raw_safe(context, label, viewname, *args, **kwargs):
    append_breadcrumb(context, label, viewname, args, kwargs)
    return ''
Same as breadcrumb but label is neither escaped nor translated.
61,018
def render_breadcrumbs(context, *args):
    try:
        template_path = args[0]
    except IndexError:
        template_path = getattr(settings, 'BREADCRUMBS_TEMPLATE',
                                'django_bootstrap_breadcrumbs/bootstrap2.html')
    links = []
    for (label, viewname, view_args, view_kwargs) in context['request'].META.get(CONTEXT...
Render breadcrumbs html using bootstrap css classes.
61,019
def _find_symbol(self, module, name, fallback=None):
    if not hasattr(module, name) and fallback:
        return self._find_symbol(module, fallback, None)
    return getattr(module, name)
Find the symbol of the specified name inside the module or raise an exception.
61,020
def apply(self, incoming):
    assert len(incoming) == self.size
    self.incoming = incoming
    outgoing = self.activation(self.incoming)
    assert len(outgoing) == self.size
    self.outgoing = outgoing
Store the incoming activation, apply the activation function and store the result as outgoing activation.
61,021
def delta(self, above):
    return self.activation.delta(self.incoming, self.outgoing, above)
The derivative of the activation function at the current state.
61,022
def feed(self, weights, data):
    assert len(data) == self.layers[0].size
    self.layers[0].apply(data)
    connections = zip(self.layers[:-1], weights, self.layers[1:])
    for previous, weight, current in connections:
        incoming = self.forward(weight, previous.outgoing)
        current ...
Evaluate the network with alternative weights on the input data and return the output activation.
61,023
def _init_network(self):
    self.network = Network(self.problem.layers)
    self.weights = Matrices(self.network.shapes)
    if self.load:
        loaded = np.load(self.load)
        assert loaded.shape == self.weights.shape, 'weights to load must match problem definition'
        self.weights.flat = loaded...
Define model and initialize weights.
61,024
def _init_training(self):
    if self.check:
        self.backprop = CheckedBackprop(self.network, self.problem.cost)
    else:
        self.backprop = BatchBackprop(self.network, self.problem.cost)
    self.momentum = Momentum()
    self.decent = GradientDecent()
    self.decay = WeightDecay()
    self.tying = W...
Instantiate the classes needed during training.
61,025
def _every(times, step_size, index):
    current = index * step_size
    step = current // times * times
    reached = current >= step
    overshot = current >= step + step_size
    return current and reached and not overshot
Given a loop over batches of an iterable and an operation that should be performed every few elements, determine whether the operation should be called for the current index.
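A worked example of the behaviour: with step_size=30 and times=100, only the index whose cumulative count has just crossed a multiple of 100 is truthy:

assert not _every(100, 30, 3)  # current = 90, below 100
assert _every(100, 30, 4)      # current = 120, just crossed 100
assert not _every(100, 30, 5)  # current = 150, past the 100..130 window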
61,026
def parse_tax_lvl(entry, tax_lvl_depth=[]):
    depth_and_name = re.match('^( *)(.*)', entry['sci_name'])
    depth = len(depth_and_name.group(1)) // 2
    name = depth_and_name.group(2)
    del tax_lvl_depth[depth:]
    tax_lvl_depth.append((entry['rank'], name))
    tax_lvl = {x[0]: x[1]...
Parse a single kraken-report entry and return a dictionary of taxa for its named ranks.
61,027
def parse_kraken_report(kdata, max_rank, min_rank):
    taxa = OrderedDict()
    counts = OrderedDict()
    r = 0
    max_rank_idx = ranks.index(max_rank)
    min_rank_idx = ranks.index(min_rank)
    for entry in kdata:
        erank = entry['rank'].strip()
        if erank in ranks:
            r = ranks.index(erank)
        tax_lvl = parse_ta...
Parse a single output file from the kraken-report tool. Return a list of counts at each of the acceptable taxonomic levels, and a list of NCBI IDs and a formatted string representing their taxonomic hierarchies.
61,028
def process_samples(kraken_reports_fp, max_rank, min_rank):
    taxa = OrderedDict()
    sample_counts = OrderedDict()
    for krep_fp in kraken_reports_fp:
        if not osp.isfile(krep_fp):
            raise RuntimeError("ERROR: File '{}' not found.".format(krep_fp))
        sample_id = osp.splitext(osp.split(krep_fp)[1...
Parse all kraken-report data files into a sample counts dict and store global taxon id -> taxonomy data.
61,029
def create_biom_table(sample_counts, taxa):
    data = [[0 if taxid not in sample_counts[sid] else sample_counts[sid][taxid]
             for sid in sample_counts] for taxid in taxa]
    data = np.array(data, dtype=int)
    tax_meta = [{'taxonomy': taxa[taxid]} for taxid in taxa]
    gen_str = "kraken-biom v{} ({}...
Create a BIOM table from sample counts and taxonomy metadata.
61,030
def write_biom(biomT, output_fp, fmt="hdf5", gzip=False):
    opener = open
    mode = 'w'
    if gzip and fmt != "hdf5":
        if not output_fp.endswith(".gz"):
            output_fp += ".gz"
        opener = gzip_open
        mode = 'wt'
    if fmt == "hdf5":
        opener = h5py.File
    with opener(output_fp, mode) as biom_f:
        if fmt == "json":
            biom...
Write the BIOM table to a file.
61,031
def write_otu_file(otu_ids, fp):
    fpdir = osp.split(fp)[0]
    if not fpdir == "" and not osp.isdir(fpdir):
        raise RuntimeError("Specified path does not exist: {}".format(fpdir))
    with open(fp, 'wt') as outf:
        outf.write('\n'.join(otu_ids))
Write out a file containing only the list of OTU IDs from the kraken data, one line per ID.
61,032
def transform(self, X):
    assert np.shape(X)[0] == len(self._weights), (
        'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '
        'n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights)))
    blended_predictions = np.average(np.po...
Performs predictions blending using the trained weights.
61,033
def fit_transform(self, X, y, step_size=0.1, init_weights=None, warm_start=False):
    self.fit(X=X, y=y, step_size=step_size, init_weights=init_weights, warm_start=warm_start)
    return self.transform(X=X)
Fit optimizer to X, then transform X. See fit and transform for further explanation.
61,034
def escape_tags(value, valid_tags):
    value = conditional_escape(value)
    if valid_tags:
        tag_re = re.compile(r'&lt;(\s*/?\s*(%s))(.*?\s*)&gt;' %
                            '|'.join(re.escape(tag) for tag in valid_tags))
        value = tag_re.sub(_replace_quot, value)
    value = value.replace("&lt;!--", "<!--").replace("...
Strips text from the given html string, leaving only tags. This functionality requires BeautifulSoup; nothing will be done otherwise.
61,035
def _get_seo_content_types(seo_models):
    try:
        return [ContentType.objects.get_for_model(m).id for m in seo_models]
    except Exception:
        return []
Returns a list of content types from the models defined in settings.
61,036
def register_seo_admin(admin_site, metadata_class):
    if metadata_class._meta.use_sites:
        path_admin = SitePathMetadataAdmin
        model_instance_admin = SiteModelInstanceMetadataAdmin
        model_admin = SiteModelMetadataAdmin
        view_admin = SiteViewMetadataAdmin
    else:
        path_admin = PathMetadataAdmin
        model_instance_admin = M...
Register the backends specified in Meta.backends with the admin.
61,037
def _construct_form(self, i, **kwargs):
    form = super(MetadataFormset, self)._construct_form(i, **kwargs)
    form.empty_permitted = False
    form.has_changed = lambda: True
    if self.instance:
        self.instance.__seo_metadata_handled = True
    return form
Override the method to change the form attribute empty_permitted.
61,038
def _get_metadata_model(name=None):
    if name is not None:
        try:
            return registry[name]
        except KeyError:
            if len(registry) == 1:
                valid_names = ('Try using the name "%s" or simply leaving it '
                               'out altogether.' % list(registry)[0])
            else:
                valid_names = "Valid names are " + ", ".join('"%s"' % k for k i...
Find registered Metadata object.
61,039
def _resolve_value(self, name):
    name = str(name)
    if name in self._metadata._meta.elements:
        element = self._metadata._meta.elements[name]
        if element.editable:
            value = getattr(self, name)
            if value:
                return value
        populate_from = element.populate_from
        if isinstance(populate_from, collect...
Returns an appropriate value for the given name.
61,040
def _urls_for_js(urls=None):
    if urls is None:
        from .urls import urlpatterns
        urls = [url.name for url in urlpatterns if getattr(url, 'name', None)]
    urls = dict(zip(urls, [get_uri_template(url) for url in urls]))
    urls.update(getattr(settings, 'LEAFLET_STORAGE_EXTRA_URLS', {}))
    retu...
Return templated URLs prepared for javascript.
61,041
def decorated_patterns(func, *urls):
    def decorate(urls, func):
        for url in urls:
            if isinstance(url, RegexURLPattern):
                url.__class__ = DecoratedURLPattern
                if not hasattr(url, "_decorate_with"):
                    setattr(url, "_decorate_with", [])
                url._decorate_with.append(func)
            elif isinstance(url, ...
Utility function to decorate a group of urls in urls.py.
61,042
def get_custom_fields(self):
    return CustomField.objects.filter(content_type=ContentType.objects.get_for_model(self))
Return a list of custom fields for this model.
61,043
def get_custom_field(self, field_name):
    content_type = ContentType.objects.get_for_model(self)
    return CustomField.objects.get(content_type=content_type, name=field_name)
Get a custom field object for this model. field_name - Name of the custom field you want.
61,044
def get_custom_value(self, field_name):
    custom_field = self.get_custom_field(field_name)
    return CustomFieldValue.objects.get_or_create(field=custom_field, object_id=self.id)[0].value
Get a value for a specified custom field. field_name - Name of the custom field you want.
61,045
def set_custom_value(self, field_name, value):
    custom_field = self.get_custom_field(field_name)
    custom_value = CustomFieldValue.objects.get_or_create(field=custom_field, object_id=self.id)[0]
    custom_value.value = value
    custom_value.save()
Set a value for a specified custom field. field_name - Name of the custom field you want. value - Value to set it to.
61,046
def assign_item(self, item, origin):
    closest_cluster = origin
    for cluster in self.__clusters:
        if self.distance(item, centroid(cluster)) < self.distance(item, centroid(closest_cluster)):
            closest_cluster = cluster
    if id(closest_cluster) != id(origin):
        self.move_item(item, origin, ...
Assigns an item from a given cluster to the closest located cluster.
61,047
def move_item(self, item, origin, destination):
    if self.equality:
        item_index = 0
        for i, element in enumerate(origin):
            if self.equality(element, item):
                item_index = i
                break
    else:
        item_index = origin.index(item)
    destination.append(origin.pop(item_index))
Moves an item from one cluster to another cluster.
61,048
def initialise_clusters(self, input_, clustercount):
    self.__clusters = []
    for _ in range(clustercount):
        self.__clusters.append([])
    count = 0
    for item in input_:
        self.__clusters[count % clustercount].append(item)
        count += 1
Initialises the clusters by distributing the items from the data evenly across n clusters.
61,049
def publish_progress(self, total, current):
    if self.progress_callback:
        self.progress_callback(total, current)
If a progress function was supplied, this will call that function with the total number of elements and the remaining number of elements.
61,050
def set_linkage_method(self, method):
    if method == 'single':
        self.linkage = single
    elif method == 'complete':
        self.linkage = complete
    elif method == 'average':
        self.linkage = average
    elif method == 'uclus':
        self.linkage = uclus
    elif hasattr(method, '__call__'):
        self.linkage = method
    else:
        raise...
Sets the method to determine the distance between two clusters.
61,051
def cluster(self, matrix=None, level=None, sequence=None):
    logger.info("Performing cluster()")
    if matrix is None:
        level = 0
        sequence = 0
        matrix = []
    linkage = partial(self.linkage, distance_function=self.distance)
    initial_element_count = len(self._data)
    while len(matrix) > 2 or mat...
Perform hierarchical clustering.
61,052
def flatten(L):
    if not isinstance(L, list):
        return [L]
    if L == []:
        return L
    return flatten(L[0]) + flatten(L[1:])
Flattens a list.
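For example:

assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]
assert flatten(7) == [7]  # a non-list input is wrapped in a list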
61,053
def fullyflatten(container):
    flattened_items = []
    for item in container:
        if hasattr(item, 'items'):
            flattened_items = flattened_items + fullyflatten(item.items)
        else:
            flattened_items.append(item)
    return flattened_items
Completely flattens out a cluster and returns a one-dimensional set containing the cluster's items. This is useful in cases where some items of the cluster are clusters in their own right and you only want the items.
61,054
def minkowski_distance(x, y, p=2):
    from math import pow
    assert len(y) == len(x)
    assert len(x) >= 1
    sum = 0
    for i in range(len(x)):
        sum += abs(x[i] - y[i]) ** p
    return pow(sum, 1.0 / float(p))
Calculates the minkowski distance between two points.
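Worked example: p=2 gives the Euclidean distance, p=1 the Manhattan distance:

assert minkowski_distance([0, 0], [3, 4]) == 5.0        # sqrt(3**2 + 4**2)
assert minkowski_distance([0, 0], [3, 4], p=1) == 7.0   # |3| + |4|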
61,055
def magnitude(a):
    "calculates the magnitude of a vector"
    from math import sqrt
    sum = 0
    for coord in a:
        sum += coord ** 2
    return sqrt(sum)
calculates the magnitude of a vector
61,056
def dotproduct(a, b):
    "Calculates the dotproduct between two vectors"
    assert len(a) == len(b)
    out = 0
    for i in range(len(a)):
        out += a[i] * b[i]
    return out
Calculates the dotproduct between two vectors
61,057
def centroid(data, method=median):
    "returns the central vector of a list of vectors"
    out = []
    for i in range(len(data[0])):
        out.append(method([x[i] for x in data]))
    return tuple(out)
returns the central vector of a list of vectors
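Example usage; the default `median` comes from the surrounding module, so statistics.median is substituted here as an equivalent stand-in:

from statistics import median
assert centroid([(0, 0), (2, 2), (4, 10)], method=median) == (2, 2)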
61,058
def display(self, depth=0):
    print(depth * " " + "[level %s]" % self.level)
    for item in self.items:
        if isinstance(item, Cluster):
            item.display(depth + 1)
        else:
            print(depth * " " + "%s" % item)
Pretty-prints this cluster. Useful for debugging.
61,059
def getlevel(self, threshold):
    left = self.items[0]
    right = self.items[1]
    if self.level <= threshold:
        return [fullyflatten(self.items)]
    if isinstance(left, Cluster) and left.level <= threshold:
        if isinstance(right, Cluster):
            return [fullyflatten(left.items)] + right.getleve...
Retrieve all clusters up to a specific level threshold. This level threshold represents the maximum distance between two clusters, so the lower you set this threshold, the more clusters you will receive; the higher you set it, the fewer but bigger clusters you will receive.
61,060
def jsmin(js, **kwargs):
    if not is_3:
        if cStringIO and not isinstance(js, unicode):
            klass = cStringIO.StringIO
        else:
            klass = StringIO.StringIO
    else:
        klass = io.StringIO
    ins = klass(js)
    outs = klass()
    JavascriptMinify(ins, outs, **kwargs).minify()
    return outs.getvalue()
returns a minified version of the javascript string
61,061
def cached(fun):
    _cache = {}
    @wraps(fun)
    def newfun(a, b, distance_function):
        frozen_a = frozenset(a)
        frozen_b = frozenset(b)
        if (frozen_a, frozen_b) not in _cache:
            result = fun(a, b, distance_function)
            _cache[(frozen_a, frozen_b)] = result
        return _cache[(frozen_a, frozen_b)]
    r...
memoizing decorator for linkage functions.
61,062
def single(a, b, distance_function):
    left_a, right_a = min(a), max(a)
    left_b, right_b = min(b), max(b)
    result = min(distance_function(left_a, right_b), distance_function(left_b, right_a))
    return result
Given two collections a and b, this will return the distance of the points which are closest together. distance_function is used to determine the distance between two elements.
61,063
def average(a, b, distance_function):
    distances = [distance_function(x, y) for x in a for y in b]
    return sum(distances) / len(distances)
Given two collections a and b, this will return the mean of all distances. distance_function is used to determine the distance between two elements.
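For example, with a plain absolute-difference metric:

assert average([1, 2], [4, 6], lambda x, y: abs(x - y)) == 3.5  # mean of 3, 5, 2, 4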
61,064
def worker(self):
    tasks_completed = 0
    for task in iter(self.task_queue.get, 'STOP'):
        col_index, item, item2 = task
        if not hasattr(item, '__iter__') or isinstance(item, tuple):
            item = [item]
        if not hasattr(item2, '__iter__') or isinstance(item2, tuple):
            item2 = [item2]
        result = (col...
Multiprocessing task function run by worker processes.
61,065
def genmatrix(self, num_processes=1):
    use_multiprocessing = num_processes > 1
    if use_multiprocessing:
        self.task_queue = Queue()
        self.done_queue = Queue()
    self.matrix = []
    logger.info("Generating matrix for %s items - O(n^2)", len(self.data))
    if use_multiprocessing:
        logger.info("Using ...
Actually generate the matrix.
61,066
def validate(fname):
    validation = {"errors": [], "warnings": []}
    for line in _process(fname):
        kind, message = _determine(line)
        if kind in validation:
            validation[kind].append(message)
    return validation
This function uses dciodvfy to generate a list of warnings and errors discovered within the DICOM file.
61,067
def numpy(self):
    image_reader = gdcm.ImageReader()
    image_reader.SetFileName(self.fname)
    if not image_reader.Read():
        raise IOError("Could not read DICOM image")
    pixel_array = self._gdcm_to_numpy(image_reader.GetImage())
    return pixel_array
Grabs image data and converts it to a numpy array.
61,068
def _gdcm_to_numpy(self, image):
    gdcm_typemap = {gdcm.PixelFormat.INT8: numpy.int8,
                    gdcm.PixelFormat.UINT8: numpy.uint8,
                    gdcm.PixelFormat.UINT16: numpy.uint16,
                    gdcm.PixelFormat.INT16: numpy.int16,
                    gdcm.PixelFormat.UINT32: numpy.uint32,
                    gdcm.PixelFormat.INT32: numpy.i...
Converts a GDCM image to a numpy array.
61,069
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None, cmap=None, format=None, origin=None):
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    from pylab import cm
    if pixel_array is None:
        pixel_array = self.nump...
This method saves the image from a numpy array using matplotlib.
61,070
def read(self):
    def ds(data_element):
        value = self._str_filter.ToStringPair(data_element.GetTag())
        if value[1]:
            return DataElement(data_element, value[0].strip(), value[1].strip())
    results = [data for data in self.walk(ds) if data is not None]
    return results
Returns array of dictionaries containing all the data elements in the DICOM file.
61,071
def walk(self, fn):
    if not hasattr(fn, "__call__"):
        raise TypeError()
    dataset = self._dataset
    iterator = dataset.GetDES().begin()
    while not iterator.equal(dataset.GetDES().end()):
        data_element = iterator.next()
        yield fn(data_element)
    header = self._header
    iterator = head...
Loops through all data elements and allows a function to interact with each data element. Uses a generator to improve iteration.
61,072
def find(self, group=None, element=None, name=None, VR=None):
    results = self.read()
    if name is not None:
        def find_name(data_element):
            return data_element.name.lower() == name.lower()
        return filter(find_name, results)
    if group is not None:
        def find_group(data_element):
            return ...
Searches for data elements in the DICOM file given the filters supplied to this method.
61,073
def anonymize(self):
    self._anon_obj = gdcm.Anonymizer()
    self._anon_obj.SetFile(self._file)
    self._anon_obj.RemoveGroupLength()
    if self._anon_tags is None:
        self._anon_tags = get_anon_tags()
    for tag in self._anon_tags:
        cur_tag = tag['Tag'].replace("(", "")
        cur_tag = cur_tag.re...
According to PS 3.15-2008, basic application-level de-identification of a DICOM file requires replacing the values of a set of data elements.
61,074
def image(self):
    if self._image is None:
        self._image = Image(self.fname)
    return self._image
Read the loaded DICOM image data.
61,075
def repo_name(self):
    ds = [[x.repo_name] for x in self.repos]
    df = pd.DataFrame(ds, columns=['repository'])
    return df
Returns a DataFrame of the repo names present in this project directory.
61,076
def command(self):
    print('pynYNAB CSV import')
    args = self.parser.parse_args()
    verify_common_args(args)
    verify_csvimport(args.schema, args.accountname)
    client = clientfromkwargs(**args)
    delta = do_csvimport(args, client)
    client.push(expected_delta=delta)
Manually import a CSV into a nYNAB budget.
61,077
def command(self):
    print('pynYNAB OFX import')
    args = self.parser.parse_args()
    verify_common_args(args)
    client = clientfromkwargs(**args)
    delta = do_ofximport(args.file, client)
    client.push(expected_delta=delta)
Manually import an OFX into a nYNAB budget.
61,078
def default_listener(col_attr, default):
    @event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
    def init_scalar(target, value, dict_):
        if default.is_callable:
            value = default.arg(None)
        elif default.is_scalar:
            value = default.arg
        else:
            raise NotImplementedError("...
Establish a default-setting listener.
61,079
def has_coverage(self):
    if os.path.exists(self.git_dir + os.sep + '.coverage'):
        try:
            with open(self.git_dir + os.sep + '.coverage', 'r') as f:
                blob = f.read()
                blob = blob.split('!')[2]
                json.loads(blob)
            return True
        except Exception:
            return False
    else:
        return False
Returns a boolean for whether a parseable .coverage file can be found in the repository.
61,080
def __check_extension(files, ignore_globs=None, include_globs=None):
    if include_globs is None or include_globs == []:
        include_globs = ['*']
    out = {}
    for key in files.keys():
        if ignore_globs is not None:
            count_exclude = sum([1 if fnmatch.fnmatch(key, g) else 0 for g in ignore_globs])
        else...
Internal method to filter a list of file changes by extension and ignore_dirs.
61,081
def _repo_name(self):
    if self._git_repo_name is not None:
        return self._git_repo_name
    else:
        reponame = self.repo.git_dir.split(os.sep)[-2]
        if reponame.strip() == '':
            return 'unknown_repo'
        return reponame
Returns the name of the repository using the local directory name.
61,082
def _decode(self, obj, context):
    return b''.join(map(int2byte, [c + 0x60 for c in bytearray(obj)])).decode("utf8")
Get the python representation of the obj.
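A standalone sketch of the transform (each byte is shifted up by 0x60 before UTF-8 decoding; int2byte is assumed to be the usual single-byte packer, as in six):

int2byte = lambda c: bytes([c])  # assumed stand-in
raw = b"\x01\x02\x03"
assert b''.join(map(int2byte, [c + 0x60 for c in bytearray(raw)])).decode("utf8") == 'abc'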
61,083
def update(self, instance, validated_data):
    model = self.Meta.model
    meta = self.Meta.model._meta
    original_virtual_fields = list(meta.virtual_fields)
    if hasattr(model, '_hstore_virtual_fields'):
        for field in model._hstore_virtual_fields.values():
            meta.virtual_fields.remove(field) ...
Temporarily remove hstore virtual fields, otherwise DRF considers them many2many.
61,084
def update_image(self, data):
    if 1 in data.shape:
        data = data.squeeze()
    if self.conf.contrast_level is not None:
        clevels = [self.conf.contrast_level, 100.0 - self.conf.contrast_level]
        imin, imax = np.percentile(data, clevels)
        data = np.clip((data - imin) / (imax - imin + 1.e-8...
update image on panel as quickly as possible
61,085
def set_viewlimits(self, axes=None):
    if axes is None:
        axes = self.axes
    xmin, xmax, ymin, ymax = self.data_range
    if len(self.conf.zoom_lims) > 1:
        zlims = self.conf.zoom_lims[-1]
        if axes in zlims:
            xmin, xmax, ymin, ymax = zlims[axes]
    xmin = max(self.data_range[0], xmin)
    xmax...
update xy limits of a plot
61,086
def zoom_leftup(self, event=None):
    if self.zoom_ini is None:
        return
    ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
    try:
        dx = abs(ini_x - event.x)
        dy = abs(ini_y - event.y)
    except:
        dx, dy = 0, 0
    t0 = time.time()
    self.rbbox = None
    self.zoom_ini = None
    if (dx > 3) and (dy > 3) and (...
leftup event handler for zoom mode in images
61,087
def collect_directories(self, directories):
    directories = util.to_absolute_paths(directories)
    if not self.recursive:
        return self._remove_blacklisted(directories)
    recursive_dirs = set()
    for dir_ in directories:
        walk_iter = os.walk(dir_, followlinks=True)
        walk_iter = [w[0] for w in walk_i...
Collects all the directories into a set object.
61,088
def remove_directories(self, directories):
    directories = util.to_absolute_paths(directories)
    self.plugin_directories = util.remove_from_set(self.plugin_directories, directories)
Removes any directories from the set of plugin directories.
61,089
def remove_blacklisted_directories(self, directories):
    directories = util.to_absolute_paths(directories)
    black_dirs = self.blacklisted_directories
    black_dirs = util.remove_from_set(black_dirs, directories)
Attempts to remove the directories from the set of blacklisted directories. If a particular directory is not found in the set of blacklisted directories, the method will continue on silently.
61,090
def _remove_blacklisted(self, directories):
    directories = util.to_absolute_paths(directories)
    directories = util.remove_from_set(directories, self.blacklisted_directories)
    return directories
Attempts to remove the blacklisted directories from directories and then returns whatever is left in the set.
61,091
def imread(filename, *args, **kwargs):
    with TIFFfile(filename) as tif:
        return tif.asarray(*args, **kwargs)
Return image data from TIFF file as numpy array.
61,092
def read_nih_image_header(fd, byte_order, dtype, count):
    fd.seek(12, 1)
    return {'version': struct.unpack(byte_order + 'H', fd.read(2))[0]}
Read NIH_IMAGE_HEADER tag from file and return as dictionary.
61,093
def read_mm_header(fd, byte_order, dtype, count):
    return numpy.rec.fromfile(fd, MM_HEADER, 1, byteorder=byte_order)[0]
Read MM_HEADER tag from file and return as numpy.rec.array.
61,094
def read_mm_uic1(fd, byte_order, dtype, count):
    t = fd.read(8 * count)
    t = struct.unpack('%s%iI' % (byte_order, 2 * count), t)
    return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) if k in MM_TAG_IDS)
Read MM_UIC1 tag from file and return as dictionary.
61,095
def read_mm_uic2(fd, byte_order, dtype, count):
    result = {'number_planes': count}
    values = numpy.fromfile(fd, byte_order + 'I', 6 * count)
    result['z_distance'] = values[0::6] // values[1::6]
    return result
Read MM_UIC2 tag from file and return as dictionary.
61,096
def read_mm_uic3(fd, byte_order, dtype, count):
    t = numpy.fromfile(fd, byte_order + 'I', 2 * count)
    return {'wavelengths': t[0::2] // t[1::2]}
Read MM_UIC3 tag from file and return as dictionary.
61,097
def read_cz_lsm_info(fd, byte_order, dtype, count):
    result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1, byteorder=byte_order)[0]
    # raises KeyError if the magic number is not a known LSM version
    {50350412: '1.3', 67127628: '2.0'}[result.magic_number]
    return result
Read CZ_LSM_INFO tag from file and return as numpy.rec.array.
61,098
def read_cz_lsm_scan_info(fd, byte_order):
    block = Record()
    blocks = [block]
    unpack = struct.unpack
    if 0x10000000 != struct.unpack(byte_order + "I", fd.read(4))[0]:
        raise ValueError("not a lsm_scan_info structure")
    fd.read(8)
    while True:
        entry, dtype, size = unpack(byte_order + "I...
Read LSM scan information from file and return as Record.
61,099
def _replace_by(module_function, warn=False):
    def decorate(func, module_function=module_function, warn=warn):
        sys.path.append(os.path.dirname(__file__))
        try:
            module, function = module_function.split('.')
            func, oldfunc = getattr(__import__(module), function), func
            globals(...
Try to replace the decorated function by module.function.