idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
48,700
def _find_quantile_level(density, x, y, xp, yp, quantile, acc=.01,
                         ret_err=False):
    """Iteratively find the density level that encloses a data quantile.

    The level is adjusted with a geometrically shrinking step until the
    fraction of events outside the closed contours matches the quantile
    within `acc`.  Returns the level (and the final error when
    `ret_err` is True).
    """
    if quantile >= 1 or quantile <= 0:
        raise ValueError("Invalid value for `quantile`: {}".format(quantile))
    bad = get_bad_vals(xp, yp)
    xp = xp[~bad]
    yp = yp[~bad]
    level = quantile
    err = 1
    itfac = 1
    nev = xp.size
    while np.abs(err) > acc:
        conts = find_contours_level(density, x, y, level, closed=True)
        isin = 0
        for ii in range(nev):
            for cc in conts:
                isin += PolygonFilter.point_in_poly((xp[ii], yp[ii]), poly=cc)
                # NOTE(review): the break means only the FIRST contour is
                # tested for each point -- confirm this is intentional.
                break
        err = quantile - (nev - isin) / nev
        level += err * itfac
        itfac *= .9
    if ret_err:
        return level, err
    return level
Find density level for a given data quantile by iteration
48,701
def search(query, query_type=DEFAULT_QUERY_TYPE):
    """Search the database using a parsed query."""
    statement, arguments = _build_search(query)
    if statement is None and arguments is None:
        # Nothing to search for -- return an empty result set.
        return QueryResults([], [], 'AND')
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute(statement, arguments)
            search_results = cursor.fetchall()
    return QueryResults(search_results, query, query_type)
Search database using parsed query .
48,702
def fix_quotes(cls, query_string):
    """Heuristically repair unbalanced quotes in `query_string`.

    Balanced input is returned unchanged.  Otherwise all `field:value`
    tokens are pulled out, a closing quote is appended to the remaining
    terms, and the field tokens are re-attached at the end.
    """
    if query_string.count('"') % 2 == 0:
        # Quotes are balanced; nothing to repair.
        return query_string

    captured = []

    def _capture(match):
        # Record the matched field token and remove it from the string.
        captured.append(match.string[match.start():match.end()])
        return ''

    terms = re.sub(r'[^\s:]*:("[^"]*"|[^\s]*)', _capture, query_string)
    return '{}" {}'.format(terms.strip(), ' '.join(captured))
Heuristic attempt to fix unbalanced quotes in query_string .
48,703
def from_raw_query(cls, query_string):
    """Parse a raw query string into a query instance.

    When the grammar cannot parse the input, attempt a single
    quote-repair pass and retry.  Stopword terms are dropped.
    """
    try:
        node_tree = grammar.parse(query_string)
    except IncompleteParseError:
        # Probably an unbalanced quote: repair and parse again.
        query_string = cls.fix_quotes(query_string)
        node_tree = grammar.parse(query_string)
    structured_query = DictFormater().visit(node_tree)
    terms = [t for t in structured_query if t[1].lower() not in STOPWORDS]
    return cls(terms)
Parse raw string to query .
48,704
def highlighted_abstract(self):
    """Highlight the found terms in the abstract text.

    Returns None when the database yields no row.
    """
    abstract_terms = self.fields.get('abstract', [])
    if abstract_terms:
        sql = _read_sql_file('highlighted-abstract')
    else:
        # No abstract search terms: fall back to the plain abstract.
        sql = _read_sql_file('get-abstract')
    arguments = {
        'id': self['id'],
        'query': ' & '.join(abstract_terms),
    }
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute(sql, arguments)
            hl_abstract = cursor.fetchone()
    if hl_abstract:
        return hl_abstract[0]
Highlight the found terms in the abstract text .
48,705
def highlighted_fulltext(self):
    """Highlight the found terms in the fulltext.

    Returns None when no fulltext search terms were used.
    """
    terms = self.fields.get('fulltext', [])
    if not terms:
        return None
    arguments = {
        'id': self['id'],
        'query': ' & '.join(terms),
    }
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute(_read_sql_file('highlighted-fulltext'),
                           arguments)
            hl_fulltext = cursor.fetchone()[0]
    return hl_fulltext
Highlight the found terms in the fulltext .
48,706
def pdf(self, data_predict=None):
    r"""Evaluate the probability density function.

    When `data_predict` is None the density is evaluated at the
    training data itself.
    """
    if data_predict is None:
        data_predict = self.data
    else:
        data_predict = _adjust_shape(data_predict, self.k_vars)
    pdf_est = []
    for i in range(np.shape(data_predict)[0]):
        # Kernel density at a single evaluation point, normalized by nobs.
        pdf_est.append(gpke(self.bw, data=self.data,
                            data_predict=data_predict[i, :],
                            var_type=self.var_type) / self.nobs)
    return np.squeeze(pdf_est)
r Evaluate the probability density function .
48,707
def find_xenon_grpc_jar():
    """Locate the xenon-grpc fat-jar under the Python prefix.

    Returns the path as a string, or None when it is not installed.
    """
    prefix = Path(sys.prefix)
    jar_name = 'xenon-grpc-{}-all.jar'.format(xenon_grpc_version)
    for location in (prefix / 'lib', prefix / 'local' / 'lib'):
        jar_file = location / jar_name
        if jar_file.exists():
            return str(jar_file)
    return None
Find the Xenon - GRPC jar - file windows version .
48,708
def send_keystrokes(ip, keystrokes, wait=False):
    """Send a sequence of keystrokes to the TV over HTTP.

    A 'wait' entry (or wait=True) sleeps 0.7 s instead of sending.
    Returns False on the first non-200 response, True otherwise.
    """
    tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
    for keystroke in keystrokes:
        if keystroke == 'wait' or wait is True:
            time.sleep(0.7)
        else:
            response = requests.get(tv_url + keystroke)
            if response.status_code != 200:
                return False
    return True
Connects to TV and sends keystroke via HTTP .
48,709
def mute(ip):
    """Polyfill for muting the TV by pressing volume-down 30 times.

    Returns False on the first failed request, True otherwise.
    """
    tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
    count = 0
    # FIX: the original used `while count > 30`, which never runs because
    # `count` starts at 0 -- the function returned True without doing
    # anything.  The loop must iterate while count is BELOW 30.
    while count < 30:
        count = count + 1
        request = requests.get(tv_url + 'volumedown')
        if request.status_code != 200:
            return False
    return True
Polyfill for muting the TV .
48,710
def in_book_search(request):
    """Full-text search within a single book; returns a JSON response."""
    args = request.matchdict
    ident_hash = args['ident_hash']
    args['search_term'] = request.params.get('q', '')
    query_type = request.params.get('query_type', '')
    # "OR" queries select the `_or` SQL variant; default is AND.
    combiner = '_or' if query_type.lower() == 'or' else ''
    id, version = split_ident_hash(ident_hash)
    args['uuid'] = id
    args['version'] = version
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            # Collated books use a different search statement.
            cursor.execute(SQL['get-collated-state'], args)
            res = cursor.fetchall()
            if res and res[0][0]:
                statement = SQL['get-in-collated-book-search']
            else:
                statement = SQL['get-in-book-search']
            cursor.execute(statement.format(combiner=combiner), args)
            res = cursor.fetchall()
    results = {}
    results['results'] = {'query': [], 'total': len(res), 'items': []}
    results['results']['query'] = {
        'id': ident_hash,
        'search_term': args['search_term'],
    }
    for uuid, version, title, snippet, matches, rank in res:
        results['results']['items'].append({
            'rank': '{}'.format(rank),
            'id': '{}@{}'.format(uuid, version),
            'title': '{}'.format(title),
            'snippet': '{}'.format(snippet),
            'matches': '{}'.format(matches),
        })
    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'application/json'
    resp.body = json.dumps(results)
    return resp
Full text in - book search .
48,711
def in_book_search_highlighted_results(request):
    """In-book search returning a highlighted HTML version of one page."""
    args = request.matchdict
    ident_hash = args['ident_hash']
    page_ident_hash = args['page_ident_hash']
    try:
        page_uuid, _ = split_ident_hash(page_ident_hash)
    except IdentHashShortId as e:
        # Short id: resolve to the full uuid.
        page_uuid = get_uuid(e.id)
    except IdentHashMissingVersion as e:
        page_uuid = e.id
    args['page_uuid'] = page_uuid
    args['search_term'] = request.params.get('q', '')
    query_type = request.params.get('query_type', '')
    # "OR" queries select the `_or` SQL variant; default is AND.
    combiner = '_or' if query_type.lower() == 'or' else ''
    id, version = split_ident_hash(ident_hash)
    args['uuid'] = id
    args['version'] = version
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            # Collated books use a different search statement.
            cursor.execute(SQL['get-collated-state'], args)
            res = cursor.fetchall()
            if res and res[0][0]:
                statement = SQL['get-in-collated-book-search-full-page']
            else:
                statement = SQL['get-in-book-search-full-page']
            cursor.execute(statement.format(combiner=combiner), args)
            res = cursor.fetchall()
    results = {}
    results['results'] = {'query': [], 'total': len(res), 'items': []}
    results['results']['query'] = {
        'search_term': args['search_term'],
        'collection_id': ident_hash,
    }
    for uuid, version, title, headline, rank in res:
        results['results']['items'].append({
            'rank': '{}'.format(rank),
            'id': '{}'.format(page_ident_hash),
            'title': '{}'.format(title),
            'html': '{}'.format(headline),
        })
    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'application/json'
    resp.body = json.dumps(results)
    return resp
In - book search - returns a highlighted version of the HTML .
48,712
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
         okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
    r"""Return the non-normalized Generalized Product Kernel Estimator.

    One kernel per variable type (c=continuous, o=ordered,
    u=unordered); the product over variables is divided by the product
    of the continuous bandwidths.
    """
    kertypes = dict(c=ckertype, o=okertype, u=ukertype)
    Kval = np.empty(data.shape)
    for ii, vtype in enumerate(var_type):
        func = kernel_func[kertypes[vtype]]
        Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
    iscontinuous = np.array([c == 'c' for c in var_type])
    dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
    if tosum:
        return dens.sum(axis=0)
    return dens
r Returns the non - normalized Generalized Product Kernel Estimator
48,713
def _compute_bw(self, bw):
    """Compute the bandwidth of the data.

    `bw` may be an explicit array (used as-is) or the name of a
    selection method; None defaults to 'normal_reference'.
    """
    if bw is None:
        bw = 'normal_reference'
    if not isinstance(bw, string_types):
        # User-provided bandwidth array.
        self._bw_method = "user-specified"
        return np.asarray(bw)
    self._bw_method = bw
    # Any unknown method name falls through to cross-validated
    # least squares, matching the original if/elif/else chain.
    dispatch = {
        'normal_reference': self._normal_reference,
        'cv_ml': self._cv_ml,
    }
    bwfunc = dispatch.get(bw, self._cv_ls)
    return bwfunc()
Computes the bandwidth of the data .
48,714
def _set_defaults ( self , defaults ) : self . n_res = defaults . n_res self . n_sub = defaults . n_sub self . randomize = defaults . randomize self . return_median = defaults . return_median self . efficient = defaults . efficient self . return_only_bw = defaults . return_only_bw self . n_jobs = defaults . n_jobs
Sets the default values for the efficient estimation
48,715
def get_version():
    """Return the package version from the [metadata] section of
    ../setup.cfg."""
    config = RawConfigParser()
    config.read(os.path.join('..', 'setup.cfg'))
    return config.get('metadata', 'version')
Return package version from setup . cfg
48,716
def to_string(self):
    """Serialize this SitemapIndex to XML bytes."""
    root = etree.Element('sitemapindex', nsmap={None: SITEMAP_NS})
    for sitemap in self.sitemaps:
        sm = etree.SubElement(root, 'sitemap')
        etree.SubElement(sm, 'loc').text = sitemap.url
        lastmod = sitemap.lastmod
        if hasattr(lastmod, 'strftime'):
            # date/datetime-like values are formatted as YYYY-MM-DD
            etree.SubElement(sm, 'lastmod').text = \
                lastmod.strftime('%Y-%m-%d')
        elif isinstance(lastmod, str):
            etree.SubElement(sm, 'lastmod').text = lastmod
    return etree.tostring(root, pretty_print=True, xml_declaration=True,
                          encoding='utf-8')
Convert SitemapIndex into a string .
48,717
def add_url(self, *args, **kwargs):
    """Add a url to the sitemap.

    Accepts either a ready-made UrlEntry or the UrlEntry constructor
    arguments.
    """
    if len(args) == 1 and not kwargs and isinstance(args[0], UrlEntry):
        entry = args[0]
    else:
        entry = UrlEntry(*args, **kwargs)
    self.urls.append(entry)
Add a new url to the sitemap .
48,718
def to_string(self):
    """Serialize the sitemap to XML bytes."""
    root = etree.Element('urlset', nsmap={None: SITEMAP_NS})
    for url in self.urls:
        # Each entry renders itself into the tree.
        url.generate(root)
    return etree.tostring(root, pretty_print=True, xml_declaration=True,
                          encoding='utf-8')
Convert the sitemap into a string .
48,719
def notblocked(page):
    """Return True when `page` is not matched by any blocked pattern."""
    for blocked in PAGES_TO_BLOCK:
        # Patterns are implicitly anchored with a leading wildcard.
        rule = blocked if blocked[0] == '*' else '*' + blocked
        if re.match(rule.replace('*', '[^$]*'), page):
            return False
    return True
Determine if given url is a page that should be in sitemap .
48,720
def sitemap_index(request):
    """Return a sitemap index xml file for search engines."""
    sitemaps = []
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            # NOTE(review): execute() is called WITHOUT a SQL statement,
            # which raises at runtime -- the per-author query appears to
            # be missing here.  Confirm against the original module.
            cursor.execute()
            for author, revised in cursor.fetchall():
                sitemaps.append(Sitemap(url=request.route_url('sitemap',
                                                              from_id=author),
                                        lastmod=revised))
    si = SitemapIndex(sitemaps=sitemaps)
    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'text/xml'
    # NOTE(review): assumes SitemapIndex instances are callable and
    # render the XML -- verify SitemapIndex defines __call__.
    resp.body = si()
    return resp
Return a sitemap index xml file for search engines .
48,721
def get_volume(cont, pos_x, pos_y, pix):
    """Calculate the volume of polygon contour(s) revolved around an axis.

    Each contour is centered on its event position, split into the
    parts on either side of the axis, and both partial volumes are
    revolved and averaged.  Returns a scalar for scalar input,
    otherwise an array (NaN where the contour was invalid).
    """
    if np.isscalar(pos_x):
        cont = [cont]
        ret_list = False
    else:
        ret_list = True
    pos_x = np.atleast_1d(pos_x)
    pos_y = np.atleast_1d(pos_y)
    if pos_x.size != pos_y.size:
        raise ValueError("Size of `pos_x` and `pos_y` must match!")
    if pos_x.size > 1 and len(cont) <= 1:
        raise ValueError("Number of given contours too small!")
    # NaN marks events whose contour has fewer than 4 points.
    v_avg = np.zeros_like(pos_x, dtype=float) * np.nan
    for ii in range(min(len(cont), pos_x.shape[0])):
        cc = cont[ii]
        if cc.shape[0] >= 4:
            # Center the contour on the event position (pixel units).
            contour_x = cc[:, 0] - pos_x[ii] / pix
            contour_y = cc[:, 1] - pos_y[ii] / pix
            contour_x, contour_y = counter_clockwise(contour_x, contour_y)
            # Split into the two halves relative to the rotation axis.
            ind_low = np.where(contour_y < 0)
            contour_y_low = np.copy(contour_y)
            contour_y_low[ind_low] = 0
            ind_upp = np.where(contour_y > 0)
            contour_y_upp = np.copy(contour_y)
            contour_y_upp[ind_upp] = 0
            # Close the contour and precompute segment differences.
            Z = contour_x
            Z = np.hstack([Z, Z[0]])
            Zp = Z[0:-1]
            dZ = Z[1:] - Zp
            contour_y_low = np.hstack([contour_y_low, contour_y_low[0]])
            contour_y_upp = np.hstack([contour_y_upp, contour_y_upp[0]])
            vol_low = _vol_helper(contour_y_low, Z, Zp, dZ, pix)
            vol_upp = _vol_helper(contour_y_upp, Z, Zp, dZ, pix)
            v_avg[ii] = (vol_low + vol_upp) / 2
    if not ret_list:
        v_avg = v_avg[0]
    return v_avg
Calculate the volume of a polygon revolved around an axis
48,722
def counter_clockwise(cx, cy):
    """Put contour coordinates into counter-clockwise order.

    The orientation is determined from the mean gradient of the
    unwrapped polar angle; a positive mean triggers a reversal.
    """
    angles = np.unwrap(np.arctan2(cy, cx))
    if np.average(np.gradient(angles)) > 0:
        return cx[::-1], cy[::-1]
    return cx, cy
Put contour coordinates into counter - clockwise order
48,723
def extras(request):
    """Return a JSON response with archive metadata for the webview.

    With a `key` in the matchdict only that item is returned; otherwise
    all known metadata items are collected.
    """
    key = request.matchdict.get('key', '').lstrip('/')
    key_map = {
        'languages': _get_available_languages_and_count,
        'subjects': _get_subject_list,
        'featured': _get_featured_links,
        'messages': _get_service_state_messages,
        'licenses': _get_licenses,
    }
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            if key:
                metadata = {key: key_map[key](cursor)}
            else:
                metadata = {name: fetch(cursor)
                            for (name, fetch) in key_map.items()}
    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'application/json'
    resp.body = json.dumps(metadata)
    return resp
Return a dict with archive metadata for webview .
48,724
def timeout(delay, call, *args, **kwargs):
    """Run `call(*args, **kwargs)` in a worker thread.

    Raises RuntimeError when the call does not finish within `delay`
    seconds; otherwise returns its result.
    """
    return_value = None

    def target():
        nonlocal return_value
        return_value = call(*args, **kwargs)

    worker = Thread(target=target)
    worker.start()
    worker.join(delay)
    if worker.is_alive():
        # join() timed out; the call is still running.
        raise RuntimeError("Operation did not complete within time.")
    return return_value
Run a function call with a timeout of `delay` seconds and raise a RuntimeError if the operation didn't complete in time.
48,725
def create_parser(name, description=None):
    """Create the common argparse parser for console scripts."""
    parser = argparse.ArgumentParser(prog=_gen_prog_name(name),
                                     description=description)
    parser.add_argument('config_uri', help="Configuration INI file.")
    parser.add_argument('--config-name', action='store', default='main',
                        help="Supply a section name in the configuration")
    return parser
Create an argument parser with the given name and description .
48,726
def get_app_settings_from_arguments(args):
    """Resolve argparse-style arguments into application settings."""
    config_filepath = os.path.abspath(args.config_uri)
    return get_appsettings(config_filepath, name=args.config_name)
Parse argparse style arguments into app settings .
48,727
def check_socket(host, port):
    """Return True when a TCP connection to (host, port) succeeds.

    Used to check whether the Xenon-GRPC server is running.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(sock):
        return sock.connect_ex((host, port)) == 0
Checks if port is open on host . This is used to check if the Xenon - GRPC server is running .
48,728
def get_secure_channel(crt_file, key_file, port=50051):
    """Create a grpc secure channel to this host on `port`.

    The certificate is also used as its own chain (self-signed setup,
    see create_self_signed_cert).
    """
    # FIX: the original opened three file handles (the certificate file
    # twice) and never closed them.  Read each file once inside a
    # context manager instead.
    with open(str(crt_file), 'rb') as fd:
        certificate = fd.read()
    with open(str(key_file), 'rb') as fd:
        private_key = fd.read()
    creds = grpc.ssl_channel_credentials(
        root_certificates=certificate,
        private_key=private_key,
        certificate_chain=certificate)
    address = "{}:{}".format(socket.gethostname(), port)
    return grpc.secure_channel(address, creds)
Try to connect over a secure channel .
48,729
def find_free_port():
    """Ask the OS for an unused TCP port and return its number."""
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        # Binding to port 0 lets the kernel pick a free port.
        sock.bind(('', 0))
        return sock.getsockname()[1]
Finds a free port .
48,730
def print_stream(file, name):
    """Forward each line of `file` to the 'xenon.<name>' logger."""
    logger = logging.getLogger('xenon.{}'.format(name))
    for line in file:
        logger.info('[{}] {}'.format(name, line.strip()))
Print stream from file to logger .
48,731
def init(port=None, do_not_exit=False, disable_tls=False,
         log_level='WARNING'):
    """Start the Xenon GRPC server on `port`, or return the already
    running server object.

    Unless `do_not_exit` is set, the server is shut down automatically
    when the interpreter exits.
    """
    logger = logging.getLogger('xenon')
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(style='{'))
    handler.setLevel(getattr(logging, log_level))
    logger.addHandler(handler)
    if port is None:
        port = find_free_port()
    if __server__.process is not None:
        logger.warning(
            "You tried to run init(), but the server is already running.")
        return __server__
    __server__.port = port
    __server__.disable_tls = disable_tls
    __server__.__enter__()
    if not do_not_exit:
        # Ensure the server is torn down at interpreter exit.
        atexit.register(__server__.__exit__, None, None, None)
    return __server__
Start the Xenon GRPC server on the specified port or if a service is already running on that port connect to that .
48,732
def add_cors_headers(request, response):
    """Attach the CORS headers required by the web application."""
    cors_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, OPTIONS'),
        ('Access-Control-Allow-Headers',
         ','.join(DEFAULT_ACCESS_CONTROL_ALLOW_HEADERS)),
    ]
    for header in cors_headers:
        response.headerlist.append(header)
Add cors headers needed for web app implementation .
48,733
def trace(self):
    """Lazily load the trace data on first access and cache it."""
    if self._trace is None:
        # First access: load from disk and memoize.
        self._trace = self.load_trace(self.mname)
    return self._trace
Initializes the trace data
48,734
def load_trace(mname):
    """Load the fluorescence traces for measurement `mname`.

    Returns a dict mapping trace names to per-event data chunks; empty
    when no trace file exists.
    """
    tname = TraceColumn.find_trace_file(mname)
    trace = {}
    if tname is not None and tname.suffix == ".tdms":
        mdata = TdmsFile(str(mname))
        # FL1index holds the flat-array offsets of each event.
        sampleids = mdata.object("Cell Track", "FL1index").data
        tdata = TdmsFile(str(tname))
        for trace_key in dfn.FLUOR_TRACES:
            group, ch = naming.tr_data_map[trace_key]
            try:
                trdat = tdata.object(group, ch).data
            except KeyError:
                pass
            else:
                if trdat is not None and trdat.size != 0:
                    # Split the flat data into per-event chunks.
                    trace[trace_key] = np.split(trdat, sampleids[1:])
    return trace
Loads the traces and returns them as a dictionary
48,735
def find_trace_file(mname):
    """Return the path of the companion ``*_traces.tdms`` file, or None."""
    mname = pathlib.Path(mname)
    if mname.exists():
        # Strip the ".tdms" suffix and append the traces suffix.
        cand = mname.with_name(mname.name[:-5] + "_traces.tdms")
        if cand.exists():
            return cand
    return None
Tries to find the traces tdms file name
48,736
def get_docker_secret(name, default=None, cast_to=str, autocast_name=True,
                      getenv=True, safe=True,
                      secrets_dir=os.path.join(root, 'var', 'run',
                                               'secrets')):
    """Fetch a docker secret by name, falling back to the environment.

    Secret files are looked up lower-cased, environment variables
    upper-cased (when `autocast_name`).  The value is cast via
    `cast_to`; with `safe` any cast failure returns `default` instead
    of raising.
    """
    secret_key = name.lower() if autocast_name else name
    env_key = name.upper() if autocast_name else name
    value = None
    try:
        with open(os.path.join(secrets_dir, secret_key), 'r') as secret_file:
            value = secret_file.read()
    except IOError:
        if getenv:
            value = os.environ.get(env_key)
    if value is None:
        value = default
    try:
        if value is None:
            raise TypeError('value is None')
        if cast_to == bool:
            # Only explicit true/false strings are accepted for bool.
            if value not in ('True', 'true', 'False', 'false'):
                raise ValueError('value %s not of type bool' % value)
            value = 1 if value in ('True', 'true') else 0
        return cast_to(value)
    except (TypeError, ValueError) as e:
        if safe:
            return default
        raise e
This function fetches a docker secret
48,737
def _update_hash ( self , arg ) : if isinstance ( arg , np . ndarray ) : self . ahash . update ( arg . view ( np . uint8 ) ) elif isinstance ( arg , list ) : [ self . _update_hash ( a ) for a in arg ] else : self . ahash . update ( str ( arg ) . encode ( 'utf-8' ) )
Takes an argument and updates the hash. The argument can be an np.array, a string, or a list of things that are convertible to strings.
48,738
def clear_cache():
    """Remove all cached objects and force a garbage collection."""
    del Cache._keys
    for key in list(Cache._cache.keys()):
        item = Cache._cache.pop(key)
        del item
    del Cache._cache
    # Re-create the (now empty) class-level containers.
    Cache._keys = []
    Cache._cache = {}
    gc.collect()
Remove all cached objects
48,739
def write_file(self, filename, filetype, data):
    """Write `data` to a new file entry on the disk image.

    The whole operation runs inside a transaction; an AtrError rolls
    the image back to the saved state before re-raising.  Metadata is
    refreshed in all cases.
    """
    state = self.begin_transaction()
    try:
        directory = self.directory_class(self.header)
        self.get_directory(directory)
        dirent = directory.add_dirent(filename, filetype)
        data = to_numpy(data)
        sector_list = self.build_sectors(data)
        vtoc = self.get_vtoc_object()
        directory.save_dirent(self, dirent, vtoc, sector_list)
        # Persist data, allocation table, and directory in that order.
        self.write_sector_list(sector_list)
        self.write_sector_list(vtoc)
        self.write_sector_list(directory)
    except errors.AtrError:
        # Restore the pre-transaction state before propagating.
        self.rollback_transaction(state)
        raise
    finally:
        # NOTE(review): non-AtrError exceptions are NOT rolled back --
        # confirm whether that is intentional.
        self.get_metadata()
Write data to a file on disk
48,740
def _apply_scale ( self , a , scale , feat ) : if scale == "linear" : b = a elif scale == "log" : with warnings . catch_warnings ( record = True ) as w : warnings . simplefilter ( "always" ) b = np . log ( a ) if len ( w ) : warnings . warn ( "Invalid values encounterd in np.log " "while scaling feature '{}'!" . format ( feat ) ) else : raise ValueError ( "`scale` must be either 'linear' or 'log', " + "got '{}'!" . format ( scale ) ) return b
Helper function for transforming an array to log-scale
48,741
def features(self):
    """Sorted list of all available feature names."""
    return sorted(col for col in dfn.feature_names if col in self)
All available features
48,742
def get_downsampled_scatter(self, xax="area_um", yax="deform", downsample=0,
                            xscale="linear", yscale="linear"):
    """Downsample scatter data by removing points at dense locations.

    Returns the (unscaled) x and y values of the kept events; the kept
    indices are remembered in ``self._plot_filter``.
    """
    if downsample < 0:
        raise ValueError("`downsample` must be zero or positive!")
    downsample = int(downsample)
    xax = xax.lower()
    yax = yax.lower()
    x = self[xax][self.filter.all]
    y = self[yax][self.filter.all]
    # Downsampling happens in scaled coordinates.
    xs = self._apply_scale(x, xscale, xax)
    ys = self._apply_scale(y, yscale, yax)
    _, _, idx = downsampling.downsample_grid(xs, ys,
                                             samples=downsample,
                                             ret_idx=True)
    self._plot_filter = idx
    return x[idx], y[idx]
Downsampling by removing points at dense locations
48,743
def get_kde_contour(self, xax="area_um", yax="deform", xacc=None, yacc=None,
                    kde_type="histogram", kde_kwargs=None, xscale="linear",
                    yscale="linear"):
    """Evaluate the kernel density estimate on a regular grid for
    contour plots.

    Returns (xmesh, ymesh, density); the meshes are converted back to
    data units when a log scale is used.
    """
    # FIX: the original used a mutable default argument
    # (`kde_kwargs={}`); use a None sentinel instead.
    if kde_kwargs is None:
        kde_kwargs = {}
    xax = xax.lower()
    yax = yax.lower()
    kde_type = kde_type.lower()
    if kde_type not in kde_methods.methods:
        raise ValueError("Not a valid kde type: {}!".format(kde_type))
    x = self[xax][self.filter.all]
    y = self[yax][self.filter.all]
    xs = self._apply_scale(x, xscale, xax)
    ys = self._apply_scale(y, yscale, yax)
    # Default grid accuracy derived from Doane's bin width.
    if xacc is None:
        xacc = kde_methods.bin_width_doane(xs) / 5
    if yacc is None:
        yacc = kde_methods.bin_width_doane(ys) / 5
    bad = kde_methods.get_bad_vals(xs, ys)
    xc = xs[~bad]
    yc = ys[~bad]
    xnum = int(np.ceil((xc.max() - xc.min()) / xacc))
    ynum = int(np.ceil((yc.max() - yc.min()) / yacc))
    xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True)
    ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True)
    xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij")
    kde_fct = kde_methods.methods[kde_type]
    if len(x):
        density = kde_fct(events_x=xs, events_y=ys,
                          xout=xmesh, yout=ymesh, **kde_kwargs)
    else:
        density = []
    # Convert the meshes back to data units for log-scaled axes.
    if xscale == "log":
        xmesh = np.exp(xmesh)
    if yscale == "log":
        ymesh = np.exp(ymesh)
    return xmesh, ymesh, density
Evaluate the kernel density estimate for contour plots
48,744
def get_kde_scatter(self, xax="area_um", yax="deform", positions=None,
                    kde_type="histogram", kde_kwargs=None, xscale="linear",
                    yscale="linear"):
    """Evaluate the kernel density estimate for scatter plots.

    Density is computed at the event positions themselves, or at the
    given `positions` tuple (x, y).
    """
    # FIX: the original used a mutable default argument
    # (`kde_kwargs={}`); use a None sentinel instead.
    if kde_kwargs is None:
        kde_kwargs = {}
    xax = xax.lower()
    yax = yax.lower()
    kde_type = kde_type.lower()
    if kde_type not in kde_methods.methods:
        raise ValueError("Not a valid kde type: {}!".format(kde_type))
    x = self[xax][self.filter.all]
    y = self[yax][self.filter.all]
    xs = self._apply_scale(x, xscale, xax)
    ys = self._apply_scale(y, yscale, yax)
    if positions is None:
        posx = None
        posy = None
    else:
        posx = self._apply_scale(positions[0], xscale, xax)
        posy = self._apply_scale(positions[1], yscale, yax)
    kde_fct = kde_methods.methods[kde_type]
    if len(x):
        density = kde_fct(events_x=xs, events_y=ys,
                          xout=posx, yout=posy, **kde_kwargs)
    else:
        density = []
    return density
Evaluate the kernel density estimate for scatter plots
48,745
def polygon_filter_add(self, filt):
    """Associate a polygon filter (instance or unique id) with this
    instance."""
    msg = "`filt` must be a number or instance of PolygonFilter!"
    if not isinstance(filt, (PolygonFilter, int, float)):
        raise ValueError(msg)
    uid = filt.unique_id if isinstance(filt, PolygonFilter) else int(filt)
    self.config["filtering"]["polygon filters"].append(uid)
Associate a Polygon Filter with this instance
48,746
def polygon_filter_rm(self, filt):
    """Remove a polygon filter (instance or unique id) from this
    instance."""
    msg = "`filt` must be a number or instance of PolygonFilter!"
    if not isinstance(filt, (PolygonFilter, int, float)):
        raise ValueError(msg)
    uid = filt.unique_id if isinstance(filt, PolygonFilter) else int(filt)
    self.config["filtering"]["polygon filters"].remove(uid)
Remove a polygon filter from this instance
48,747
def new_dataset(data, identifier=None):
    """Initialize a new RT-DC dataset.

    `data` may be a dict, a file path (string or Path), or an existing
    RTDCBase instance (which yields a hierarchy child).
    """
    if isinstance(data, dict):
        return fmt_dict.RTDC_Dict(data, identifier=identifier)
    if isinstance(data, (str_types)) or isinstance(data, pathlib.Path):
        return load_file(data, identifier=identifier)
    if isinstance(data, RTDCBase):
        return fmt_hierarchy.RTDC_Hierarchy(data, identifier=identifier)
    raise NotImplementedError(
        "data type not supported: {}".format(data.__class__))
Initialize a new RT - DC dataset
48,748
def parse_log(log, url_pattern):
    """Parse an access-log buffer, counting hits per ident-hash.

    `url_pattern` is a compiled regex whose groups form the ident-hash.
    Returns (hits, initial_timestamp, end_timestamp); the timestamps
    are taken from the first and last log lines.
    """
    hits = {}
    initial_timestamp = None
    end_timestamp = None

    def clean_timestamp(v):
        # ['[01/Jan/2020:00:00:00', '+0000]'] -> '01/Jan/2020:... +0000'
        return ' '.join(v).strip('[]')

    for line in log:
        data = line.split()
        if not initial_timestamp:
            initial_timestamp = clean_timestamp(data[3:5])
        # FIX: the original only assigned end_timestamp on NON-matching
        # lines, so a log whose last lines all matched the pattern (or
        # one with only matches) raised UnboundLocalError at return.
        # Track the end timestamp on every line instead.
        end_timestamp = clean_timestamp(data[3:5])
        match = url_pattern.match(data[6])
        if match:
            ident_hash = '@'.join(match.groups())
            if ident_hash:
                hits[ident_hash] = hits.get(ident_hash, 0) + 1
    return hits, initial_timestamp, end_timestamp
Parse log buffer based on url_pattern .
48,749
def fintlist(alist):
    """Convert `alist` (sequence or "[1, 2]"-style string) to a list of
    integers, skipping empty items."""
    if not isinstance(alist, (list, tuple)):
        # Parse a bracketed, comma-separated string representation.
        alist = alist.strip().strip("[] ").split(",")
    return [fint(item) for item in alist if item]
A list of integers
48,750
def avi(self, path, filtered=True, override=False):
    """Export event images to an avi file.

    `path` gets an ".avi" suffix appended if missing.  With `filtered`
    only events passing the dataset filter are written; `override`
    allows overwriting an existing file.  Raises OSError when the file
    exists (and override is False) or the dataset has no image data.
    """
    path = pathlib.Path(path)
    ds = self.rtdc_ds
    if path.suffix != ".avi":
        path = path.with_name(path.name + ".avi")
    if not override and path.exists():
        raise OSError("File already exists: {}\n".format(
            str(path).encode("ascii", "ignore"))
            + "Please use the `override=True` option.")
    if "image" in ds:
        vout = imageio.get_writer(uri=path,
                                  format="FFMPEG",
                                  fps=25,
                                  codec="rawvideo",
                                  pixelformat="yuv420p",
                                  macro_block_size=None,
                                  ffmpeg_log_level="error")
        for evid in np.arange(len(ds)):
            if filtered and not ds._filter[evid]:
                continue
            try:
                image = ds["image"][evid]
            except BaseException:
                warnings.warn("Could not read image {}!".format(evid),
                              NoImageWarning)
                continue
            else:
                # A NaN first pixel is replaced by an all-zero frame
                # (presumably marking a missing image -- TODO confirm).
                if np.isnan(image[0, 0]):
                    image = np.zeros_like(image, dtype=np.uint8)
                # Expand grayscale to three channels for the encoder.
                image = image.reshape(image.shape[0], image.shape[1], 1)
                image = np.repeat(image, 3, axis=2)
                vout.append_data(image)
    else:
        msg = "No image data to export: dataset {} !".format(ds.title)
        raise OSError(msg)
Exports filtered event images to an avi file
48,751
def fcs(self, path, features, filtered=True, override=False):
    """Export scalar features of an RT-DC dataset to an .fcs file.

    Raises OSError when the target exists (and override is False) and
    ValueError for unknown feature names.
    """
    features = [c.lower() for c in features]
    ds = self.rtdc_ds
    path = pathlib.Path(path)
    if path.suffix != ".fcs":
        path = path.with_name(path.name + ".fcs")
    if not override and path.exists():
        raise OSError("File already exists: {}\n".format(
            str(path).encode("ascii", "ignore"))
            + "Please use the `override=True` option.")
    for c in features:
        if c not in dfn.scalar_feature_names:
            msg = "Unknown or unsupported feature name: {}".format(c)
            raise ValueError(msg)
    chn_names = [dfn.feature_name2label[c] for c in features]
    if filtered:
        data = [ds[c][ds._filter] for c in features]
    else:
        data = [ds[c] for c in features]
    # fcswrite expects events as rows, features as columns.
    data = np.array(data).transpose()
    fcswrite.write_fcs(filename=str(path),
                       chn_names=chn_names,
                       data=data)
Export the data of an RT - DC dataset to an . fcs file
48,752
def tsv(self, path, features, filtered=True, override=False):
    """Export scalar features of the dataset to a .tsv file.

    Two '#'-prefixed header lines (feature names and human-readable
    labels) are followed by the tab-separated data.
    """
    features = [c.lower() for c in features]
    path = pathlib.Path(path)
    ds = self.rtdc_ds
    if path.suffix != ".tsv":
        path = path.with_name(path.name + ".tsv")
    if not override and path.exists():
        raise OSError("File already exists: {}\n".format(
            str(path).encode("ascii", "ignore"))
            + "Please use the `override=True` option.")
    for c in features:
        if c not in dfn.scalar_feature_names:
            raise ValueError("Unknown feature name {}".format(c))
    with path.open("w") as fd:
        header1 = "\t".join([c for c in features])
        fd.write("# " + header1 + "\n")
        header2 = "\t".join([dfn.feature_name2label[c] for c in features])
        fd.write("# " + header2 + "\n")
    # Re-open in binary append mode for np.savetxt.
    with path.open("ab") as fd:
        if filtered:
            data = [ds[c][ds._filter] for c in features]
        else:
            data = [ds[c] for c in features]
        np.savetxt(fd,
                   np.array(data).transpose(),
                   fmt=str("%.10e"),
                   delimiter="\t")
Export the data of the current instance to a . tsv file
48,753
def hashfile(fname, blocksize=65536, count=0):
    """Compute the md5 hex-hash of a file.

    Reads in chunks of `blocksize` bytes; a non-zero `count` limits
    hashing to that many blocks.
    """
    hasher = hashlib.md5()
    with pathlib.Path(fname).open('rb') as fd:
        blocks_read = 0
        buf = fd.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = fd.read(blocksize)
            blocks_read += 1
            if count and blocks_read == count:
                break
    return hasher.hexdigest()
Compute md5 hex - hash of a file
48,754
def obj2str(obj):
    """Byte-string representation of an object, used for hashing.

    Containers are flattened recursively; objects exposing an
    ``identifier`` attribute are represented by it.
    """
    if isinstance(obj, str_types):
        return obj.encode("utf-8")
    if isinstance(obj, pathlib.Path):
        return obj2str(str(obj))
    if isinstance(obj, (bool, int, float)):
        return str(obj).encode("utf-8")
    if obj is None:
        return b"none"
    if isinstance(obj, np.ndarray):
        return obj.tostring()
    if isinstance(obj, tuple):
        return obj2str(list(obj))
    if isinstance(obj, list):
        return b"".join(obj2str(o) for o in obj)
    if isinstance(obj, dict):
        return obj2str(list(obj.items()))
    if hasattr(obj, "identifier"):
        return obj2str(obj.identifier)
    if isinstance(obj, h5py.Dataset):
        return obj2str(obj[0])
    raise ValueError("No rule to convert object '{}' to string.".format(
        obj.__class__))
String representation of an object for hashing
48,755
def create_self_signed_cert():
    """Create (or reuse) a self-signed certificate/key pair.

    The pair is stored under ``$XDG_CONFIG_HOME/xenon-grpc`` with the
    hostname as file-name prefix; existing files are reused as-is.

    Returns
    -------
    (pathlib.Path, pathlib.Path): certificate file and key file paths
    """
    config_dir = Path(BaseDirectory.xdg_config_home) / 'xenon-grpc'
    config_dir.mkdir(parents=True, exist_ok=True)
    key_prefix = gethostname()
    crt_file = config_dir / ('%s.crt' % key_prefix)
    key_file = config_dir / ('%s.key' % key_prefix)
    if crt_file.exists() and key_file.exists():
        return crt_file, key_file

    logger = logging.getLogger('xenon')
    logger.info("Creating authentication keys for xenon-grpc.")

    # NOTE(review): 1024-bit RSA is considered weak; consider 2048+.
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 1024)

    cert = crypto.X509()
    cert.get_subject().CN = gethostname()
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(10 * 365 * 24 * 3600)  # valid ~10 years
    cert.set_issuer(cert.get_subject())             # self-signed
    cert.set_pubkey(k)
    cert.sign(k, 'sha256')

    # BUG FIX: use context managers so the file handles are closed
    # deterministically (the original leaked open handles).
    with open(str(crt_file), "wb") as fobj:
        fobj.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(str(key_file), "wb") as fobj:
        fobj.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
    return crt_file, key_file
Creates a self - signed certificate key pair .
48,756
def get_project_name_from_path(path, append_mx=False):
    """Infer the project name for a measurement path.

    First tries the "Sample Name" entry of the measurement's
    ``<mx>_para.ini`` file; otherwise falls back to directory names,
    skipping trailing "online"/"offline"/"data" components.

    Parameters
    ----------
    path: str or pathlib.Path
        Path to a .tdms file or to a measurement directory.
    append_mx: bool
        Append " - <mx>" (the measurement identifier) to the result.

    Returns
    -------
    str: the inferred project name
    """
    path = pathlib.Path(path)
    if path.suffix == ".tdms":
        dirn = path.parent
        mx = path.name.split("_")[0]
    elif path.is_dir():
        dirn = path
        mx = ""
    else:
        dirn = path.parent
        mx = ""

    project = ""
    if mx:
        # prefer the sample name recorded in the para.ini file
        para = dirn / (mx + "_para.ini")
        if para.exists():
            with para.open() as fd:
                for line in fd:
                    if line.startswith("Sample Name ="):
                        project = line.split("=")[1].strip()
                        break

    if not project:
        # fall back to the directory structure
        parent = dirn.parent
        name1 = dirn.name.lower()
        name2 = parent.name.lower()
        if name1 in ("online", "offline"):
            project = parent.name
        elif name1 == "data" and name2 in ("online", "offline"):
            project = parent.parent.name
        else:
            project = dirn.name

    if append_mx:
        project += " - " + mx
    return project
Get the project name from a path .
48,757
def get_tdms_files(directory):
    """Recursively collect .tdms measurement files below `directory`.

    Trace files (names ending in "_traces.tdms") are excluded.

    Returns
    -------
    list of pathlib.Path: sorted list of measurement files
    """
    root = pathlib.Path(directory).resolve()
    found = []
    for cand in root.rglob("*.tdms"):
        if cand.is_file() and not cand.name.endswith("_traces.tdms"):
            found.append(cand)
    return sorted(found)
Recursively find projects based on . tdms file endings
48,758
def _init_data_with_tdms(self, tdms_filename):
    """Initialize this RT-DC dataset from a .tdms file.

    Reads all known event columns from the "Cell Track" table and
    translates the legacy ``*_para.ini``/``*_camera.ini`` configuration
    into the dclab configuration format.
    """
    tdms_file = TdmsFile(str(tdms_filename))
    # import events from the "Cell Track" table
    table = "Cell Track"
    for arg in naming.tdms2dclab:
        try:
            data = tdms_file.object(table, arg).data
        except KeyError:
            # column not present in this file
            pass
        else:
            if data is None or len(data) == 0:
                # skip empty columns
                continue
            self._events[naming.tdms2dclab[arg]] = data
    # load the legacy configuration files belonging to this measurement
    tdms_config = Configuration(
        files=[self.path.with_name(self._mid + "_para.ini"),
               self.path.with_name(self._mid + "_camera.ini")],
    )
    dclab_config = Configuration()
    for section in naming.configmap:
        for pname in naming.configmap[section]:
            meta = naming.configmap[section][pname]
            # conversion function for this config value
            typ = dfn.config_funcs[section][pname]
            if isinstance(meta, tuple):
                # value is mapped from a legacy (section, key) pair;
                # pop it so _complete_config_tdms sees only leftovers
                osec, opar = meta
                if osec in tdms_config and opar in tdms_config[osec]:
                    val = tdms_config[osec].pop(opar)
                    dclab_config[section][pname] = typ(val)
            else:
                # fixed default value
                dclab_config[section][pname] = typ(meta)
    self.config = dclab_config
    self._complete_config_tdms(tdms_config)
    self._init_filters()
Initializes the current RT - DC dataset with a tdms file .
48,759
def hash(self):
    """Hash value based on file name and .ini file content (cached).

    Computed lazily on first access and stored in ``self._hash``.
    """
    if self._hash is None:
        ini_files = [self.path.with_name(self._mid + "_camera.ini"),
                     self.path.with_name(self._mid + "_para.ini")]
        parts = [hashfile(ff) for ff in ini_files]
        parts.append(self.path.name)
        # only hash the first 20 blocks of the (possibly huge) data file
        parts.append(hashfile(self.path, blocksize=65536, count=20))
        self._hash = hashobj(parts)
    return self._hash
Hash value based on file name and . ini file content
48,760
def determine_offset(self):
    """Determine the offset of the contours w.r.t. the other columns.

    Compares the frame number of the first contour with the first two
    frame numbers of the dataset and stores the result (0 or 1) in
    ``self.event_offset``.

    Raises
    ------
    IndexError: if the first contour matches neither frame.
    """
    first_frame = self._contour_data.get_frame(0)
    if np.allclose(first_frame, self.frame[0]):
        self.event_offset = 0
    elif np.allclose(first_frame, self.frame[1]):
        self.event_offset = 1
    else:
        raise IndexError(
            "Contour data has unknown offset (frame {})!".format(first_frame))
    self._initialized = True
Determines the offset of the contours w . r . t . other data columns
48,761
def find_contour_file(rtdc_dataset):
    """Search for a contour file belonging to an RT-DC dataset.

    Looks for "*_contours.txt" files below the dataset directory,
    preferring names starting with the dataset's file stem and falling
    back to names whose first underscore-token matches the measurement
    id.  Returns None (and emits a NoContourDataWarning) when nothing
    matches.
    """
    stem = rtdc_dataset.path.stem
    candidates = sorted(cc.name for cc in
                        rtdc_dataset._fdir.rglob("*_contours.txt"))
    cfile = None
    for name in candidates:
        if name.startswith(stem):
            cfile = rtdc_dataset._fdir / name
            break
    if cfile is None:
        for name in candidates:
            if name.split("_")[0] == rtdc_dataset._mid:
                cfile = rtdc_dataset._fdir / name
                break
        else:
            warnings.warn("No contour data found for {}".format(rtdc_dataset),
                          NoContourDataWarning)
    return cfile
Tries to find a contour file that belongs to an RTDC dataset
48,762
def _index_file ( self ) : with self . filename . open ( ) as fd : data = fd . read ( ) ident = "Contour in frame" self . _data = data . split ( ident ) [ 1 : ] self . _initialized = True
Open and index the contour file
48,763
def get_frame(self, idx):
    """Return the frame number of contour `idx`.

    The first whitespace-separated token of each contour chunk is the
    frame number.
    """
    chunk = self.data[idx]
    return int(chunk.strip().split(" ", 1)[0])
Return the frame number of a contour
48,764
def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
                  temperature=23.0):
    """Return the viscosity for RT-DC-specific media.

    Parameters
    ----------
    medium: str
        One of "CellCarrier", "CellCarrier B", or "water"
        (case-insensitive).
    channel_width: float
        Channel width [um] — units presumed from the 1e-6 factor.
    flow_rate: float
        Flow rate — units presumed from the 1e-9 factor.
    temperature: float or ndarray
        Temperature [deg C]; for water it must lie within [0, 40].

    Returns
    -------
    float or ndarray: the viscosity

    Raises
    ------
    ValueError: for unknown media or out-of-range water temperatures.
    """
    # BUG FIX: the original validated `medium.lower()` but then compared
    # the exact-case string, so inputs like "cellcarrier" passed
    # validation yet matched no branch (NameError on `eta`).  Normalize
    # once and branch on the normalized value.
    medium_key = medium.lower()
    if medium_key not in ["cellcarrier", "cellcarrier b", "water"]:
        raise ValueError("Invalid medium: {}".format(medium))
    # shear-rate-like term common to the CellCarrier formulas
    term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3
    if medium_key == "cellcarrier":
        temp_corr = (temperature / 23.2)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
        eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
    elif medium_key == "cellcarrier b":
        temp_corr = (temperature / 23.6)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
        eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
    else:  # water
        if np.min(temperature) < 0 or np.max(temperature) > 40:
            # BUG FIX: the original message had only one placeholder for
            # two values; show both min and max.
            msg = ("For water, the temperature must be in [0, 40] degC! "
                   "Got min/max values of '{}'/'{}'.".format(
                       np.min(temperature), np.max(temperature)))
            raise ValueError(msg)
        eta0 = 1.002  # reference viscosity at 20 degC
        right = ((20 - temperature) / (temperature + 96)
                 * (+1.2364
                    - 1.37e-3 * (20 - temperature)
                    + 5.7e-6 * (20 - temperature)**2))
        eta = eta0 * 10**right
    return eta
Returns the viscosity for RT - DC - specific media
48,765
def get_statistics(ds, methods=None, features=None):
    """Compute statistics for an RT-DC dataset.

    Parameters
    ----------
    ds: dataset instance
        The dataset to analyze.
    methods: list of str or None
        Statistics method names; defaults to all available methods with
        feature-independent methods listed first.
    features: list of str or None
        Feature names (case-insensitive); defaults to all scalar
        features.

    Returns
    -------
    (list of str, list): column headers and the computed values
    """
    if methods is None:
        cls = list(Statistics.available_methods.keys())
        avm = Statistics.available_methods
        # feature-independent methods first, feature-dependent second
        me1 = [m for m in cls if not avm[m].req_feature]
        me2 = [m for m in cls if avm[m].req_feature]
        methods = me1 + me2
    if features is None:
        features = dfn.scalar_feature_names
    else:
        features = [a.lower() for a in features]
    header = []
    values = []
    for ft in features:
        for mt in methods:
            meth = Statistics.available_methods[mt]
            if meth.req_feature:
                if ft in ds:
                    values.append(meth(ds=ds, feature=ft))
                else:
                    # feature not present -> NaN placeholder keeps the
                    # header/values lists aligned
                    values.append(np.nan)
                header.append(" ".join([mt, dfn.feature_name2label[ft]]))
            else:
                # feature-independent methods are only computed once
                if not header.count(mt):
                    values.append(meth(ds=ds))
                    header.append(mt)
    return header, values
Compute statistics for an RT - DC dataset
48,766
def mode(data):
    """Compute a heuristic mode via histogram binning.

    The bin width follows the Freedman-Diaconis rule (2*IQR/n^(1/3));
    the mode is the center of the most populated bin.  Returns NaN when
    the IQR is zero.
    """
    num = data.shape[0]
    quart1 = np.percentile(data, 25)
    quart3 = np.percentile(data, 75)
    bin_size = 2 * (quart3 - quart1) / num**(1 / 3)
    if bin_size == 0:
        return np.nan
    # snap every value to the center of its bin
    centers = np.round(data / bin_size) * bin_size + bin_size / 2
    uniq, inv = np.unique(centers, return_inverse=True)
    return uniq[np.argmax(np.bincount(inv))]
Compute an intelligent value for the mode
48,767
def _get_data ( self , kwargs ) : if "ds" not in kwargs : raise ValueError ( "Keyword argument 'ds' missing." ) ds = kwargs [ "ds" ] if self . req_feature : if "feature" not in kwargs : raise ValueError ( "Keyword argument 'feature' missing." ) return self . get_feature ( ds , kwargs [ "feature" ] ) else : return ds
Convenience wrapper to get statistics data
48,768
def get_feature(self, ds, feat):
    """Return feature data with NaN/inf values removed.

    When filtering is enabled in the dataset configuration, only events
    passing the filter are considered.
    """
    if ds.config["filtering"]["enable filters"]:
        values = ds[feat][ds._filter]
    else:
        values = ds[feat]
    finite = ~(np.isnan(values) | np.isinf(values))
    return values[finite]
Return filtered feature data
48,769
def get_exception(self):
    """Reconstruct and return the stored exception, if any.

    Returns
    -------
    Exception or None: the exception re-raised from ``self.exc_info``
    (implicitly None when no exc_info is stored)
    """
    if self.exc_info:
        # Re-raise natively instead of via six.reraise (a Python-2
        # compatibility shim); equivalent on Python 3 and drops the
        # third-party `six` dependency.
        try:
            etype, evalue, etb = self.exc_info
            if evalue is None:
                evalue = etype()
            raise evalue.with_traceback(etb)
        except Exception as e:
            return e
Retrieve the exception
48,770
def has_key(cls, *args):
    """Check whether a flyweight with the given key already exists.

    Multiple arguments are combined into a tuple key; a single
    argument is used as the key directly.
    """
    key = args[0] if len(args) == 1 else args
    return key in cls._instances
Check whether flyweight object with specified key has already been created .
48,771
def from_key(cls, *args):
    """Return the flyweight with the given key, or None if absent.

    Multiple arguments are combined into a tuple key; a single
    argument is used as the key directly.
    """
    key = args[0] if len(args) == 1 else args
    return cls._instances.get(key, None)
Return flyweight object with specified key if it has already been created .
48,772
def write(self, name, **data):
    """Write a single metric document to elasticsearch.

    A "timestamp" field (UTC now) is added when missing; transport
    failures are logged but not raised.
    """
    data["name"] = name
    data.setdefault("timestamp", datetime.utcnow())
    try:
        self.client.index(index=self.get_index(),
                          doc_type=self.doc_type,
                          id=None,
                          body=data)
    except TransportError as exc:
        logger.warning('writing metric %r failure %r', data, exc)
Write the metric to elasticsearch
48,773
def bulk_write(self, metrics):
    """Write multiple metrics to elasticsearch in one bulk request.

    Each metric document is preceded by its bulk "index" action line;
    transport failures are logged but not raised.
    """
    index = self.get_index()
    actions = []
    for metric in metrics:
        # action line followed by the document itself
        actions.extend(({'index': {'_index': index,
                                   '_type': self.doc_type}},
                        metric))
    try:
        self.client.bulk(actions)
    except TransportError as exc:
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Write multiple metrics to elasticsearch in one request
48,774
def search(query, query_type, nocache=False):
    """Search archive contents, with memcached caching when configured.

    Parameters
    ----------
    query: object with `terms`, `filters`, `sorts` attributes
        The parsed search query.
    query_type: str
        Passed through to `database_search`.
    nocache: bool
        If True, bypass the cache lookup (results are still stored).

    Returns
    -------
    The result of `database_search` (possibly served from cache).
    """
    settings = get_current_registry().settings
    memcache_servers = settings['memcache-servers'].split()
    if not memcache_servers:
        # no cache configured -> always hit the database
        return database_search(query, query_type)
    # build a stable, order-independent cache key from the query parts
    search_params = []
    search_params += copy.deepcopy(query.terms)
    search_params += copy.deepcopy(query.filters)
    search_params += [('sort', i) for i in query.sorts]
    search_params.sort(key=lambda record: (record[0], record[1]))
    search_params.append(('query_type', query_type))
    search_key = u' '.join([u'"{}"'.format(u':'.join(param))
                            for param in search_params])
    # hash the key so it fits memcached's key-length limits
    mc_search_key = binascii.hexlify(
        hashlib.pbkdf2_hmac('sha1', search_key.encode('utf-8'), b'', 1))
    mc = memcache.Client(memcache_servers,
                         server_max_value_length=128 * 1024 * 1024,
                         debug=0)
    if not nocache:
        search_results = mc.get(mc_search_key)
    else:
        search_results = None
    if not search_results:
        search_results = database_search(query, query_type)
        cache_length = int(settings['search-cache-expiration'])
        # NOTE(review): due to and/or precedence this groups as
        # (len == 2 and subject) OR (text and no-space) -- confirm this
        # is the intended heuristic for using the long cache expiration.
        if (len(search_params) == 2
                and search_params[0][0] == 'subject'
                or search_params[0][0] == 'text'
                and ' ' not in search_params[0][1]):
            cache_length = int(settings['search-long-cache-expiration'])
        mc.set(mc_search_key, search_results,
               time=cache_length,
               min_compress_len=1024 * 1024)
    return search_results
Search archive contents .
48,775
def available_features(rtdc_ds):
    """Return a dict mapping feature names to the AncillaryFeature
    instances that can be computed for the given dataset."""
    return {inst.feature_name: inst
            for inst in AncillaryFeature.features
            if inst.is_available(rtdc_ds)}
Determine available features for an RT - DC dataset
48,776
def compute(self, rtdc_ds):
    """Compute the ancillary feature for a dataset via ``self.method``.

    If the computed array length does not match the dataset's event
    count, the array is resized (growing pads with NaN) and a
    BadFeatureSizeWarning is emitted.  Resulting numpy arrays are
    marked read-only before being returned.
    """
    data = self.method(rtdc_ds)
    diff = len(rtdc_ds) - len(data)
    if diff > 0:
        msg = "Growing feature {} in {} by {} to match event number!"
        warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(diff)),
                      BadFeatureSizeWarning)
        data.resize(len(rtdc_ds), refcheck=False)
        # pad the appended tail with NaN
        data[-diff:] = np.nan
    elif diff < 0:
        msg = "Shrinking feature {} in {} by {} to match event number!"
        warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(diff)),
                      BadFeatureSizeWarning)
        data.resize(len(rtdc_ds), refcheck=False)
    # make the result immutable for downstream consumers
    if isinstance(data, np.ndarray):
        data.setflags(write=False)
    elif isinstance(data, list):
        for item in data:
            if isinstance(item, np.ndarray):
                item.setflags(write=False)
    return data
Compute the feature with self . method
48,777
def get_instances(feature_name):
    """Return all registered AncillaryFeature instances that compute
    the given feature name."""
    return [ft for ft in AncillaryFeature.features
            if ft.feature_name == feature_name]
Return all instances that compute feature_name
48,778
def hash(self, rtdc_ds):
    """Return an MD5 identifier for this ancillary computation.

    The hash covers the required feature data and the required
    configuration values of `rtdc_ds`, so it changes whenever any
    input of the computation changes.
    """
    hasher = hashlib.md5()
    for feat in self.req_features:
        hasher.update(obj2str(rtdc_ds[feat]))
    for sec, keys in self.req_config:
        for key in keys:
            entry = "{}:{}={}".format(sec, key, rtdc_ds.config[sec][key])
            hasher.update(obj2str(entry))
    return hasher.hexdigest()
Used for identifying an ancillary computation
48,779
def is_available(self, rtdc_ds, verbose=False):
    """Check whether this ancillary feature can be computed for a dataset.

    Returns False when a required configuration key or feature is
    missing, or when another registered feature with the same name and
    a strictly larger `priority` value is itself available (that
    feature takes precedence).
    """
    # check that all required configuration keys are present
    for item in self.req_config:
        section, keys = item
        if section not in rtdc_ds.config:
            if verbose:
                print("{} not in config".format(section))
            return False
        else:
            for key in keys:
                if key not in rtdc_ds.config[section]:
                    if verbose:
                        print("{} not in config['{}']".format(key, section))
                    return False
    # check that all required features are present
    for col in self.req_features:
        if col not in rtdc_ds:
            return False
    # defer to same-named features with a larger priority value
    for of in AncillaryFeature.features:
        if of == self:
            continue
        elif of.feature_name == self.feature_name:
            if of.priority <= self.priority:
                continue
            else:
                if of.is_available(rtdc_ds):
                    # a higher-priority alternative wins
                    return False
                else:
                    continue
        else:
            continue
    return True
Check whether the feature is available
48,780
def write(self, name, **data):
    """Write a single metric to the kafka topic.

    A "timestamp" field (UTC now) is added when missing; broker
    failures are logged but not raised.
    """
    data["name"] = name
    data.setdefault("timestamp", datetime.utcnow())
    try:
        self.producer.send(topic=self.topic, value=data)
        self.producer.flush()
    except (KafkaTimeoutError, NoBrokersAvailable) as exc:
        logger.warning('writing metric %r failure %r', data, exc)
Write the metric to kafka
48,781
def bulk_write(self, metrics):
    """Send multiple metrics to kafka in one batch.

    All metrics are queued first, then flushed once; broker failures
    are logged but not raised.
    """
    try:
        for entry in metrics:
            self.producer.send(self.topic, entry)
        # single flush after queueing everything
        self.producer.flush()
    except (KafkaTimeoutError, NoBrokersAvailable) as exc:
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Write multiple metrics to kafka in one request
48,782
def safe_stat(path, timeout=1, cmd=None):
    """Use threads and a subproc to bodge a timeout on top of filesystem access.

    Runs ``stat <path>`` (or ``cmd + [path]``) in a helper thread and
    terminates the subprocess if it does not finish within `timeout`
    seconds.

    Parameters
    ----------
    path: str
        Filesystem path to check.
    timeout: float
        Seconds to wait before terminating the subprocess.
    cmd: list of str or None
        Command to run; defaults to ['/usr/bin/stat'].  The caller's
        list is never modified.

    Returns
    -------
    bool: True if the command exited with status 0
    """
    global safe_stat_process
    log = logging.getLogger(__name__)
    if cmd is None:
        cmd = ['/usr/bin/stat']
    # BUG FIX: copy before appending -- the original `cmd.append(path)`
    # mutated the caller's list on every invocation.
    cmd = list(cmd) + [path]

    def target():
        global safe_stat_process
        log.debug('Stat thread started')
        safe_stat_process = subprocess.Popen(cmd,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
        safe_stat_process.communicate()
        log.debug('Stat thread finished')

    thread = threading.Thread(target=target)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # timed out: kill the subprocess and wait for the thread
        safe_stat_process.terminate()
        thread.join()
    return safe_stat_process.returncode == 0
Use threads and a subproc to bodge a timeout on top of filesystem access
48,783
def get_polygon_filter_names():
    """Return the names of all polygon filters in order of creation."""
    return [flt.name for flt in PolygonFilter.instances]
Get the names of all polygon filters in the order of creation
48,784
def _check_data ( self ) : if self . axes is None : raise PolygonFilterError ( "`axes` parm not set." ) if self . points is None : raise PolygonFilterError ( "`points` parm not set." ) self . points = np . array ( self . points ) if self . points . shape [ 1 ] != 2 : raise PolygonFilterError ( "data points' shape[1] must be 2." ) if self . name is None : self . name = "polygon filter {}" . format ( self . unique_id ) if not isinstance ( self . inverted , bool ) : raise PolygonFilterError ( "`inverted` must be boolean." )
Check if the data given is valid
48,785
def _load(self, filename):
    """Load the polygon filter with index ``self.fileid`` from a file.

    The file may contain several "[Polygon <id>]" sections; only the
    section selected by ``self.fileid`` is parsed.  Sets axes, name,
    inverted flag, points, and the unique id.
    """
    filename = pathlib.Path(filename)
    with filename.open() as fd:
        data = fd.readlines()
    # locate the "[Polygon <id>]" header lines of all sections
    bool_head = [l.strip().startswith("[") for l in data]
    int_head = np.squeeze(np.where(bool_head))
    int_head = np.atleast_1d(int_head)
    # slice out the lines belonging to section `self.fileid`
    start = int_head[self.fileid] + 1
    if len(int_head) > self.fileid + 1:
        end = int_head[self.fileid + 1]
    else:
        end = len(data)
    subdata = data[start:end]
    # split "key = value" lines into stripped (key, value) pairs
    subdata = [[it.strip() for it in l.split("=")] for l in subdata]
    points = []
    for var, val in subdata:
        if var.lower() == "x axis":
            xaxis = val.lower()
        elif var.lower() == "y axis":
            yaxis = val.lower()
        elif var.lower() == "name":
            self.name = val
        elif var.lower() == "inverted":
            if val == "True":
                self.inverted = True
        elif var.lower().startswith("point"):
            # e.g. "Point00001 = [1.2 3.4]" -> keep index for sorting
            val = np.array(val.strip("[]").split(), dtype=float)
            points.append([int(var[5:]), val])
        else:
            raise KeyError("Unknown variable: {} = {}".format(var, val))
    self.axes = (xaxis, yaxis)
    # restore the original point order via the parsed indices
    points.sort()
    self.points = np.array([p[1] for p in points])
    # the unique id is encoded in the section header line itself
    unique_id = int(data[start - 1].strip().strip("Polygon []"))
    self._set_unique_id(unique_id)
Import all filters from a text file
48,786
def _set_unique_id(self, unique_id):
    """Assign a unique id, resolving collisions with existing filters.

    If the requested id is already taken, a fresh id is chosen and a
    FilterIdExistsWarning is emitted.  The class-level instance counter
    is always advanced past the assigned id.
    """
    assert isinstance(unique_id, int), "unique_id must be an integer"
    if PolygonFilter.instace_exists(unique_id):
        newid = max(PolygonFilter._instance_counter, unique_id + 1)
        warnings.warn("PolygonFilter with unique_id '{}' exists."
                      .format(unique_id)
                      + " Using new unique id '{}'.".format(newid),
                      FilterIdExistsWarning)
        unique_id = newid
    PolygonFilter._instance_counter = max(PolygonFilter._instance_counter,
                                          unique_id + 1)
    self.unique_id = unique_id
Define a unique id
48,787
def copy(self, invert=False):
    """Return a copy of this polygon filter.

    Parameters
    ----------
    invert: bool
        If True, the copy's `inverted` flag is toggled.
    """
    # boolean XOR: toggles `inverted` exactly when `invert` is True
    new_inverted = self.inverted != invert
    return PolygonFilter(axes=self.axes,
                         points=self.points,
                         name=self.name,
                         inverted=new_inverted)
Return a copy of the current instance
48,788
def filter(self, datax, datay):
    """Return a boolean array marking points inside the polygon.

    The result is inverted in-place when ``self.inverted`` is set.
    """
    inside = np.ones(datax.shape, dtype=bool)
    for idx, pnt in enumerate(zip(datax, datay)):
        inside[idx] = PolygonFilter.point_in_poly(pnt, self.points)
    if self.inverted:
        np.invert(inside, inside)
    return inside
Filter a set of datax and datay according to self . points
48,789
def get_instance_from_id(unique_id):
    """Return the PolygonFilter instance with the given unique id.

    Raises
    ------
    KeyError: if no such instance exists.
    """
    for flt in PolygonFilter.instances:
        if flt.unique_id == unique_id:
            return flt
    raise KeyError("PolygonFilter with unique_id {} not found.".format(
        unique_id))
Get an instance of the PolygonFilter using a unique id
48,790
def import_all(path):
    """Import all polygon filters from a .poly file.

    Returns
    -------
    list of PolygonFilter: one instance per section in the file
    """
    filters = []
    fileid = 0
    while True:
        try:
            filters.append(PolygonFilter(filename=path, fileid=fileid))
        except IndexError:
            # no more sections in the file
            break
        fileid += 1
    return filters
Import all polygons from a . poly file .
48,791
def point_in_poly(p, poly):
    """Ray-casting test whether point `p` lies inside polygon `poly`.

    Parameters
    ----------
    p: (x, y) pair
        The point to test.
    poly: array-like of shape (N, 2)
        Polygon vertices.

    Returns
    -------
    bool: True if the point is inside the polygon
    """
    poly = np.array(poly)
    nvert = poly.shape[0]
    px, py = p
    inside = False
    # quick bounding-box rejection before the edge walk
    if (px <= poly[:, 0].max() and px > poly[:, 0].min()
            and py <= poly[:, 1].max() and py > poly[:, 1].min()):
        ax, ay = poly[0]
        for jj in range(nvert):
            bx, by = poly[(jj + 1) % nvert]
            # does the horizontal ray from p cross edge (a, b)?
            if (py > min(ay, by) and py <= max(ay, by)
                    and px <= max(ax, bx)):
                if ax == bx or px <= (py - ay) * (bx - ax) / (by - ay) + ax:
                    inside = not inside
            ax, ay = bx, by
    return inside
Determine whether a point is within a polygon area
48,792
def remove(unique_id):
    """Remove all polygon filters with the given unique id from
    ``PolygonFilter.instances``.

    BUG FIX: the original removed elements from the list while
    iterating over it, which silently skips the element following each
    removal; iterate over a shallow copy instead.
    """
    for flt in list(PolygonFilter.instances):
        if flt.unique_id == unique_id:
            PolygonFilter.instances.remove(flt)
Remove a polygon filter from PolygonFilter . instances
48,793
def save_all(polyfile):
    """Save all registered polygon filters to `polyfile`.

    Raises
    ------
    PolygonFilterError: if no polygon filters exist.
    """
    if len(PolygonFilter.instances) == 0:
        # fixed grammar of the error message ("are not" -> "are no")
        raise PolygonFilterError("There are no polygon filters to save.")
    for p in PolygonFilter.instances:
        polyobj = p.save(polyfile, ret_fobj=True)
        polyobj.close()
Save all polygon filters
48,794
def get_resource(request):
    """Pyramid view: retrieve a stored file's data by its hash.

    Looks the file up in the database; responds with 404 when the hash
    is unknown, otherwise returns the raw bytes with the stored
    mimetype.
    """
    hash = request.matchdict['hash']
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            args = dict(hash=hash)
            cursor.execute(SQL['get-resource'], args)
            try:
                mimetype, file = cursor.fetchone()
            except TypeError:
                # fetchone() returned None -> no row for this hash
                raise httpexceptions.HTTPNotFound()
    resp = request.response
    resp.status = "200 OK"
    resp.content_type = mimetype
    # `file` is presumably a buffer-like DB object; [:] yields the raw
    # bytes -- TODO confirm against the driver in use
    resp.body = file[:]
    return resp
Retrieve a file's data.
48,795
def get_style_bits(match=False, comment=False, selected=False, data=False,
                   diff=False, user=0):
    """Return an int value with the requested style bits set.

    The module-level masks (user_bit_mask, diff_bit_mask, ...) define
    the bit layout; `user` is ANDed into the user-bit field.
    """
    bits = 0
    if user:
        bits |= user & user_bit_mask
    if diff:
        bits |= diff_bit_mask
    if match:
        bits |= match_bit_mask
    if comment:
        bits |= comment_bit_mask
    if data:
        # the data style lives inside the user-bit field
        bits |= data_style & user_bit_mask
    if selected:
        bits |= selected_bit_mask
    return bits
Return an int value that contains the specified style bits set .
48,796
def get_style_mask(**kwargs):
    """Return the bit mask that, ANDed with data, clears the selected
    style bits.

    When a truthy `user` is given, the whole user-bit field is cleared
    as well; otherwise user bits are preserved.
    """
    bits = get_style_bits(**kwargs)
    if kwargs.get('user'):
        bits |= user_bit_mask
    else:
        bits &= (0xff ^ user_bit_mask)
    return 0xff ^ bits
Get the bit mask that when anded with data will turn off the selected bits
48,797
def byte_bounds_offset(self):
    """Return (start, end) offsets of this segment's data into the
    base array's data."""
    if self.data.base is None:
        # not a view: the segment spans the whole backing array
        backing = self.data.np_data if self.is_indexed else self.data
        return 0, len(backing)
    return (int(self.data_start - self.base_start),
            int(self.data_end - self.base_start))
Return start and end offsets of this segment's data into the base array's data.
48,798
def get_raw_index(self, i):
    """Map an index into this segment to an index into the base
    array's raw data."""
    if self.is_indexed:
        # indexed segments carry an explicit order array
        return int(self.order[i])
    if self.data.base is None:
        # not a view: identity mapping
        return int(i)
    return int(self.data_start - self.base_start + i)
Get index into base array s raw data given the index into this segment
48,799
def get_indexes_from_base(self):
    """Return an array of base-array indexes covering this segment,
    as if this raw data were indexed.

    For indexed segments this is a copy of the order array; otherwise
    it is the contiguous uint32 range starting at the segment's offset
    into the base array.
    """
    if self.is_indexed:
        # BUG FIX: the original did `np.copy(self.order[i])` with an
        # undefined local `i` (NameError); the whole order array is
        # the index list.
        return np.copy(self.order)
    if self.data.base is None:
        start = 0
    else:
        start = self.get_raw_index(0)
    return np.arange(start, start + len(self), dtype=np.uint32)
Get array of indexes from the base array as if this raw data were indexed .