idx (int64, 0 to 63k) | question (string, lengths 53 to 5.28k) | target (string, lengths 5 to 805) |
|---|---|---|
9,800 | def joint_hex ( x , y , ** kwargs ) : return sns . jointplot ( x , y , kind = 'hex' , stat_func = None , marginal_kws = { 'kde' : True } , ** kwargs ) | Seaborn Joint Hexplot with marginal KDE + hists . |
9,801 | def execute ( self , time_interval ) : with WorkflowMonitor ( self ) : for factor in self . factors [ : : - 1 ] : if factor . tool . name == "asset_writer" : factor . execute ( time_interval ) for factor in self . factors [ : : - 1 ] : if factor . sink is None or factor . sink . is_leaf and factor . tool . name != "asset_writer" : factor . execute ( time_interval ) | Here we execute the factors over the streams in the workflow Execute the factors in reverse order . We can t just execute the last factor because there may be multiple leaf factors that aren t triggered by upstream computations . |
9,802 | def _add_node ( self , node ) : self . nodes [ node . node_id ] = node logging . info ( "Added node with id {} containing {} streams" . format ( node . node_id , len ( node . streams ) ) ) | Add a node to the workflow |
9,803 | def _add_factor ( self , factor ) : self . factors . append ( factor ) logging . info ( "Added factor with tool {} " . format ( factor . tool ) ) | Add a factor to the workflow |
9,804 | def create_factor_general ( self , * args , ** kwargs ) : try : return self . create_factor ( * args , ** kwargs ) except TypeError : pass try : return self . create_multi_output_factor ( * args , ** kwargs ) except TypeError : pass try : return self . create_node_creation_factor ( * args , ** kwargs ) except TypeError : pass raise FactorDefinitionError ( "Could not find a matching signature" ) | General signature for factor creation that tries each of the factor creation types using duck typing |
9,805 | def create_multi_output_factor ( self , tool , source , splitting_node , sink ) : if source and not isinstance ( source , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( source ) ) ) if not isinstance ( sink , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( sink ) ) ) if not isinstance ( tool , MultiOutputTool ) : raise ValueError ( "Expected MultiOutputTool, got {}" . format ( type ( tool ) ) ) input_plates = source . plates if source else [ ] output_plates = sink . plates if len ( input_plates ) > 1 : raise NotImplementedError if len ( output_plates ) == 0 : raise ValueError ( "No output plate found" ) if len ( output_plates ) == 1 : if not self . check_multi_output_plate_compatibility ( input_plates , output_plates [ 0 ] ) : raise IncompatiblePlatesError ( "Parent plate does not match input plate" ) factor = MultiOutputFactor ( tool = tool , source_node = source , splitting_node = splitting_node , sink_node = sink , input_plate = input_plates [ 0 ] if input_plates else None , output_plates = output_plates [ 0 ] ) else : if len ( output_plates ) > 2 : raise NotImplementedError if len ( input_plates ) != 1 : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) if output_plates [ 0 ] == input_plates [ 0 ] : output_plate = output_plates [ 1 ] else : if output_plates [ 1 ] . plate_id != input_plates [ 0 ] . plate_id : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) output_plate = output_plates [ 0 ] output_plates [ 1 ] , output_plates [ 0 ] = output_plates [ 0 ] , output_plates [ 1 ] if not output_plate . is_root : match = False parent = input_plates [ 0 ] . parent while parent is not None : if parent . plate_id == output_plate . parent . plate_id : match = True break parent = parent . parent if not match : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) factor = MultiOutputFactor ( tool = tool , source_node = source , sink_node = sink , splitting_node = splitting_node , input_plate = input_plates [ 0 ] , output_plates = output_plates ) self . _add_factor ( factor ) return factor | Creates a multi - output factor . This takes a single node applies a MultiOutputTool to create multiple nodes on a new plate Instantiates a single tool for all of the input plate values and connects the source and sink nodes with that tool . |
9,806 | def create_node_creation_factor ( self , tool , source , output_plate , plate_manager ) : if not isinstance ( tool , PlateCreationTool ) : raise ValueError ( "Expected PlateCreationTool, got {}" . format ( type ( tool ) ) ) input_plates = source . plates if source else [ ] if len ( input_plates ) > 1 : raise NotImplementedError factor = NodeCreationFactor ( tool = tool , source_node = source , input_plate = input_plates [ 0 ] if input_plates else None , output_plate = output_plate , plate_manager = plate_manager ) self . _add_factor ( factor ) return factor | Creates a factor that itself creates an output node and ensures that the plate for the output node exists along with all relevant meta - data |
9,807 | def check_plate_compatibility ( tool , source_plate , sink_plate ) : if sink_plate == source_plate . parent : return None if sink_plate . meta_data_id == source_plate . meta_data_id : if sink_plate . is_sub_plate ( source_plate ) : return None return "Sink plate {} is not a simplification of source plate {}" . format ( sink_plate . plate_id , source_plate . plate_id ) meta_data_diff = set ( source_plate . ancestor_meta_data_ids ) - set ( sink_plate . ancestor_meta_data_ids ) if len ( meta_data_diff ) == 1 : if tool . aggregation_meta_data not in meta_data_diff : return "Aggregate tool meta data ({}) " "does not match the diff between source and sink plates ({})" . format ( tool . aggregation_meta_data , list ( meta_data_diff ) [ 0 ] ) else : return "{} not in source's parent plates" . format ( sink_plate . plate_id ) | Checks whether the source and sink plate are compatible given the tool |
9,808 | def check_multi_output_plate_compatibility ( source_plates , sink_plate ) : if len ( source_plates ) == 0 : if sink_plate . parent is not None : return False else : if sink_plate . parent is None : return False else : if sink_plate . parent . plate_id != source_plates [ 0 ] . plate_id : return False return True | Check multi - output plate compatibility . This ensures that the source plates and sink plates match for a multi - output plate |
9,809 | def to_dict ( self , tool_long_names = True ) : d = dict ( nodes = [ ] , factors = [ ] , plates = defaultdict ( list ) ) for node in self . nodes : node_id = self . nodes [ node ] . node_id d [ 'nodes' ] . append ( { 'id' : node_id } ) for plate_id in self . nodes [ node ] . plate_ids : d [ 'plates' ] [ plate_id ] . append ( { 'id' : node_id , 'type' : 'node' } ) for factor in self . factors : tool = str ( factor . tool ) if tool_long_names else factor . tool . name try : sources = [ s . node_id for s in factor . sources ] except AttributeError : if factor . source : sources = [ factor . source . node_id ] else : sources = [ ] d [ 'factors' ] . append ( { 'id' : tool , 'sources' : sources , 'sink' : factor . sink . node_id } ) try : if factor . plates : for plate in factor . plates : d [ 'plates' ] [ plate . plate_id ] . append ( { 'id' : tool , 'type' : 'factor' } ) else : d [ 'plates' ] [ 'root' ] . append ( { 'id' : tool , 'type' : 'factor' } ) except AttributeError : pass d [ 'plates' ] = dict ( d [ 'plates' ] ) return d | Get a representation of the workflow as a dictionary for display purposes |
9,810 | def to_json ( self , formatter = None , tool_long_names = True , ** kwargs ) : d = self . to_dict ( tool_long_names = tool_long_names ) if formatter : d = formatter ( d ) return json . dumps ( d , ** kwargs ) | Get a JSON representation of the workflow |
9,811 | def parameters_dict ( self ) : d = { } for k , v in self . __dict__ . items ( ) : if not k . startswith ( "_" ) : d [ k ] = v return d | Get the tool parameters as a simple dictionary |
9,812 | def parameters ( self ) : parameters = [ ] for k , v in self . __dict__ . items ( ) : if k . startswith ( "_" ) : continue is_function = False is_set = False if callable ( v ) : value = pickle . dumps ( func_dump ( v ) ) is_function = True elif isinstance ( v , set ) : value = list ( v ) is_set = True else : value = v parameters . append ( dict ( key = k , value = value , is_function = is_function , is_set = is_set ) ) return parameters | Get the tool parameters |
9,813 | def parameters_from_model ( parameters_model ) : parameters = { } for p in parameters_model : if p . is_function : code , defaults , closure = pickle . loads ( p . value ) parameters [ p . key ] = func_load ( code , defaults , closure , globs = globals ( ) ) elif p . is_set : parameters [ p . key ] = set ( p . value ) else : parameters [ p . key ] = p . value return parameters | Get the tool parameters model from dictionaries |
9,814 | def get_model ( self ) : return ToolModel ( name = self . name , version = "0.0.0" , parameters = self . parameters_from_dicts ( self . parameters ) ) | Gets the mongoengine model for this tool which serializes parameters that are functions |
9,815 | def write_to_history ( ** kwargs ) : from hyperstream import HyperStream hs = HyperStream ( loglevel = logging . CRITICAL , file_logger = False , console_logger = False , mqtt_logger = None ) if hs . current_session : hs . current_session . write_to_history ( ** kwargs ) | Write to the history of executions of this tool |
9,816 | def plot_dom_parameters ( data , detector , filename , label , title , vmin = 0.0 , vmax = 10.0 , cmap = 'RdYlGn_r' , under = 'deepskyblue' , over = 'deeppink' , underfactor = 1.0 , overfactor = 1.0 , missing = 'lightgray' , hide_limits = False ) : x , y , _ = zip ( * detector . doms . values ( ) ) fig , ax = plt . subplots ( figsize = ( 10 , 6 ) ) cmap = plt . get_cmap ( cmap ) cmap . set_over ( over , 1.0 ) cmap . set_under ( under , 1.0 ) m_size = 100 scatter_args = { 'edgecolors' : 'None' , 'vmin' : vmin , 'vmax' : vmax , } sc_inactive = ax . scatter ( x , y , c = missing , label = 'missing' , s = m_size * 0.9 , ** scatter_args ) xa , ya = map ( np . array , zip ( * data . keys ( ) ) ) zs = np . array ( list ( data . values ( ) ) ) in_range_idx = np . logical_and ( zs >= vmin , zs <= vmax ) sc = ax . scatter ( xa [ in_range_idx ] , ya [ in_range_idx ] , c = zs [ in_range_idx ] , cmap = cmap , s = m_size , ** scatter_args ) if not hide_limits : under_idx = zs < vmin ax . scatter ( xa [ under_idx ] , ya [ under_idx ] , c = under , label = '< {0}' . format ( vmin ) , s = m_size * underfactor , ** scatter_args ) over_idx = zs > vmax ax . scatter ( xa [ over_idx ] , ya [ over_idx ] , c = over , label = '> {0}' . format ( vmax ) , s = m_size * overfactor , ** scatter_args ) cb = plt . colorbar ( sc ) cb . set_label ( label ) ax . set_title ( "{0}\n{1} UTC" . format ( title , datetime . utcnow ( ) . strftime ( "%c" ) ) ) ax . set_xlabel ( "DU" ) ax . set_ylabel ( "DOM" ) ax . set_ylim ( - 2 ) ax . set_yticks ( range ( 1 , 18 + 1 ) ) major_locator = pylab . MaxNLocator ( integer = True ) sc_inactive . axes . xaxis . set_major_locator ( major_locator ) ax . legend ( bbox_to_anchor = ( 0. , - .16 , 1. , .102 ) , loc = 1 , ncol = 2 , mode = "expand" , borderaxespad = 0. ) fig . tight_layout ( ) plt . savefig ( filename , dpi = 120 , bbox_inches = "tight" ) plt . close ( 'all' ) | Creates a plot in the classical monitoring . km3net . de style . |
9,817 | def make_dom_map ( pmt_directions , values , nside = 512 , d = 0.2 , smoothing = 0.1 ) : import healpy as hp discs = [ hp . query_disc ( nside , dir , 0.2 ) for dir in pmt_directions ] npix = hp . nside2npix ( nside ) pixels = np . zeros ( npix ) for disc , value in zip ( discs , values ) : for d in disc : pixels [ d ] = value if smoothing > 0 : return hp . sphtfunc . smoothing ( pixels , fwhm = smoothing , iter = 1 ) return pixels | Create a mollweide projection of a DOM with given PMTs . |
9,818 | def calculated_intervals ( self , value ) : if not value : self . _calculated_intervals = TimeIntervals ( ) return if isinstance ( value , TimeInterval ) : value = TimeIntervals ( [ value ] ) elif isinstance ( value , TimeIntervals ) : pass elif isinstance ( value , list ) : value = TimeIntervals ( value ) else : raise TypeError ( "Expected list/TimeInterval/TimeIntervals, got {}" . format ( type ( value ) ) ) for interval in value : if interval . end > utcnow ( ) : raise ValueError ( "Calculated intervals should not be in the future" ) self . _calculated_intervals = value | Set the calculated intervals This will be written to the stream_status collection if it s in the database channel |
9,819 | def purge ( self ) : self . channel . purge_stream ( self . stream_id , remove_definition = False , sandbox = None ) | Purge the stream . This removes all data and clears the calculated intervals |
9,820 | def window ( self , time_interval = None , force_calculation = False ) : if not time_interval : if self . calculated_intervals : time_interval = self . calculated_intervals [ - 1 ] else : raise ValueError ( "No calculations have been performed and no time interval was provided" ) elif isinstance ( time_interval , TimeInterval ) : time_interval = TimeInterval ( time_interval . start , time_interval . end ) elif isinstance ( time_interval , Iterable ) : time_interval = parse_time_tuple ( * time_interval ) if isinstance ( time_interval , RelativeTimeInterval ) : raise NotImplementedError elif isinstance ( time_interval , RelativeTimeInterval ) : raise NotImplementedError else : raise TypeError ( "Expected TimeInterval or (start, end) tuple of type str or datetime, got {}" . format ( type ( time_interval ) ) ) return StreamView ( stream = self , time_interval = time_interval , force_calculation = force_calculation ) | Gets a view on this stream for the time interval given |
9,821 | def load ( self ) : with switch_db ( StreamDefinitionModel , 'hyperstream' ) : self . mongo_model = StreamDefinitionModel . objects . get ( __raw__ = self . stream_id . as_raw ( ) ) self . _calculated_intervals = self . mongo_model . get_calculated_intervals ( ) | Load the stream definition from the database |
9,822 | def calculated_intervals ( self ) : if self . _calculated_intervals is None : logging . debug ( "get calculated intervals" ) self . load ( ) return self . mongo_model . get_calculated_intervals ( ) return self . _calculated_intervals | Gets the calculated intervals from the database |
9,823 | def GenericPump ( filenames , use_jppy = False , name = "GenericPump" , ** kwargs ) : if isinstance ( filenames , str ) : filenames = [ filenames ] try : iter ( filenames ) except TypeError : log . critical ( "Don't know how to iterate through filenames." ) raise TypeError ( "Invalid filenames." ) extensions = set ( os . path . splitext ( fn ) [ 1 ] for fn in filenames ) if len ( extensions ) > 1 : log . critical ( "Mixed filetypes, please use only files of the same type" ) raise IOError ( "Mixed filetypes." ) extension = list ( extensions ) [ 0 ] io = { '.evt' : EvtPump , '.h5' : HDF5Pump , '.root' : EventPump if use_jppy else AanetPump , '.dat' : DAQPump , '.dqd' : CLBPump , } if extension not in io : log . critical ( "No pump found for file extension '{0}'" . format ( extension ) ) raise ValueError ( "Unknown filetype" ) missing_files = [ fn for fn in filenames if not os . path . exists ( fn ) ] if missing_files : if len ( missing_files ) == len ( filenames ) : message = "None of the given files could be found." log . critical ( message ) raise SystemExit ( message ) else : log . warning ( "The following files are missing and ignored: {}" . format ( ', ' . join ( missing_files ) ) ) input_files = set ( filenames ) - set ( missing_files ) if len ( input_files ) == 1 : return io [ extension ] ( filename = filenames [ 0 ] , name = name , ** kwargs ) else : return io [ extension ] ( filenames = filenames , name = name , ** kwargs ) | A generic pump which utilises the appropriate pump . |
9,824 | def read_calibration ( detx = None , det_id = None , from_file = False , det_id_table = None ) : from km3pipe . calib import Calibration if not ( detx or det_id or from_file ) : return None if detx is not None : return Calibration ( filename = detx ) if from_file : det_ids = np . unique ( det_id_table ) if len ( det_ids ) > 1 : log . critical ( "Multiple detector IDs found in events." ) det_id = det_ids [ 0 ] if det_id is not None : if det_id < 0 : log . warning ( "Negative detector ID found ({0}). This is a MC " "detector and cannot be retrieved from the DB." . format ( det_id ) ) return None return Calibration ( det_id = det_id ) return None | Retrive calibration from file the DB . |
9,825 | def edit ( self , text ) : if isinstance ( text , unicode ) : text = text . encode ( self . _encoding ) if self . _editor is None : printer . p ( 'Warning: no editor found, skipping edit' ) return text with tempfile . NamedTemporaryFile ( mode = 'w+' , suffix = 'kolekto-edit' ) as ftmp : ftmp . write ( text ) ftmp . flush ( ) subprocess . Popen ( [ self . _editor , ftmp . name ] ) . wait ( ) ftmp . seek ( 0 ) edited = ftmp . read ( ) return edited | Edit a text using an external editor . |
9,826 | def register ( self , plugin ) : self . needed_listeners -= plugin . listeners self . needed_messengers -= plugin . messengers if self . needed_messengers == self . needed_listeners == set ( ) : self . valid = True self . dispatcher . register ( plugin ) | Take a feather . plugin . Plugin and tell our dispatcher about it . |
9,827 | def start ( self ) : if not self . valid : err = ( "\nMessengers and listeners that still need set:\n\n" "messengers : %s\n\n" "listeners : %s\n" ) raise InvalidApplication ( err % ( self . needed_messengers , self . needed_listeners ) ) self . dispatcher . start ( ) | If we have a set of plugins that provide our expected listeners and messengers tell our dispatcher to start up . Otherwise raise InvalidApplication |
9,828 | def execute_condition ( cond ) : condition_method = 'rulengine.conditions.c_{0}_{1}' . format ( cond . data_type , cond . operator ) try : func = import_class ( condition_method ) except AttributeError : condition_method = 'rulengine.conditions.c_{0}' . format ( cond . data_type ) func = import_class ( condition_method ) executable_cond = convert_condition_to_executable ( cond ) return func ( executable_cond ) | Get a rule instance for given operator and return condition lambda func |
9,829 | def makemigrations ( self ) : UNCHANGED = [ ] with Session ( self . settings ) as conn : cursor = conn . cursor ( ) for name , model in self . models . items ( ) : print ( "Running migrations... on table: %s" % model . __name__ . lower ( ) ) columns = self . description ( model ) table = name . lower ( ) QUERY = "CREATE TABLE IF NOT EXISTS %s (" % table for field , FieldType in model . columns . items ( ) : QUERY += "%s %s, " % ( field , FieldType ) if columns : self . UpdateColums ( cursor , field , FieldType , model , columns , UNCHANGED ) QUERY = QUERY [ : - 2 ] + ") ENGINE=InnoDB" print ( QUERY ) try : cursor . execute ( QUERY ) except mysql . Error as e : raise e return True | Do database migrations 1 . Creates new tables from models 2 . Updates columns and columns |
9,830 | def UpdateColums ( self , cursor , field , FieldType , model , columns , UNCHANGED ) : table = model . __name__ . lower ( ) if field not in columns : n = UNCHANGED . pop ( ) new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}" cursor . execute ( new_sql ) print ( "\n\n" , new_sql ) else : UNCHANGED . append ( field ) TCOLS = set ( columns ) - set ( model . _fields ) for col in TCOLS : columns . remove ( col ) QRY = f"ALTER TABLE {table} DROP COLUMN {col}" cursor . execute ( QRY ) print ( "\n\n" , QRY ) return True | Updates the columns . Dont call directly |
9,831 | def srv_event ( token , hits , url = RBA_URL ) : if url is None : log . error ( "Please provide a valid RainbowAlga URL." ) return ws_url = url + '/message' if isinstance ( hits , pd . core . frame . DataFrame ) : pos = [ tuple ( x ) for x in hits [ [ 'x' , 'y' , 'z' ] ] . values ] time = list ( hits [ 'time' ] ) tot = list ( hits [ 'tot' ] ) elif isinstance ( hits , Table ) : pos = list ( zip ( hits . pos_x , hits . pos_y , hits . pos_z ) ) time = list ( hits . time ) tot = list ( hits . tot ) else : log . error ( "No calibration information found in hits (type: {0})" . format ( type ( hits ) ) ) return event = { "hits" : { 'pos' : pos , 'time' : time , 'tot' : tot , } } srv_data ( ws_url , token , event , 'event' ) | Serve event to RainbowAlga |
9,832 | def srv_data ( url , token , data , kind ) : ws = websocket . create_connection ( url ) message = { 'token' : token , 'data' : data , 'kind' : kind } ws . send ( pd . io . json . dumps ( message ) ) ws . close ( ) | Serve data to RainbowAlga |
9,833 | def raw_message_to ( self , token , message ) : if token not in self . _clients : log . critical ( "Client with token '{0}' not found!" . format ( token ) ) return client = self . _clients [ token ] try : client . write_message ( message ) except ( AttributeError , tornado . websocket . WebSocketClosedError ) : log . error ( "Lost connection to client '{0}'" . format ( client ) ) else : print ( "Sent {0} bytes." . format ( len ( message ) ) ) | Convert message to JSON and send it to the client with token |
9,834 | def message ( self , data , kind = "info" ) : message = pd . io . json . dumps ( { 'kind' : kind , 'data' : data } ) print ( "Sent {0} bytes." . format ( len ( message ) ) ) self . write_message ( message ) | Convert message to json and send it to the clients |
9,835 | def execute_once ( self , string ) : for rule in self . rules : if rule [ 0 ] in string : pos = string . find ( rule [ 0 ] ) self . last_rule = rule return string [ : pos ] + rule [ 1 ] + string [ pos + len ( rule [ 0 ] ) : ] self . last_rule = None return string | Execute only one rule . |
9,836 | def compile ( self ) : result = TEMPLATE for rule in self . rules : if rule [ 2 ] : arrow = '=>' else : arrow = '->' repr_rule = repr ( rule [ 0 ] + arrow + rule [ 1 ] ) result += "algo.add_rule({repr_rule})\n" . format ( repr_rule = repr_rule ) result += "for line in stdin:\n" result += " print(algo.execute(''.join(line.split())))" return result | Return python code for create and execute algo . |
9,837 | def get_sources ( self , plate , plate_value , sources = None ) : if sources is None : sources = [ ] if self . sources : for si , source in enumerate ( self . sources ) : if len ( source . streams ) == 1 and None in source . streams : sources . append ( source . streams [ None ] ) elif plate_value in source . streams : sources . append ( source . streams [ plate_value ] ) else : pass if not plate . is_root : parent_plate_value = tuple ( pv for pv in plate_value if pv [ 0 ] != plate . meta_data_id ) sources = self . get_sources ( plate . parent , parent_plate_value , sources ) return sources | Gets the source streams for a given plate value on a plate . Also populates with source streams that are valid for the parent plates of this plate with the appropriate meta - data for the parent plate . |
9,838 | def get_global_sources ( self ) : sources = [ ] if self . sources : for source in self . sources : if None in source . streams : sources . append ( source . streams [ None ] ) return sources | Gets streams that live outside of the plates |
9,839 | def get_alignment_stream ( self , plate = None , plate_value = None ) : if not self . alignment_node : return None if plate is not None or plate_value is not None : raise NotImplementedError ( "Currently only alignment nodes outside of plates are supported" ) return self . alignment_node . streams [ plate ] | Gets the alignment stream for a particular plate value |
9,840 | def get_splitting_stream ( self , input_plate_value ) : if not self . splitting_node : return None if len ( self . splitting_node . plates ) == 0 : return self . splitting_node . streams [ None ] if len ( self . splitting_node . plates ) > 1 : raise ValueError ( "Splitting node cannot live on multiple plates for factor {}" . format ( self . factor_id ) ) if not self . input_plate and len ( self . splitting_node . plates ) > 0 : raise ValueError ( "Splitting node cannot live on a plate if there is no input plate" ) splitting_plate = self . splitting_node . plates [ 0 ] if self . input_plate == splitting_plate : splitting_stream = self . splitting_node . streams [ input_plate_value ] else : if splitting_plate . is_child ( self . input_plate ) : ppv = filter ( lambda x : all ( p in input_plate_value for p in x ) , self . input_plate . parent . values ) if len ( ppv ) != 1 : raise ValueError ( "Parent plate value not found" ) splitting_stream = self . splitting_node . streams [ ppv [ 0 ] ] elif splitting_plate . is_descendant ( self . input_plate ) : ppv = filter ( lambda x : all ( p in input_plate_value for p in x ) , self . input_plate . parent . values ) if len ( ppv ) != 1 : raise ValueError ( "Parent plate value not found" ) splitting_stream = self . splitting_node . streams [ ppv ] else : raise IncompatiblePlatesError ( "Splitting node plate {} does not match input plate {} for factor {}" . format ( self . input_plate , self . splitting_node . plates [ 0 ] , self . factor_id ) ) return splitting_stream | Get the splitting stream |
9,841 | def update_computed_intervals ( sinks , time_interval ) : for sink in sinks : sink . calculated_intervals += time_interval required_intervals = TimeIntervals ( [ time_interval ] ) - sink . calculated_intervals if not required_intervals . is_empty : raise RuntimeError ( 'Tool execution did not cover the time interval {}' . format ( required_intervals ) ) | Update computed intervals |
9,842 | def getEvoBibAsBibtex ( * keys , ** kw ) : res = [ ] for key in keys : bib = get_url ( "http://bibliography.lingpy.org/raw.php?key=" + key , log = kw . get ( 'log' ) ) . text try : res . append ( '@' + bib . split ( '@' ) [ 1 ] . split ( '</pre>' ) [ 0 ] ) except IndexError : res . append ( '@misc{' + key + ',\nNote={missing source}\n\n}' ) return '\n\n' . join ( res ) | Download bibtex format and parse it from EvoBib |
9,843 | def download_and_unpack ( self , url , * paths , ** kw ) : with self . temp_download ( url , 'ds.zip' , log = kw . pop ( 'log' , None ) ) as zipp : with TemporaryDirectory ( ) as tmpdir : with zipfile . ZipFile ( zipp . as_posix ( ) ) as zipf : for path in paths : zipf . extract ( as_posix ( path ) , path = tmpdir . as_posix ( ) ) copy ( tmpdir . joinpath ( path ) , self ) | Download a zipfile and immediately unpack selected content . |
9,844 | def getRanking ( self , profile , sampleFileName = None ) : if sampleFileName != None : candScoresMap = self . getCandScoresMapFromSamplesFile ( profile , sampleFileName ) else : candScoresMap = self . getCandScoresMap ( profile ) reverseCandScoresMap = dict ( ) for key , value in candScoresMap . items ( ) : if value not in reverseCandScoresMap . keys ( ) : reverseCandScoresMap [ value ] = [ key ] else : reverseCandScoresMap [ value ] . append ( key ) if self . maximizeCandScore == True : sortedCandScores = sorted ( reverseCandScoresMap . keys ( ) , reverse = True ) else : sortedCandScores = sorted ( reverseCandScoresMap . keys ( ) ) ranking = [ ] for candScore in sortedCandScores : for cand in reverseCandScoresMap [ candScore ] : ranking . append ( cand ) return ranking | Returns a list of lists that orders all candidates in tiers from best to worst when we use MCMC approximation to compute Bayesian utilities for an election profile . |
9,845 | def getCandScoresMap ( self , profile ) : wmg = profile . getWmg ( True ) V = self . getInitialSample ( wmg ) utilities = dict ( ) for cand in profile . candMap . keys ( ) : utilities [ cand ] = 0.0 for i in range ( 0 , self . burnIn ) : V = self . sampleGenerator . getNextSample ( V ) for i in range ( 0 , self . n2 ) : for j in range ( 0 , self . n1 ) : V = self . sampleGenerator . getNextSample ( V ) for cand in profile . candMap . keys ( ) : utilities [ cand ] += self . utilityFunction . getUtility ( [ cand ] , V ) for cand in profile . candMap . keys ( ) : utilities [ cand ] = utilities [ cand ] / self . n2 return utilities | Returns a dictonary that associates the integer representation of each candidate with the Bayesian utilities we approximate from our sampling of the profile . |
9,846 | def getCandScoresMapFromSamplesFile ( self , profile , sampleFileName ) : wmg = profile . getWmg ( True ) utilities = dict ( ) for cand in wmg . keys ( ) : utilities [ cand ] = 0.0 sampleFile = open ( sampleFileName ) for i in range ( 0 , SAMPLESFILEMETADATALINECOUNT ) : sampleFile . readline ( ) for i in range ( 0 , self . burnIn ) : sampleFile . readline ( ) numSamples = 0 for i in range ( 0 , self . n2 * self . n1 ) : line = sampleFile . readline ( ) if i % self . n1 != 0 : continue sample = json . loads ( line ) for cand in wmg . keys ( ) : utilities [ cand ] += self . utilityFunction . getUtility ( [ cand ] , sample ) numSamples += 1 sampleFile . close ( ) for key in utilities . keys ( ) : utilities [ key ] = utilities [ key ] / numSamples return utilities | Returns a dictonary that associates the integer representation of each candidate with the Bayesian utilities we approximate from the samples we generated into a file . |
9,847 | def printMcmcSamplesToFile ( self , profile , numSamples , outFileName ) : wmg = profile . getWmg ( True ) V = self . getInitialSample ( wmg ) outFile = open ( outFileName , 'w' ) outFile . write ( "m," + str ( profile . numCands ) + '\n' ) outFile . write ( "phi," + str ( self . phi ) + '\n' ) outFile . write ( "numSamples," + str ( numSamples ) ) for i in range ( 0 , numSamples ) : V = self . sampleGenerator . getNextSample ( V ) outFile . write ( "\n" + json . dumps ( V ) ) outFile . close ( ) | Generate samples to a file . |
9,848 | def kendallTau ( self , orderVector , wmgMap ) : discordantPairs = 0.0 for i in itertools . combinations ( orderVector , 2 ) : discordantPairs = discordantPairs + max ( 0 , wmgMap [ i [ 1 ] ] [ i [ 0 ] ] ) return discordantPairs | Given a ranking for a single vote and a wmg for the entire election calculate the kendall - tau distance . a . k . a the number of discordant pairs between the wmg for the vote and the wmg for the election . Currently we expect the vote to be a strict complete ordering over the candidates . |
9,849 | def getInitialSample ( self , wmg ) : V = copy . deepcopy ( wmg . keys ( ) ) random . shuffle ( V ) return V | Generate an initial sample for the Markov chain . This function will return a list containing integer representations of each candidate in order of their rank in the current vote from first to last . The list will be a complete strict ordering over the candidates . Initially we rank the candidates in random order . |
9,850 | def getInitialSample ( self , wmg ) : cands = range ( len ( wmg ) ) allPairs = itertools . combinations ( cands , 2 ) V = self . createBinaryRelation ( len ( cands ) ) for pair in allPairs : if wmg [ pair [ 0 ] + 1 ] [ pair [ 1 ] + 1 ] > 0 : V [ pair [ 0 ] ] [ pair [ 1 ] ] = 1 V [ pair [ 1 ] ] [ pair [ 0 ] ] = 0 else : V [ pair [ 0 ] ] [ pair [ 1 ] ] = 0 V [ pair [ 1 ] ] [ pair [ 0 ] ] = 1 return V | Generate an initial sample for the Markov chain . This function will return a two - dimensional array of integers such that for each pair of candidates cand1 and cand2 the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise . |
9,851 | def filter_input ( keys , raw ) : if len ( keys ) == 1 : if keys [ 0 ] in UI . keys [ 'up' ] : keys [ 0 ] = 'up' elif keys [ 0 ] in UI . keys [ 'down' ] : keys [ 0 ] = 'down' elif len ( keys [ 0 ] ) == 4 and keys [ 0 ] [ 0 ] == 'mouse press' : if keys [ 0 ] [ 1 ] == 4 : keys [ 0 ] = 'up' elif keys [ 0 ] [ 1 ] == 5 : keys [ 0 ] = 'down' return keys | Adds fancy mouse wheel functionality and VI navigation to ListBox |
9,852 | def wordlist2cognates ( wordlist , source , expert = 'expert' , ref = 'cogid' ) : for k in wordlist : yield dict ( Form_ID = wordlist [ k , 'lid' ] , ID = k , Form = wordlist [ k , 'ipa' ] , Cognateset_ID = '{0}-{1}' . format ( slug ( wordlist [ k , 'concept' ] ) , wordlist [ k , ref ] ) , Cognate_Detection_Method = expert , Source = source ) | Turn a wordlist into a cognate set list using the cldf parameters . |
9,853 | def _cldf2wld ( dataset ) : header = [ f for f in dataset . dataset . lexeme_class . fieldnames ( ) if f != 'ID' ] D = { 0 : [ 'lid' ] + [ h . lower ( ) for h in header ] } for idx , row in enumerate ( dataset . objects [ 'FormTable' ] ) : row = deepcopy ( row ) row [ 'Segments' ] = ' ' . join ( row [ 'Segments' ] ) D [ idx + 1 ] = [ row [ 'ID' ] ] + [ row [ h ] for h in header ] return D | Make lingpy - compatible dictionary out of cldf main data . |
9,854 | def _cldf2lexstat ( dataset , segments = 'segments' , transcription = 'value' , row = 'parameter_id' , col = 'language_id' ) : D = _cldf2wld ( dataset ) return lingpy . LexStat ( D , segments = segments , transcription = transcription , row = row , col = col ) | Read LexStat object from cldf dataset . |
9,855 | def _cldf2wordlist ( dataset , row = 'parameter_id' , col = 'language_id' ) : return lingpy . Wordlist ( _cldf2wld ( dataset ) , row = row , col = col ) | Read wordlist object from cldf dataset . |
9,856 | def iter_cognates ( dataset , column = 'Segments' , method = 'turchin' , threshold = 0.5 , ** kw ) : if method == 'turchin' : for row in dataset . objects [ 'FormTable' ] : sounds = '' . join ( lingpy . tokens2class ( row [ column ] , 'dolgo' ) ) if sounds . startswith ( 'V' ) : sounds = 'H' + sounds sounds = '-' . join ( [ s for s in sounds if s != 'V' ] [ : 2 ] ) cogid = slug ( row [ 'Parameter_ID' ] ) + '-' + sounds if '0' not in sounds : yield dict ( Form_ID = row [ 'ID' ] , Form = row [ 'Value' ] , Cognateset_ID = cogid , Cognate_Detection_Method = 'CMM' ) if method in [ 'sca' , 'lexstat' ] : lex = _cldf2lexstat ( dataset ) if method == 'lexstat' : lex . get_scorer ( ** kw ) lex . cluster ( method = method , threshold = threshold , ref = 'cogid' ) for k in lex : yield Cognate ( Form_ID = lex [ k , 'lid' ] , Form = lex [ k , 'value' ] , Cognateset_ID = lex [ k , 'cogid' ] , Cognate_Detection_Method = method + '-t{0:.2f}' . format ( threshold ) ) | Compute cognates automatically for a given dataset . |
9,857 | def iter_alignments ( dataset , cognate_sets , column = 'Segments' , method = 'library' ) : if not isinstance ( dataset , lingpy . basic . parser . QLCParser ) : wordlist = _cldf2wordlist ( dataset ) cognates = { r [ 'Form_ID' ] : r for r in cognate_sets } wordlist . add_entries ( 'cogid' , 'lid' , lambda x : cognates [ x ] [ 'Cognateset_ID' ] if x in cognates else 0 ) alm = lingpy . Alignments ( wordlist , ref = 'cogid' , row = 'parameter_id' , col = 'language_id' , segments = column . lower ( ) ) alm . align ( method = method ) for k in alm : if alm [ k , 'lid' ] in cognates : cognate = cognates [ alm [ k , 'lid' ] ] cognate [ 'Alignment' ] = alm [ k , 'alignment' ] cognate [ 'Alignment_Method' ] = method else : alm = lingpy . Alignments ( dataset , ref = 'cogid' ) alm . align ( method = method ) for cognate in cognate_sets : idx = cognate [ 'ID' ] or cognate [ 'Form_ID' ] cognate [ 'Alignment' ] = alm [ int ( idx ) , 'alignment' ] cognate [ 'Alignment_Method' ] = 'SCA-' + method | Function computes automatic alignments and writes them to file . |
9,858 | def tohdf5 ( input_files , output_file , n_events , conv_times_to_jte , ** kwargs ) : if len ( input_files ) > 1 : cprint ( "Preparing to convert {} files to HDF5." . format ( len ( input_files ) ) ) from km3pipe import Pipeline from km3pipe . io import GenericPump , HDF5Sink , HDF5MetaData for input_file in input_files : cprint ( "Converting '{}'..." . format ( input_file ) ) if len ( input_files ) > 1 : output_file = input_file + '.h5' meta_data = kwargs . copy ( ) meta_data [ 'origin' ] = input_file pipe = Pipeline ( ) pipe . attach ( HDF5MetaData , data = meta_data ) pipe . attach ( GenericPump , filenames = input_file , ** kwargs ) pipe . attach ( StatusBar , every = 250 ) if conv_times_to_jte : from km3modules . mc import MCTimeCorrector pipe . attach ( MCTimeCorrector ) pipe . attach ( HDF5Sink , filename = output_file , ** kwargs ) pipe . drain ( n_events ) cprint ( "File '{}' was converted." . format ( input_file ) ) | Convert Any file to HDF5 file |
9,859 | def update_channels ( self ) : logging . info ( "Updating channels" ) with switch_db ( StreamDefinitionModel , 'hyperstream' ) : for s in StreamDefinitionModel . objects ( ) : try : stream_id = StreamId ( name = s . stream_id . name , meta_data = s . stream_id . meta_data ) except AttributeError as e : raise e logging . debug ( "Processing {}" . format ( stream_id ) ) try : channel = self . get_channel ( s . channel_id ) except ChannelNotFoundError as e : logging . warn ( e ) continue last_accessed = utcnow ( ) last_updated = s . last_updated if s . last_updated else utcnow ( ) if stream_id in channel . streams : if isinstance ( channel , ( AssetsChannel , AssetsFileChannel ) ) : continue raise StreamAlreadyExistsError ( stream_id ) from . import MemoryChannel , DatabaseChannel if isinstance ( channel , MemoryChannel ) : channel . create_stream ( stream_id ) elif isinstance ( channel , DatabaseChannel ) : if channel == self . assets : stream_type = AssetStream else : stream_type = DatabaseStream channel . streams [ stream_id ] = stream_type ( channel = channel , stream_id = stream_id , calculated_intervals = None , last_accessed = last_accessed , last_updated = last_updated , sandbox = s . sandbox , mongo_model = s ) else : logging . warn ( "Unable to parse stream {}" . format ( stream_id ) ) | Pulls out all of the stream definitions from the database and populates the channels with stream references |
9,860 | def get_tool_class ( self , tool ) : if isinstance ( tool , string_types ) : tool_id = StreamId ( tool ) elif isinstance ( tool , StreamId ) : tool_id = tool else : raise TypeError ( tool ) tool_stream_view = None if tool_id in self . tools : tool_stream_view = self . tools [ tool_id ] . window ( ( MIN_DATE , self . tools . up_to_timestamp ) ) else : for tool_channel in self . tool_channels : if tool_channel == self . tools : continue if tool_id in tool_channel : tool_stream_view = tool_channel [ tool_id ] . window ( ( MIN_DATE , tool_channel . up_to_timestamp ) ) if tool_stream_view is None : raise ToolNotFoundError ( tool ) last = tool_stream_view . last ( ) if last is None : raise ToolNotFoundError ( tool ) return tool_stream_view . last ( ) . value | Gets the actual class which can then be instantiated with its parameters |
9,861 | def is_sub_plate ( self , other ) : if all ( v in set ( other . values ) for v in self . values ) : return True if all ( any ( all ( spv in m for spv in v ) for m in map ( set , other . values ) ) for v in self . values ) : return True if other in self . ancestor_plates : return True return False | Determines if this plate is a sub - plate of another plate - i . e . has the same meta data but a restricted set of values |
9,862 | def normalize_value ( value ) : value = str ( value ) value = value . casefold ( ) value = re . sub ( r'\/\s*\d+' , '' , value ) value = re . sub ( r'^0+([0-9]+)' , r'\1' , value ) value = re . sub ( r'^(\d+)\.+' , r'\1' , value ) value = re . sub ( r'[^\w\s]' , '' , value ) value = re . sub ( r'^the\s+' , '' , value ) value = re . sub ( r'^\s+' , '' , value ) value = re . sub ( r'\s+$' , '' , value ) value = re . sub ( r'\s+' , ' ' , value ) return value | Normalize metadata value to improve match accuracy . |
9,863 | def _init_from_file ( self , filename ) : if not filename . endswith ( "detx" ) : raise NotImplementedError ( 'Only the detx format is supported.' ) self . _open_file ( filename ) self . _extract_comments ( ) self . _parse_header ( ) self . _parse_doms ( ) self . _det_file . close ( ) | Create detector from detx file . |
9,864 | def _readline ( self , ignore_comments = True ) : while True : line = self . _det_file . readline ( ) if line == '' : return line line = line . strip ( ) if line == '' : continue if line . startswith ( '#' ) : if not ignore_comments : return line else : return line | The next line of the DETX file optionally ignores comments |
9,865 | def _extract_comments ( self ) : self . _det_file . seek ( 0 , 0 ) for line in self . _det_file . readlines ( ) : line = line . strip ( ) if line . startswith ( '#' ) : self . add_comment ( line [ 1 : ] ) | Retrieve all comments from the file |
9,866 | def _parse_header ( self ) : self . print ( "Parsing the DETX header" ) self . _det_file . seek ( 0 , 0 ) first_line = self . _readline ( ) try : self . det_id , self . n_doms = split ( first_line , int ) self . version = 'v1' except ValueError : det_id , self . version = first_line . split ( ) self . det_id = int ( det_id ) validity = self . _readline ( ) . strip ( ) self . valid_from , self . valid_until = split ( validity , float ) raw_utm_info = self . _readline ( ) . strip ( ) . split ( ' ' ) try : self . utm_info = UTMInfo ( * raw_utm_info [ 1 : ] ) except TypeError : log . warning ( "Missing UTM information." ) n_doms = self . _readline ( ) self . n_doms = int ( n_doms ) | Extract information from the header of the detector file |
9,867 | def dom_positions ( self ) : if not self . _dom_positions : for dom_id in self . dom_ids : mask = self . pmts . dom_id == dom_id pmt_pos = self . pmts [ mask ] . pos pmt_dir = self . pmts [ mask ] . dir centre = intersect_3d ( pmt_pos , pmt_pos - pmt_dir * 10 ) self . _dom_positions [ dom_id ] = centre return self . _dom_positions | The positions of the DOMs calculated from PMT directions . |
9,868 | def dom_table ( self ) : if self . _dom_table is None : data = defaultdict ( list ) for dom_id , ( du , floor , _ ) in self . doms . items ( ) : data [ 'dom_id' ] . append ( dom_id ) data [ 'du' ] . append ( du ) data [ 'floor' ] . append ( floor ) dom_position = self . dom_positions [ dom_id ] data [ 'pos_x' ] . append ( dom_position [ 0 ] ) data [ 'pos_y' ] . append ( dom_position [ 1 ] ) data [ 'pos_z' ] . append ( dom_position [ 2 ] ) self . _dom_table = Table ( data , name = 'DOMs' , h5loc = '/dom_table' ) return self . _dom_table | A Table containing DOM attributes |
9,869 | def com ( self ) : if self . _com is None : self . _com = np . mean ( self . pmts . pos , axis = 0 ) return self . _com | Center of mass calculated from the mean of the PMT positions |
9,870 | def xy_positions ( self ) : if self . _xy_positions is None or len ( self . _xy_positions ) == 0 : xy_pos = [ ] for dom_id , pos in self . dom_positions . items ( ) : if self . domid2floor ( dom_id ) == 1 : xy_pos . append ( np . array ( [ pos [ 0 ] , pos [ 1 ] ] ) ) self . _xy_positions = np . array ( xy_pos ) return self . _xy_positions | XY positions of the DUs given by the DOMs on floor 1 . |
9,871 | def translate_detector ( self , vector ) : vector = np . array ( vector , dtype = float ) self . pmts . pos_x += vector [ 0 ] self . pmts . pos_y += vector [ 1 ] self . pmts . pos_z += vector [ 2 ] self . reset_caches ( ) | Translate the detector by a given vector |
9,872 | def pmt_angles ( self ) : if self . _pmt_angles == [ ] : mask = ( self . pmts . du == 1 ) & ( self . pmts . floor == 1 ) self . _pmt_angles = self . pmts . dir [ mask ] return self . _pmt_angles | A list of PMT directions sorted by PMT channel on DU - 1 floor - 1 |
9,873 | def ascii ( self ) : comments = '' if self . version == 'v3' : for comment in self . comments : if not comment . startswith ( ' ' ) : comment = ' ' + comment comments += "#" + comment + "\n" if self . version == 'v1' : header = "{det.det_id} {det.n_doms}" . format ( det = self ) else : header = "{det.det_id} {det.version}" . format ( det = self ) header += "\n{0} {1}" . format ( self . valid_from , self . valid_until ) header += "\n" + str ( self . utm_info ) + "\n" header += str ( self . n_doms ) doms = "" for dom_id , ( line , floor , n_pmts ) in self . doms . items ( ) : doms += "{0} {1} {2} {3}\n" . format ( dom_id , line , floor , n_pmts ) for channel_id in range ( n_pmts ) : pmt_idx = self . _pmt_index_by_omkey [ ( line , floor , channel_id ) ] pmt = self . pmts [ pmt_idx ] doms += " {0} {1} {2} {3} {4} {5} {6} {7}" . format ( pmt . pmt_id , pmt . pos_x , pmt . pos_y , pmt . pos_z , pmt . dir_x , pmt . dir_y , pmt . dir_z , pmt . t0 ) if self . version == 'v3' : doms += " {0}" . format ( pmt . status ) doms += "\n" return comments + header + "\n" + doms | The ascii representation of the detector |
9,874 | def write ( self , filename ) : with open ( filename , 'w' ) as f : f . write ( self . ascii ) self . print ( "Detector file saved as '{0}'" . format ( filename ) ) | Save detx file . |
9,875 | def pmt_with_id ( self , pmt_id ) : try : return self . pmts [ self . _pmt_index_by_pmt_id [ pmt_id ] ] except KeyError : raise KeyError ( "No PMT found for ID: {0}" . format ( pmt_id ) ) | Get PMT with global pmt_id |
9,876 | def get_pmt ( self , dom_id , channel_id ) : du , floor , _ = self . doms [ dom_id ] pmt = self . pmts [ self . _pmt_index_by_omkey [ ( du , floor , channel_id ) ] ] return pmt | Return PMT with DOM ID and DAQ channel ID |
9,877 | def convert_mc_times_to_jte_times ( times_mc , evt_timestamp_in_ns , evt_mc_time ) : times_mc = np . array ( times_mc ) . astype ( float ) times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time return times_jte | Function that converts MC times to JTE times . |
9,878 | def iexists ( irods_path ) : try : subprocess . check_output ( 'ils {}' . format ( irods_path ) , shell = True , stderr = subprocess . PIPE , ) return True except subprocess . CalledProcessError : return False | Returns True of iRODS path exists otherwise False |
9,879 | def token_urlsafe ( nbytes = 32 ) : tok = os . urandom ( nbytes ) return base64 . urlsafe_b64encode ( tok ) . rstrip ( b'=' ) . decode ( 'ascii' ) | Return a random URL - safe text string in Base64 encoding . |
9,880 | def prettyln ( text , fill = '-' , align = '^' , prefix = '[ ' , suffix = ' ]' , length = 69 ) : text = '{prefix}{0}{suffix}' . format ( text , prefix = prefix , suffix = suffix ) print ( "{0:{fill}{align}{length}}" . format ( text , fill = fill , align = align , length = length ) ) | Wrap text in a pretty line with maximum length . |
9,881 | def unpack_nfirst ( seq , nfirst ) : iterator = iter ( seq ) for _ in range ( nfirst ) : yield next ( iterator , None ) yield tuple ( iterator ) | Unpack the nfirst items from the list and return the rest . |
9,882 | def split ( string , callback = None , sep = None ) : if callback is not None : return [ callback ( i ) for i in string . split ( sep ) ] else : return string . split ( sep ) | Split the string and execute the callback function on each part . |
9,883 | def namedtuple_with_defaults ( typename , field_names , default_values = [ ] ) : the_tuple = collections . namedtuple ( typename , field_names ) the_tuple . __new__ . __defaults__ = ( None , ) * len ( the_tuple . _fields ) if isinstance ( default_values , collections . Mapping ) : prototype = the_tuple ( ** default_values ) else : prototype = the_tuple ( * default_values ) the_tuple . __new__ . __defaults__ = tuple ( prototype ) return the_tuple | Create a namedtuple with default values |
9,884 | def remain_file_pointer ( function ) : def wrapper ( * args , ** kwargs ) : file_obj = args [ - 1 ] old_position = file_obj . tell ( ) return_value = function ( * args , ** kwargs ) file_obj . seek ( old_position , 0 ) return return_value return wrapper | Remain the file pointer position after calling the decorated function |
9,885 | def decamelise ( text ) : s = re . sub ( '(.)([A-Z][a-z]+)' , r'\1_\2' , text ) return re . sub ( '([a-z0-9])([A-Z])' , r'\1_\2' , s ) . lower ( ) | Convert CamelCase to lower_and_underscore . |
9,886 | def camelise ( text , capital_first = True ) : def camelcase ( ) : if not capital_first : yield str . lower while True : yield str . capitalize if istype ( text , 'unicode' ) : text = text . encode ( 'utf8' ) c = camelcase ( ) return "" . join ( next ( c ) ( x ) if x else '_' for x in text . split ( "_" ) ) | Convert lower_underscore to CamelCase . |
9,887 | def colored ( text , color = None , on_color = None , attrs = None , ansi_code = None ) : if os . getenv ( 'ANSI_COLORS_DISABLED' ) is None : if ansi_code is not None : return "\033[38;5;{}m{}\033[0m" . format ( ansi_code , text ) fmt_str = '\033[%dm%s' if color is not None : text = re . sub ( COLORS_RE + '(.*?)' + RESET_RE , r'\1' , text ) text = fmt_str % ( COLORS [ color ] , text ) if on_color is not None : text = re . sub ( HIGHLIGHTS_RE + '(.*?)' + RESET_RE , r'\1' , text ) text = fmt_str % ( HIGHLIGHTS [ on_color ] , text ) if attrs is not None : text = re . sub ( ATTRIBUTES_RE + '(.*?)' + RESET_RE , r'\1' , text ) for attr in attrs : text = fmt_str % ( ATTRIBUTES [ attr ] , text ) return text + RESET else : return text | Colorize text while stripping nested ANSI color sequences . |
9,888 | def zero_pad ( m , n = 1 ) : return np . pad ( m , ( n , n ) , mode = 'constant' , constant_values = [ 0 ] ) | Pad a matrix with zeros on all sides . |
9,889 | def supports_color ( ) : if isnotebook ( ) : return True supported_platform = sys . platform != 'win32' or 'ANSICON' in os . environ is_a_tty = hasattr ( sys . stdout , 'isatty' ) and sys . stdout . isatty ( ) if not supported_platform or not is_a_tty : return False return True | Checks if the terminal supports color . |
9,890 | def get_jpp_revision ( via_command = 'JPrint' ) : try : output = subprocess . check_output ( [ via_command , '-v' ] , stderr = subprocess . STDOUT ) except subprocess . CalledProcessError as e : if e . returncode == 1 : output = e . output else : return None except OSError : return None revision = output . decode ( ) . split ( '\n' ) [ 0 ] . split ( ) [ 1 ] . strip ( ) return revision | Retrieves the Jpp revision number |
9,891 | def timed_cache ( ** timed_cache_kwargs ) : def _wrapper ( f ) : maxsize = timed_cache_kwargs . pop ( 'maxsize' , 128 ) typed = timed_cache_kwargs . pop ( 'typed' , False ) update_delta = timedelta ( ** timed_cache_kwargs ) d = { 'next_update' : datetime . utcnow ( ) - update_delta } try : f = functools . lru_cache ( maxsize = maxsize , typed = typed ) ( f ) except AttributeError : print ( "LRU caching is not available in Pyton 2.7, " "this will have no effect!" ) pass @ functools . wraps ( f ) def _wrapped ( * args , ** kwargs ) : now = datetime . utcnow ( ) if now >= d [ 'next_update' ] : try : f . cache_clear ( ) except AttributeError : pass d [ 'next_update' ] = now + update_delta return f ( * args , ** kwargs ) return _wrapped return _wrapper | LRU cache decorator with timeout . |
9,892 | def _get_point ( self , profile , point ) : cur_points_z = [ p . location . z for p in profile . elements ] try : cur_idx = cur_points_z . index ( point . z ) return profile . elements [ cur_idx ] except ValueError : new_idx = bisect_left ( cur_points_z , point . z ) new_point = Point ( ) new_point . location = sPoint ( point ) new_point . time = profile . time profile . elements . insert ( new_idx , new_point ) return new_point | Finds the given point in the profile or adds it in sorted z order . |
9,893 | def _parse_data_array ( self , data_array ) : tokenSeparator = data_array . encoding . tokenSeparator blockSeparator = data_array . encoding . blockSeparator data_values = data_array . values lines = [ x for x in data_values . split ( blockSeparator ) if x != "" ] ret_val = [ ] for row in lines : values = row . split ( tokenSeparator ) ret_val . append ( [ float ( v ) if " " not in v . strip ( ) else [ float ( vv ) for vv in v . split ( ) ] for v in values ] ) return [ list ( x ) for x in zip ( * ret_val ) ] | Parses a general DataArray . |
9,894 | def _parse_sensor_record ( self , sensor_data_rec , sensor_info , rem_values ) : val_idx = 0 assert len ( sensor_data_rec . field ) == 1 sensor_data_array = sensor_data_rec . field [ 0 ] . content count = None count_text = sensor_data_array . elementCount . text if count_text : count = int ( count_text . strip ( ) ) if not count : count = int ( rem_values [ val_idx ] ) val_idx += 1 parsed = [ ] for recnum in range ( count ) : cur = [ ] for f in sensor_data_array . elementType . field : cur_val = rem_values [ val_idx ] val_idx += 1 m = Member ( name = f . name , standard = f . content . definition ) if hasattr ( f . content , "uom" ) : m [ "units" ] = f . content . uom try : m [ "value" ] = float ( cur_val ) except ValueError : m [ "value" ] = cur_val if len ( f . quality ) : m [ "quality" ] = [ ] for qual in f . quality : cur_qual = rem_values [ val_idx ] val_idx += 1 m [ "quality" ] . append ( cur_qual ) cur . append ( m ) parsed . append ( cur ) return parsed , val_idx | Parses values via sensor data record passed in . Returns parsed values AND how many items it consumed out of rem_values . |
9,895 | def execute ( self , debug = False ) : if debug : start_time = datetime ( year = 2016 , month = 10 , day = 19 , hour = 12 , minute = 28 , tzinfo = UTC ) duration = timedelta ( seconds = 5 ) end_time = start_time + duration relative_interval = RelativeTimeInterval ( 0 , 0 ) time_interval = TimeInterval ( start_time , end_time ) else : duration = 0 relative_interval = self . hyperstream . config . online_engine . interval time_interval = relative_interval . absolute ( utcnow ( ) ) for _ in range ( self . hyperstream . config . online_engine . iterations ) : if not debug : signal . alarm ( self . hyperstream . config . online_engine . alarm ) logging . info ( "Online engine starting up." ) self . hyperstream . workflow_manager . set_all_requested_intervals ( TimeIntervals ( [ time_interval ] ) ) self . hyperstream . workflow_manager . execute_all ( ) logging . info ( "Online engine shutting down." ) logging . info ( "" ) sleep ( self . hyperstream . config . online_engine . sleep ) if debug : time_interval += duration else : time_interval = TimeInterval ( time_interval . end , utcnow ( ) + timedelta ( seconds = relative_interval . end ) ) | Execute the engine - currently simple executes all workflows . |
9,896 | def show ( movie ) : for key , value in sorted ( movie . iteritems ( ) , cmp = metadata_sorter , key = lambda x : x [ 0 ] ) : if isinstance ( value , list ) : if not value : continue other = value [ 1 : ] value = value [ 0 ] else : other = [ ] printer . p ( '<b>{key}</b>: {value}' , key = key , value = value ) for value in other : printer . p ( '{pad}{value}' , value = value , pad = ' ' * ( len ( key ) + 2 ) ) | Show the movie metadata . |
9,897 | def metadata_sorter ( x , y ) : if x == y : return 0 if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST : return - 1 if METADATA_SORTER_FIRST . index ( x ) < METADATA_SORTER_FIRST . index ( y ) else 1 elif x in METADATA_SORTER_FIRST : return - 1 elif y in METADATA_SORTER_FIRST : return 1 else : if x . startswith ( '_' ) and y . startswith ( '_' ) : return cmp ( x [ 1 : ] , y [ 1 : ] ) elif x . startswith ( '_' ) : return 1 elif y . startswith ( '_' ) : return - 1 else : return cmp ( x , y ) | Sort metadata keys by priority . |
9,898 | def parse_lines ( log_parsers , fileinp ) : while 1 : logentry = fileinp . readline ( ) if not logentry : break elif not logentry . rstrip ( ) : continue processed = False for lp in log_parsers : if lp . grok ( logentry ) : processed = True if not processed : logger = logging . getLogger ( 'logparser' ) logger . warning ( 'Could not parse line >>>%s<<<' , logentry . rstrip ( ) ) print ( 'Could not parse line >>>%s<<<' % logentry . rstrip ( ) ) | parse lines from the fileinput and send them to the log_parsers |
9,899 | def load_commands ( self , parser ) : entrypoints = self . _get_entrypoints ( ) already_loaded = set ( ) for entrypoint in entrypoints : if entrypoint . name not in already_loaded : command_class = entrypoint . load ( ) command_class ( entrypoint . name , self , parser ) . prepare ( ) already_loaded . add ( entrypoint . name ) | Load commands of this profile . |
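
The rows above pair tokenized Python source (question) with a short natural-language summary (target). As a minimal sketch of how such a preview could be inspected, assuming it were exported to a CSV file with the same three columns (the filename code_summaries.csv below is a hypothetical placeholder, not part of the dataset):

```python
import pandas as pd

# Hypothetical export of the preview above; the filename and the column
# names (idx, question, target) are assumptions taken from the table header.
df = pd.read_csv("code_summaries.csv")

# Each row pairs tokenized Python source ("question") with its
# docstring-style summary ("target").
for _, row in df.head(3).iterrows():
    print(row["idx"], "->", row["target"])
```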