idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
9,900
def copy(tree, source_filename):
    """Copy *source_filename* into the Kolekto tree and return its SHA-1 digest.

    Shows a progress bar while streaming the file through a temporary file in
    ``<tree>/.kolekto/movies`` which is then renamed to its content hash.

    Raises:
        IOError: if a file with the same hash already exists in the tree.
            (Fix: the temporary file is now removed in that case instead of
            being leaked in the movies directory.)
    """
    filehash = sha1()
    movies_dir = os.path.join(tree, '.kolekto', 'movies')
    with printer.progress(os.path.getsize(source_filename)) as update:
        with open(source_filename, 'rb') as fsource:
            with NamedTemporaryFile(dir=movies_dir, delete=False) as fdestination:
                while True:
                    buf = fsource.read(10 * 1024)
                    if not buf:
                        break
                    filehash.update(buf)
                    fdestination.write(buf)
                    update(len(buf))
    dest = os.path.join(movies_dir, filehash.hexdigest())
    if os.path.exists(dest):
        os.unlink(fdestination.name)  # fix: don't leave the temp file behind
        raise IOError('This file already exists in tree (%s)' % filehash.hexdigest())
    else:
        os.rename(fdestination.name, dest)
    return filehash.hexdigest()
Copy a file into the tree, showing a progress bar during the operation, and return the SHA-1 sum of the copied file.
9,901
def list_attachments(fullname):
    """Return the paths of sibling files that share *fullname*'s stem.

    An attachment is any file in the same directory whose name without
    extension equals *fullname*'s name without extension.
    """
    parent, filename = os.path.split(fullname)
    stem = os.path.splitext(filename)[0]
    return [
        os.path.join(parent, candidate)
        for candidate in os.listdir(parent)
        if candidate != filename and os.path.splitext(candidate)[0] == stem
    ]
List attachment for the specified fullname .
9,902
def write_to_stream(self, stream_id, data, sandbox=None):
    """Write one or more StreamInstances to the stream with *stream_id*.

    Raises:
        NotImplementedError: sandboxes are not supported.
        StreamNotFoundError: if *stream_id* is unknown.
        ValueError: if any element of *data* is not a StreamInstance.
    """
    if sandbox is not None:
        raise NotImplementedError
    if stream_id not in self.streams:
        raise StreamNotFoundError("Stream with id '{}' does not exist".format(stream_id))
    writer = self.get_stream_writer(self.streams[stream_id])
    instances = [data] if isinstance(data, StreamInstance) else data
    for instance in instances:
        if not isinstance(instance, StreamInstance):
            raise ValueError("Expected StreamInstance, got {}".format(str(type(instance))))
        writer(instance)
Write to the stream
9,903
def _startXTVDNode ( self , name , attrs ) : schemaVersion = attrs . get ( 'schemaVersion' ) validFrom = self . _parseDateTime ( attrs . get ( 'from' ) ) validTo = self . _parseDateTime ( attrs . get ( 'to' ) ) self . _progress . printMsg ( 'Parsing version %s data from %s to %s' % ( schemaVersion , validFrom . strftime ( '%Y/%m/%d' ) , validTo . strftime ( '%Y/%m/%d' ) ) )
Process the start of the top - level xtvd node
9,904
def startElement(self, name, attrs):
    """SAX callback run at the start of each XML element.

    Maintains a context stack and dispatches to the matching ``_start*Node``
    handler for the current section; handler exceptions are recorded and
    reported instead of aborting the whole parse.
    (Fix: use ``except Exception as e`` — valid on Python 2.6+ and 3 —
    instead of the Python-2-only comma form.)
    """
    self._contextStack.append(self._context)
    self._contentList = []
    if name in self._statusDict:
        self._itemTag, itemType = self._statusDict[name]
        self._progress.startItem(itemType)
    elif name == self._itemTag:
        self._error = False
        self._progress.newItem()
    try:
        if self._context == 'root':
            if name == 'xtvd':
                self._context = 'xtvd'
                self._startXTVDNode(name, attrs)
        elif self._context == 'xtvd':
            self._context = name
        elif self._context == 'stations':
            self._startStationsNode(name, attrs)
        elif self._context == 'lineups':
            self._startLineupsNode(name, attrs)
        elif self._context == 'schedules':
            self._startSchedulesNode(name, attrs)
        elif self._context == 'programs':
            self._startProgramsNode(name, attrs)
        elif self._context == 'productionCrew':
            self._startProductionCrewNode(name, attrs)
        elif self._context == 'genres':
            self._startGenresNode(name, attrs)
    except Exception as e:
        self._error = True
        self._progress.printMsg(str(e), error=True)
Callback run at the start of each XML element
9,905
def endElement(self, name):
    """SAX callback run at the end of each XML element.

    Joins the accumulated character data and hands it to the matching
    ``_end*Node`` handler; handler exceptions are recorded and reported.
    Always pops the context stack before returning.
    (Fix: use ``except Exception as e`` — valid on Python 2.6+ and 3 —
    instead of the Python-2-only comma form.)
    """
    content = ''.join(self._contentList)
    if name == 'xtvd':
        self._progress.endItems()
    else:
        try:
            if self._context == 'stations':
                self._endStationsNode(name, content)
            elif self._context == 'lineups':
                self._endLineupsNode(name, content)
            elif self._context == 'schedules':
                self._endSchedulesNode(name, content)
            elif self._context == 'programs':
                self._endProgramsNode(name, content)
            elif self._context == 'productionCrew':
                self._endProductionCrewNode(name, content)
            elif self._context == 'genres':
                self._endGenresNode(name, content)
        except Exception as e:
            self._error = True
            self._progress.printMsg(str(e), error=True)
    self._context = self._contextStack.pop()
Callback run at the end of each XML element
9,906
def error(self, msg):
    """Record a recoverable XML parsing error and report it via the progress printer."""
    self._error = True
    self._progress.printMsg('XML parse error: %s' % msg, error=True)
Callback run when a recoverable parsing error occurs
9,907
def format_all(format_string, env):
    """Yield *format_string* formatted with every combination of the list
    values in the provided environment.

    Fix: use ``keys()``/``values()``/``zip`` instead of the Python-2-only
    ``iterkeys()``/``itervalues()``/``izip`` so the generator also runs on
    Python 3. Key and value iteration order are consistent for a given dict,
    so the key/value pairing is preserved.
    """
    prepared_env = parse_pattern(format_string, env, lambda x, y: [FormatWrapper(x, z) for z in y])
    for field_values in product(*prepared_env.values()):
        format_env = dict(zip(prepared_env.keys(), field_values))
        yield format_string.format(**format_env)
Format the input string using each possible combination of the lists in the provided environment. Yields the formatted strings.
9,908
def preloop(self):
    """Reset history and execution namespaces before prompting for commands."""
    Cmd.preloop(self)
    self._hist = []
    self._locals, self._globals = {}, {}
Initialization before prompting user for commands .
9,909
def execute_tool(self, stream, interval):
    """Execute the stream's tool over the given time interval.

    Only the sub-intervals not already covered by
    ``stream.calculated_intervals`` are computed.

    Raises:
        ValueError: if *interval* ends after ``self.up_to_timestamp``.
        RuntimeError: if the required intervals are still not fully covered
            after execution.
    """
    if interval.end > self.up_to_timestamp:
        raise ValueError(
            'The stream is not available after ' + str(self.up_to_timestamp) + ' and cannot be calculated')
    required_intervals = TimeIntervals([interval]) - stream.calculated_intervals
    if not required_intervals.is_empty:
        # NOTE(review): the loop variable shadows the *interval* parameter
        for interval in required_intervals:
            stream.tool.execute(stream.input_streams, stream, interval)
            stream.calculated_intervals += interval
        # NOTE(review): this checks stream.required_intervals, not the local
        # required_intervals computed above — confirm that is intentional
        if not stream.required_intervals.is_empty:
            raise RuntimeError('Tool execution did not cover the specified time interval.')
Executes the stream's tool over the given time interval.
9,910
def get_or_create_stream(self, stream_id, try_create=True):
    """Return the stream for *stream_id*, creating it on demand.

    Returns None when the stream is unknown and *try_create* is False.
    """
    stream_id = get_stream_id(stream_id)
    if stream_id in self.streams:
        logging.debug("found {}".format(stream_id))
        return self.streams[stream_id]
    if try_create:
        logging.debug("creating {}".format(stream_id))
        return self.create_stream(stream_id=stream_id)
Helper function to get a stream or create one if it's not already defined.
9,911
def find_streams ( self , ** kwargs ) : found = { } if 'name' in kwargs : name = kwargs . pop ( 'name' ) else : name = None for stream_id , stream in self . streams . items ( ) : if name is not None and stream_id . name != name : continue d = dict ( stream_id . meta_data ) if all ( k in d and d [ k ] == str ( v ) for k , v in kwargs . items ( ) ) : found [ stream_id ] = stream return found
Finds streams with the given meta data values . Useful for debugging purposes .
9,912
def find_stream(self, **kwargs):
    """Find exactly one stream with the given meta data values.

    Raises StreamNotFoundError when nothing matches and
    MultipleStreamsFoundError when more than one stream matches.
    """
    matches = list(self.find_streams(**kwargs).values())
    if not matches:
        raise StreamNotFoundError(kwargs)
    if len(matches) > 1:
        raise MultipleStreamsFoundError(kwargs)
    return matches[0]
Finds a single stream with the given meta data values . Useful for debugging purposes .
9,913
def next_blob(self):
    """Get the next frame from file and return it as a Blob.

    Reads a DAQPreamble, dispatches on its data type to build the matching
    DAQ frame, and skips over the payload of frames with unknown type codes.

    Raises:
        StopIteration: when no complete preamble can be read (end of file).

    (Fix: corrected the 'Unkown' typo in the error message.)
    """
    blob_file = self.blob_file
    try:
        preamble = DAQPreamble(file_obj=blob_file)
    except struct.error:
        # no more complete preambles -> end of file
        raise StopIteration
    try:
        data_type = DATA_TYPES[preamble.data_type]
    except KeyError:
        log.error("Unknown datatype: {0}".format(preamble.data_type))
        data_type = 'Unknown'
    blob = Blob()
    blob[data_type] = None
    blob['DAQPreamble'] = preamble
    if data_type == 'DAQSummaryslice':
        daq_frame = DAQSummaryslice(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    elif data_type == 'DAQEvent':
        daq_frame = DAQEvent(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    else:
        log.warning("Skipping DAQ frame with data type code '{0}'.".format(preamble.data_type))
        # jump over the remaining payload of this frame
        blob_file.seek(preamble.length - DAQPreamble.size, 1)
    return blob
Get the next frame from file
9,914
def seek_to_frame(self, index):
    """Move the blob file pointer to the start of the frame with the given index."""
    self.blob_file.seek(self.frame_positions[index], 0)
Move file pointer to the frame with given index .
9,915
def _parse_file ( self , file_obj ) : byte_data = file_obj . read ( self . size ) self . _parse_byte_data ( byte_data )
Directly read from file handler .
9,916
def _parse_summary_frames ( self , file_obj ) : for _ in range ( self . n_summary_frames ) : dom_id = unpack ( '<i' , file_obj . read ( 4 ) ) [ 0 ] dq_status = file_obj . read ( 4 ) dom_status = unpack ( '<iiii' , file_obj . read ( 16 ) ) raw_rates = unpack ( 'b' * 31 , file_obj . read ( 31 ) ) pmt_rates = [ self . _get_rate ( value ) for value in raw_rates ] self . summary_frames [ dom_id ] = pmt_rates self . dq_status [ dom_id ] = dq_status self . dom_status [ dom_id ] = dom_status self . dom_rates [ dom_id ] = np . sum ( pmt_rates )
Iterate through the byte data and fill the summary_frames
9,917
def _get_rate(self, value):
    """Convert a short-int rate value to a rate in Hz (0 maps to 0)."""
    if not value:
        return 0
    return MINIMAL_RATE_HZ * math.exp(value * self._get_factor())
Return the rate in Hz from the short int value
9,918
def _parse_triggered_hits ( self , file_obj ) : for _ in range ( self . n_triggered_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] trigger_mask = unpack ( '<Q' , file_obj . read ( 8 ) ) self . triggered_hits . append ( ( dom_id , pmt_id , tdc_time , tot , trigger_mask ) )
Parse and store triggered hits .
9,919
def _parse_snapshot_hits ( self , file_obj ) : for _ in range ( self . n_snapshot_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] self . snapshot_hits . append ( ( dom_id , pmt_id , tdc_time , tot ) )
Parse and store snapshot hits .
9,920
def runtable(det_id, n=5, run_range=None, compact=False, sep='\t', regex=None):
    """Print the run table of the last *n* runs for the given detector.

    Parameters
    ----------
    det_id: detector identifier passed to the database
    n: number of most recent runs to show (None = all)
    run_range: optional 'FROM-TO' run-number string, inclusive filter
    compact: if True, only the RUN/DATETIME/RUNSETUPNAME columns are shown
    sep: column separator for the CSV written to stdout
    regex: optional pattern matched against RUNSETUPNAME or RUNSETUPID
    """
    db = kp.db.DBManager()
    df = db.run_table(det_id)
    if run_range is not None:
        try:
            from_run, to_run = [int(r) for r in run_range.split('-')]
        except ValueError:
            log.critical("Please specify a valid range (e.g. 3100-3200)!")
            raise SystemExit
        else:
            df = df[(df.RUN >= from_run) & (df.RUN <= to_run)]
    if regex is not None:
        try:
            re.compile(regex)
        except re.error:
            log.error("Invalid regex!")
            return
        df = df[df['RUNSETUPNAME'].str.contains(regex) | df['RUNSETUPID'].str.contains(regex)]
    if n is not None:
        df = df.tail(n)
    if compact:
        df = df[['RUN', 'DATETIME', 'RUNSETUPNAME']]
    df.to_csv(sys.stdout, sep=sep)
Print the run table of the last n runs for given detector
9,921
def _extract_calibration(xroot):
    """Extract AHRS calibration information from an XML root element.

    Returns (Aoff, Arot, Hoff, Hrot): acceleration offset vector,
    acceleration rotation matrix, magnetic offset vector and magnetic
    rotation matrix.
    """
    names = [c.text for c in xroot.findall(".//Name")]
    val = [[i.text for i in c] for c in xroot.findall(".//Values")]
    # Index permutations used to reorder the flat matrix/vector entries;
    # fixed defaults are used when they are absent from the XML.
    col_ic = [int(v) for v in val[names.index("AHRS_Matrix_Column(-)")]]
    try:
        row_ic = [int(v) for v in val[names.index("AHRS_Matrix_Row(-)")]]
    except ValueError:
        row_ic = [2, 2, 2, 1, 1, 1, 0, 0, 0]
    try:
        vec_ic = [int(v) for v in val[names.index("AHRS_Vector_Index(-)")]]
    except ValueError:
        vec_ic = [2, 1, 0]
    Aoff_ix = names.index("AHRS_Acceleration_Offset(g/ms^2-)")
    Arot_ix = names.index("AHRS_Acceleration_Rotation(-)")
    Hrot_ix = names.index("AHRS_Magnetic_Rotation(-)")
    Aoff = np.array(val[Aoff_ix])[vec_ic].astype(float)
    Arot = np.array(val[Arot_ix]).reshape(3, 3)[col_ic, row_ic].reshape(3, 3).astype(float)
    Hrot = np.array(val[Hrot_ix]).reshape(3, 3)[col_ic, row_ic].reshape(3, 3).astype(float)
    # Magnetic offset: midpoint of the Min/Max calibration values per axis
    Hoff = []
    for q in 'XYZ':
        values = []
        for t in ('Min', 'Max'):
            ix = names.index("AHRS_Magnetic_{}{}(G-)".format(q, t))
            values.append(float(val[ix][0]))
        Hoff.append(sum(values) / 2.)
    Hoff = np.array(Hoff)
    return Aoff, Arot, Hoff, Hrot
Extract AHRS calibration information from XML root .
9,922
def calibrate(self):
    """Calculate yaw, pitch and roll from the median of A and H.

    For each DOM with accumulated samples the latest AHRS calibration is
    fetched and yaw/pitch/roll are fitted from the median acceleration (A)
    and magnetic (H) vectors. Returns ``{dom_id: (timestamp, du, floor,
    yaw, pitch, roll)}``. DOMs without a calibration are skipped with a
    warning. The A/H sample buffers are reset afterwards.
    """
    now = time.time()
    dom_ids = self.A.keys()
    print("Calibrating AHRS from median A and H for {} DOMs.".format(len(dom_ids)))
    calibrations = {}
    for dom_id in dom_ids:
        print("Calibrating DOM ID {}".format(dom_id))
        clb_upi = self.db.doms.via_dom_id(dom_id).clb_upi
        ahrs_calib = get_latest_ahrs_calibration(clb_upi)
        if ahrs_calib is None:
            log.warning("AHRS calibration missing for '{}'".format(dom_id))
            continue
        du, floor, _ = self.detector.doms[dom_id]
        A = np.median(self.A[dom_id], axis=0)
        H = np.median(self.H[dom_id], axis=0)
        cyaw, cpitch, croll = fit_ahrs(A, H, *ahrs_calib)
        calibrations[dom_id] = (now, du, floor, cyaw, cpitch, croll)
    # reset the sample buffers for the next calibration round
    self.A = defaultdict(list)
    self.H = defaultdict(list)
    return calibrations
Calculate yaw pitch and roll from the median of A and H .
9,923
def humanize_filesize(value):
    """Return a human-readable file size string for *value* bytes.

    Fix: removed a duplicated, unreachable ``elif value < 1024`` branch
    that could never fire after the first ``value < 1024`` test.
    """
    value = float(value)
    if value == 1:
        return '1 Byte'
    if value < 1024:
        return '%d Bytes' % value
    for i, s in enumerate(SUFFIXES):
        unit = 1024 ** (i + 2)
        if value < unit:
            return '%.1f %s' % ((1024 * value / unit), s)
    # larger than the biggest suffix: fall back to the last unit/suffix
    return '%.1f %s' % ((1024 * value / unit), s)
Return a humanized file size.
9,924
def format_top(counter, top=3):
    """Format the *top* most frequent entries of *counter* as 'name (count)'.

    Fix: use ``items()`` instead of the Python-2-only ``iteritems()`` so the
    helper also works on Python 3.
    """
    ranked = islice(reversed(sorted(counter.items(), key=lambda kv: kv[1])), 0, top)
    return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in ranked)
Format a top .
9,925
def check_input_stream_count(expected_number_of_streams):
    """Decorator for Tool._execute that validates the number of input streams."""
    def stream_count_decorator(func):
        def func_wrapper(*args, **kwargs):
            self = args[0]
            # sources may arrive positionally or as a keyword
            sources = kwargs['sources'] if 'sources' in kwargs else args[1]
            if expected_number_of_streams == 0 and sources:
                raise ValueError("No input streams expected")
            if expected_number_of_streams != 0:
                given_number_of_streams = len(sources) if sources else 0
                if given_number_of_streams != expected_number_of_streams:
                    raise ValueError(
                        "{} tool takes {} stream(s) as input ({} given)".format(
                            self.__class__.__name__, expected_number_of_streams, given_number_of_streams))
            return func(*args, **kwargs)
        return func_wrapper
    return stream_count_decorator
Decorator for Tool . _execute that checks the number of input streams
9,926
def main():
    """Entry point: forward messages from one Ligier (ControlHost) to another.

    Parses CLI arguments via docopt, sets the km3pipe log level, attaches a
    CHPump source and a LigierSender sink to a pipeline and drains it.
    """
    from docopt import docopt
    args = docopt(__doc__, version=kp.version)
    kp.logger.set_level("km3pipe", args['-d'])
    pipe = kp.Pipeline()
    pipe.attach(
        kp.io.ch.CHPump,
        host=args['SOURCE_IP'],
        port=int(args['-p']),
        tags=args['-m'],
        timeout=int(args['-x']),
        max_queue=int(args['-s']))
    pipe.attach(LigierSender, target_ip=args['-t'], port=int(args['-q']))
    pipe.drain()
The main script
9,927
def parse(filename):
    """Parse a .ase (Adobe Swatch Exchange) file and return its colors and
    color groups as a list of chunks.

    Raises AssertionError when the magic header or version is unexpected.
    """
    with open(filename, "rb") as data:
        header, v_major, v_minor, chunk_count = struct.unpack("!4sHHI", data.read(12))
        assert header == b"ASEF"
        assert (v_major, v_minor) == (1, 0)
        return list(parser.parse_chunk(data))
parses a . ase file and returns a list of colors and color groups
9,928
def dumps(obj):
    """Serialize a swatch object to ASEF bytes (header followed by chunk bodies)."""
    head = struct.pack('!4sHHI', b'ASEF', 1, 0, writer.chunk_count(obj))
    body = b''.join(writer.chunk_for_object(chunk) for chunk in obj)
    return head + body
converts a swatch to bytes suitable for writing
9,929
def isFullPreferenceOrder(self, candList):
    """Return True when the weighted majority graph compares every pair of
    candidates in *candList*, False otherwise."""
    for cand in candList:
        if cand not in self.wmgMap:
            return False
        row = self.wmgMap[cand]
        if any(other not in row for other in candList if other != cand):
            return False
    return True
Returns True if the underlying weighted majority graph contains a comparison between every pair of candidates and returns False otherwise.
9,930
def containsTie(self):
    """Return True when any candidate pair in the weighted majority graph
    has weight 0 (a tie), False otherwise."""
    return any(0 in row.values() for row in self.wmgMap.values())
Returns True if the underlying weighted majority graph contains a tie between any pair of candidates and returns False otherwise .
9,931
def getIncEdgesMap(self):
    """Return {edge-weight-sum: [candidates]} grouping candidates by the sum
    of their positive edge weights in the weighted majority graph."""
    incEdgesMap = {}
    for cand, row in self.wmgMap.items():
        total = sum(weight for weight in row.values() if weight > 0)
        incEdgesMap.setdefault(total, []).append(cand)
    return incEdgesMap
Returns a dictionary that associates numbers of incoming edges in the weighted majority graph with the candidates that have that number of incoming edges .
9,932
def getRankMap(self):
    """Return {candidate: position} ranking candidates by descending edge
    weight sum; positions start at 1."""
    incEdges = self.getIncEdgesMap()
    rankMap = {}
    for pos, weight in enumerate(sorted(incEdges, reverse=True), start=1):
        for cand in incEdges[weight]:
            rankMap[cand] = pos
    return rankMap
Returns a dictionary that associates the integer representation of each candidate with its position in the ranking starting from 1 .
9,933
def getReverseRankMap(self):
    """Return {position: [candidates]} with candidates grouped by ranking
    position (descending edge weight sum, positions starting at 1)."""
    incEdges = self.getIncEdgesMap()
    return {
        pos: incEdges[weight]
        for pos, weight in enumerate(sorted(incEdges, reverse=True), start=1)
    }
Returns a dictionary that associates each position in the ranking with a list of integer representations of the candidates ranked at that position .
9,934
def histogram(a, bins):
    """Compute the histogram of a set of data for the given bin edges.

    Returns (counts, bins) in the spirit of numpy.histogram.

    NOTE(review): the right-edge count below uses ``bins[1]`` — for more
    than two bin edges this looks like it should be ``bins[-1]``; confirm
    the intended semantics before relying on multi-bin results.
    """
    if any(map(lambda x: x < 0, diff(bins))):
        raise ValueError('bins must increase monotonically.')
    try:
        sa = sorted(a)
    except TypeError:
        # scalar input: treat it as a one-element dataset
        sa = sorted([a])
    # cumulative counts of values assigned to each inner bin edge
    nl = list(accumulate([Counter(map(lambda x: bisect_left(bins[:-1], x), sa))[i] for i in range(len(bins) - 1)]))
    nr = Counter(map(lambda x: bisect_right([bins[1]], x), sa))[1]
    n = list(nl) + [nr]
    return diff(n), bins
Compute the histogram of a set of data .
9,935
def deprecation(self, message, *args, **kws):
    """Log *message* with the custom DEPRECATION severity level."""
    self._log(DEPRECATION, message, args, **kws)
Show a deprecation warning .
9,936
def once(self, message, *args, **kws):
    """Log *message* only once per call site, or per explicit ``identifier``
    keyword when given."""
    identifier = kws.pop('identifier', None)
    if identifier is None:
        # derive a stable identifier from the caller's source position
        caller = getframeinfo(stack()[1][0])
        identifier = "%s:%d" % (caller.filename, caller.lineno)
    registry = getattr(self, 'once_dict', None)
    if registry is None:
        registry = self.once_dict = {}
    if identifier in registry:
        return
    registry[identifier] = True
    self._log(ONCE, message, args, **kws)
Show a message only once, as determined by its position in the source or an explicit identifier.
9,937
def get_logger(name):
    """Return a cached logger for *name*, creating it with a hash-coloured
    prefix formatter on first use."""
    if name in loggers:
        return loggers[name]
    logger = logging.getLogger(name)
    logger.propagate = False
    pre1, suf1 = hash_coloured_escapes(name) if supports_color() else ('', '')
    pre2, suf2 = hash_coloured_escapes(name + 'salt') if supports_color() else ('', '')
    formatter = logging.Formatter(
        '%(levelname)s {}+{}+{} ' '%(name)s: %(message)s'.format(pre1, pre2, suf1))
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    loggers[name] = logger
    logger.once_dict = {}
    return logger
Helper function to get a logger
9,938
def get_printer(name, color=None, ansi_code=None, force_color=False):
    """Return a print function that prefixes each message with a coloured *name*."""
    if force_color or supports_color():
        if color is None and ansi_code is None:
            # no explicit colour requested: derive one from the name's hash
            cpre_1, csuf_1 = hash_coloured_escapes(name)
            cpre_2, csuf_2 = hash_coloured_escapes(name + 'salt')
            name = cpre_1 + '+' + cpre_2 + '+' + csuf_1 + ' ' + name
        else:
            name = colored(name, color=color, ansi_code=ansi_code)
    prefix = name + ': '
    def emit(text):
        print(prefix + str(text))
    return emit
Return a function which prints a message with a coloured name prefix
9,939
def hash_coloured(text):
    """Return *text* coloured with an ANSI code derived from its SHA-256 hash."""
    digest = sha256(text.encode('utf-8')).hexdigest()
    return colored(text, ansi_code=int(digest, 16) % 230)
Return an ANSI-coloured text based on its hash.
9,940
def hash_coloured_escapes(text):
    """Return the (prefix, suffix) ANSI escape pair for *text*'s hash colour."""
    digest = sha256(text.encode('utf-8')).hexdigest()
    # split a coloured marker to recover the raw escape sequences
    prefix, suffix = colored('SPLIT', ansi_code=int(digest, 16) % 230).split('SPLIT')
    return prefix, suffix
Return the ANSI hash colour prefix and suffix for a given text
9,941
def tai_timestamp(timestamp=None):
    """Return the TAI timestamp for *timestamp* (default: the current time).

    Generalized: the original always used ``time.time()``; an explicit UNIX
    *timestamp* may now be passed (backward compatible). TAI = UTC plus the
    accumulated leap seconds on top of the 10 s base offset in effect since
    1972; timestamps before 1972 are returned unchanged.
    """
    if timestamp is None:
        timestamp = time.time()
    date = datetime.utcfromtimestamp(timestamp)
    if date.year < 1972:
        return timestamp
    offset = 10 + timestamp
    leap_seconds = [
        (1972, 1, 1), (1972, 7, 1), (1973, 1, 1), (1974, 1, 1), (1975, 1, 1),
        (1976, 1, 1), (1977, 1, 1), (1978, 1, 1), (1979, 1, 1), (1980, 1, 1),
        (1981, 7, 1), (1982, 7, 1), (1983, 7, 1), (1985, 7, 1), (1988, 1, 1),
        (1990, 1, 1), (1991, 1, 1), (1992, 7, 1), (1993, 7, 1), (1994, 7, 1),
        (1996, 1, 1), (1997, 7, 1), (1999, 1, 1), (2006, 1, 1), (2009, 1, 1),
        (2012, 7, 1), (2015, 7, 1), (2017, 1, 1),
    ]
    for idx, leap_date in enumerate(leap_seconds):
        if leap_date >= (date.year, date.month, date.day):
            # idx leap seconds have elapsed before this date; the -1 matches
            # the original accounting (first entry covered by the +10 base)
            return idx - 1 + offset
    return len(leap_seconds) - 1 + offset
Return current TAI timestamp .
9,942
def msg(self, *args, **kwargs):
    """Only execute the callback (and reset the timer) when the interval is
    reached, or when no timestamp has been recorded yet."""
    if self.timestamp is not None and not self._interval_reached():
        return
    self.callback(*args, **kwargs)
    self.reset()
Only execute callback when interval is reached .
9,943
def _gather_field_values(item, *, fields=None, field_map=FIELD_MAP, normalize_values=False, normalize_func=normalize_value):
    """Create a tuple of (optionally normalized) metadata field values for *item*.

    When *fields* is None, all tag keys of the item are used.
    """
    tags = get_item_tags(item)
    if fields is None:
        fields = list(tags.keys())
    norm = normalize_func if normalize_values else str
    return tuple(
        norm(list_to_single_value(get_field(tags, field, field_map=field_map)))
        for field in fields
    )
Create a tuple of normalized metadata field values .
9,944
def find_existing_items(src, dst, *, fields=None, field_map=None, normalize_values=False, normalize_func=normalize_value):
    """Yield the items of *src* whose key fields match some item in *dst*."""
    if field_map is None:
        field_map = FIELD_MAP
    def key(item):
        # one hashable key per item so membership tests are O(1)
        return _gather_field_values(
            item, fields=fields, field_map=field_map,
            normalize_values=normalize_values, normalize_func=normalize_func)
    existing = {key(dst_item) for dst_item in dst}
    for src_item in src:
        if key(src_item) in existing:
            yield src_item
Find items from an item collection that are in another item collection .
9,945
def monitor(self, message, *args, **kws):
    """Log *message* with the custom MON (monitoring) severity level."""
    if not self.isEnabledFor(MON):
        return
    self._log(MON, message, args, **kws)
Define a monitoring logger that will be added to Logger
9,946
def monitor(msg, *args, **kwargs):
    """Log a message with severity MON on the root logger.

    Calls basicConfig() first when no handlers are configured, mirroring the
    stdlib module-level logging helpers. Assumes a ``monitor`` method has
    been patched onto the Logger class elsewhere — TODO confirm.
    """
    if len(logging.root.handlers) == 0:
        logging.basicConfig()
    logging.root.monitor(msg, *args, **kwargs)
Log a message with severity MON on the root logger .
9,947
def format(self, record):
    """Format a log record as a SenML-style JSON string.

    Fix: the timestamp is now rendered with ``strftime('...%f')[:-3]`` so
    records whose microsecond component is zero still get a '.000'
    millisecond suffix — ``isoformat()`` omits the fractional part entirely
    in that case, which corrupted the 'bt' field.
    """
    try:
        n = record.n
    except AttributeError:
        n = 'default'
    try:
        message = record.message
    except AttributeError:
        message = record.msg
    # milliseconds, always present, with a trailing Z (UTC)
    timestamp = datetime.utcfromtimestamp(record.created).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
    senml = OrderedDict(
        uid="hyperstream",
        bt=timestamp,
        e=[OrderedDict(n=n, v=message)])
    return json.dumps(senml)
The formatting function
9,948
def teardown(self):
    """Tear down the instance, destroying any Consul sessions this instance
    created that never acquired keys.

    Idempotent: guarded by a lock and the ``_teardown_called`` flag.
    Connection errors during session destruction are logged and skipped
    (best effort). Finally unregisters itself from the atexit hooks.
    """
    with self._teardown_lock:
        if not self._teardown_called:
            self._teardown_called = True
            if len(self._acquiring_session_ids) > 0:
                logger.info(f"Destroying all sessions that have not acquired keys: {self._acquiring_session_ids}...")
                for session_id in self._acquiring_session_ids:
                    try:
                        self.consul_client.session.destroy(session_id=session_id)
                        logger.debug(f"Destroyed: {session_id}")
                    except requests.exceptions.ConnectionError as e:
                        # best effort: Consul may already be gone at exit time
                        logger.debug(f"Exception: {e}")
                        logger.warning(f"Could not connect to Consul to clean up session {session_id}")
            atexit.unregister(self.teardown)
Tears down the instance, removing any remaining sessions that this instance has created.
9,949
def we_are_in_lyon():
    """Check whether this host resolves to an IP in the Lyon 134.158.0.0/16 range."""
    import socket
    try:
        ip = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        # hostname not resolvable -> assume we are elsewhere
        return False
    return ip.startswith("134.158.")
Check if we are on a Lyon machine
9,950
def read_csv(text, sep="\t"):
    """Create a pandas DataFrame from delimited text.

    Fix: the *sep* parameter was accepted but ignored — the separator was
    hard-coded to TAB; it is now forwarded to ``pandas.read_csv``.
    """
    import pandas as pd
    return pd.read_csv(StringIO(text), sep=sep)
Create a DataFrame from CSV text
9,951
def add_datetime(dataframe, timestamp_key='UNIXTIME'):
    """Add a DATETIME column converted from millisecond UNIX timestamps.

    Logs a warning and leaves the frame untouched when *timestamp_key*
    is missing.
    """
    def to_datetime(timestamp):
        # timestamps are in milliseconds
        return datetime.fromtimestamp(float(timestamp) / 1e3, UTC_TZ)
    try:
        log.debug("Adding DATETIME column to the data")
        dataframe['DATETIME'] = dataframe[timestamp_key].apply(to_datetime)
    except KeyError:
        log.warning("Could not add DATETIME column")
Add an additional DATETIME column with standard datetime format.
9,952
def show_ahrs_calibration(clb_upi, version='3'):
    """Show AHRS calibration data for the given CLB UPI.

    Fetches the product-test XML from the DB and prints each tag/text pair
    plus every Name with its Values list; prints a message instead when no
    calibration data is found.
    """
    db = DBManager()
    ahrs_upi = clbupi2ahrsupi(clb_upi)
    print("AHRS UPI: {}".format(ahrs_upi))
    content = db._get_content(
        "show_product_test.htm?upi={0}&"
        "testtype=AHRS-CALIBRATION-v{1}&n=1&out=xml".format(ahrs_upi, version)
    ).replace('\n', '')
    import xml.etree.ElementTree as ET
    try:
        root = ET.parse(io.StringIO(content)).getroot()
    except ET.ParseError:
        print("No calibration data found")
    else:
        for child in root:
            print("{}: {}".format(child.tag, child.text))
        names = [c.text for c in root.findall(".//Name")]
        values = [[i.text for i in c] for c in root.findall(".//Values")]
        for name, value in zip(names, values):
            print("{}: {}".format(name, value))
Show AHRS calibration data for given clb_upi .
9,953
def _datalog(self, parameter, run, maxrun, det_id):
    """Extract data-log numbers for *parameter* from the database.

    Queries runs [run, maxrun] of detector *det_id* and returns a DataFrame
    with a DATETIME column and, where possible, unit-converted values.
    Returns None on a DB error and an empty dataset when no rows match.
    """
    values = {
        'parameter_name': parameter,
        'minrun': run,
        'maxrun': maxrun,
        'detid': det_id,
    }
    data = urlencode(values)
    content = self._get_content('streamds/datalognumbers.txt?' + data)
    if content.startswith('ERROR'):
        log.error(content)
        return None
    try:
        dataframe = read_csv(content)
    except ValueError:
        log.warning("Empty dataset")
        return make_empty_dataset()
    else:
        add_datetime(dataframe)
        try:
            self._add_converted_units(dataframe, parameter)
        except KeyError:
            log.warning("Could not add converted units for {0}".format(parameter))
        return dataframe
Extract data from database
9,954
def _add_converted_units(self, dataframe, parameter, key='VALUE'):
    """Add a *key* column holding the unit-converted DATA_VALUE entries and
    attach the parameter's unit to the frame. Warns when DATA_VALUE is missing."""
    convert = self.parameters.get_converter(parameter)
    try:
        log.debug("Adding unit converted DATA_VALUE to the data")
        dataframe[key] = dataframe['DATA_VALUE'].apply(convert)
    except KeyError:
        log.warning("Missing 'VALUE': no unit conversion.")
    else:
        dataframe.unit = self.parameters.unit(parameter)
Add an additional DATA_VALUE column with converted VALUEs
9,955
def to_det_id(self, det_id_or_det_oid):
    """Normalize a detector identifier to a det ID: numeric inputs pass
    through, non-numeric OIDs are resolved via the database."""
    try:
        int(det_id_or_det_oid)
    except ValueError:
        # not numeric -> treat as OID and look up the det ID
        return self.get_det_id(det_id_or_det_oid)
    return det_id_or_det_oid
Convert det ID or OID to det ID
9,956
def to_det_oid(self, det_id_or_det_oid):
    """Normalize a detector identifier to a det OID: non-numeric inputs pass
    through, numeric det IDs are resolved via the database."""
    try:
        int(det_id_or_det_oid)
    except ValueError:
        # already a non-numeric OID -> return unchanged
        return det_id_or_det_oid
    return self.get_det_oid(det_id_or_det_oid)
Convert det OID or ID to det OID
9,957
def _load_parameters(self):
    """Fetch the list of available parameters from the database and cache
    them (keyed by lower-cased name) in a ParametersContainer."""
    parameters = self._get_json('allparam/s')
    by_name = {parameter['Name'].lower(): parameter for parameter in parameters}
    self._parameters = ParametersContainer(by_name)
Retrieve a list of available parameters from the database
9,958
def trigger_setup(self, runsetup_oid):
    """Retrieve the trigger setup for a given runsetup OID.

    Returns a TriggerSetup with the optical and acoustic data-filter
    configuration groups, or None (after logging an error) when the DB
    returns an empty dataset.

    Fix: the optical and acoustic configuration groups were parsed by two
    copy-pasted loops; the parsing is now factored into one helper.
    """
    def parse_conf_group(conf_group):
        # collect Name/Desc plus every DAQ parameter (with the 'DAQ_' prefix
        # stripped), coercing numeric-looking values to int/float
        df = {'Name': conf_group['Name'], 'Desc': conf_group['Desc']}
        for param in conf_group['Params']:
            pname = self.parameters.oid2name(param['OID']).replace('DAQ_', '')
            try:
                dtype = float if '.' in param['Val'] else int
                val = dtype(param['Val'])
            except ValueError:
                val = param['Val']
            df[pname] = val
        return df

    r = self._get_content("jsonds/rslite/s?rs_oid={}&upifilter=1.1.2.2.3/*".format(runsetup_oid))
    data = json.loads(r)['Data']
    if not data:
        log.error("Empty dataset.")
        return
    raw_setup = data[0]
    optical_df = parse_conf_group(raw_setup['ConfGroups'][0])
    acoustic_df = parse_conf_group(raw_setup['ConfGroups'][1])
    return TriggerSetup(runsetup_oid, raw_setup['Name'], raw_setup['DetID'], raw_setup['Desc'], optical_df, acoustic_df)
Retrieve the trigger setup for a given runsetup OID
9,959
def detx(self, det_id, t0set=None, calibration=None):
    """Retrieve the detector (DETX) file for the given detector ID,
    optionally pinned to a t0 set and/or a calibration ID."""
    parts = ['detx/{0}?'.format(det_id)]
    if t0set is not None:
        parts.append('&t0set=' + t0set)
    if calibration is not None:
        parts.append('&calibrid=' + calibration)
    return self._get_content(''.join(parts))
Retrieve the detector file for given detector id
9,960
def _get_json ( self , url ) : "Get JSON-type content" content = self . _get_content ( 'jsonds/' + url ) try : json_content = json . loads ( content . decode ( ) ) except AttributeError : json_content = json . loads ( content ) if json_content [ 'Comment' ] : log . warning ( json_content [ 'Comment' ] ) if json_content [ 'Result' ] != 'OK' : raise ValueError ( 'Error while retrieving the parameter list.' ) return json_content [ 'Data' ]
Get JSON - type content
9,961
def _get_content(self, url):
    """Fetch *url* (relative to the DB base URL) and return it as text.

    On an HTTP error (e.g. expired session) the user is interactively asked
    whether to request a new permanent session and retry; otherwise None is
    returned. Partial data from an IncompleteRead is kept (and may be
    corrupted).

    Fix: the retry prompt used ``input(...) in 'yY'`` — the empty string is
    a substring of any string, so just pressing Enter counted as "yes". An
    explicit tuple membership test is used now.
    """
    target_url = self._db_url + '/' + unquote(url)
    log.debug("Opening '{0}'".format(target_url))
    try:
        f = self.opener.open(target_url)
    except HTTPError as e:
        log.error("HTTP error, your session may be expired.")
        log.error(e)
        if input("Request new permanent session and retry? (y/n)") in ('y', 'Y'):
            self.request_permanent_session()
            return self._get_content(url)  # retry once with the new session
        else:
            return None
    log.debug("Accessing '{0}'".format(target_url))
    try:
        content = f.read()
    except IncompleteRead as icread:
        log.critical("Incomplete data received from the DB, " + "the data could be corrupted.")
        content = icread.partial
    log.debug("Got {0} bytes of data.".format(len(content)))
    return content.decode('utf-8')
Get HTML content
9,962
def opener(self):
    """A reusable connection manager, built lazily and cached on first use."""
    if self._opener is not None:
        log.debug("Reusing connection manager")
        return self._opener
    log.debug("Creating connection handler")
    handler = build_opener()
    if self._cookies:
        log.debug("Appending cookies")
    else:
        log.debug("No cookies to append")
    for cookie in self._cookies:
        handler.addheaders.append(('Cookie', cookie.name + '=' + cookie.value))
    self._opener = handler
    return self._opener
A reusable connection manager
9,963
def request_sid_cookie(self, username, password):
    """Request a cookie for a permanent session token.

    NOTE(review): credentials travel in the query string — presumably
    required by the DB API; confirm the endpoint is HTTPS.
    """
    log.debug("Requesting SID cookie")
    query = '?usr={0}&pwd={1}&persist=y'.format(username, password)
    response = urlopen(self._login_url + query)
    return response.read()
Request cookie for permanent session token .
9,964
def restore_session(self, cookie):
    """Re-establish the DB connection from a permanent session cookie."""
    log.debug("Restoring session from cookie: {}".format(cookie))
    session_opener = build_opener()
    session_opener.addheaders.append(('Cookie', cookie))
    self._opener = session_opener
Establish database connection using a permanent session cookie .
9,965
def login(self, username, password):
    """Login to the database and store cookies for upcoming requests.

    Returns ``True`` on success, ``False`` when the server is
    unreachable or the credentials are rejected.
    """
    log.debug("Logging in to the DB")
    opener = self._build_opener()
    values = {'usr': username, 'pwd': password}
    req = self._make_request(self._login_url, values)
    try:
        log.debug("Sending login request")
        f = opener.open(req)
    except URLError as e:
        log.error("Failed to connect to the database -> probably down!")
        log.error("Error from database server:\n {0}".format(e))
        return False
    html = f.read()
    # The server replies with HTTP 200 even for bad credentials, so
    # authentication failure must be detected in the response body.
    failed_auth_message = 'Bad username or password'
    if failed_auth_message in str(html):
        log.error(failed_auth_message)
        return False
    return True
Login to the database and store cookies for upcoming requests .
9,966
def _update_streams(self):
    """Refresh the list of available streams from the DB.

    Re-reads the ``streamds`` overview, invalidates the cached stream
    list and re-attaches one accessor attribute per stream name.
    """
    content = self._db._get_content("streamds")
    self._stream_df = read_csv(content).sort_values("STREAM")
    self._streams = None  # force lazy re-population via the `streams` property
    # Rebind an attribute per stream (e.g. for tab completion);
    # __getattr__ is invoked explicitly to construct each accessor.
    for stream in self.streams:
        setattr(self, stream, self.__getattr__(stream))
Update the list of available streams .
9,967
def streams(self):
    """A list of available stream names (cached after first access)."""
    cached = self._streams
    if cached is None:
        cached = list(self._stream_df["STREAM"].values)
        self._streams = cached
    return cached
A list of available streams
9,968
def help(self, stream):
    """Show the help for a given stream.

    Fix: the original logged "not found" for an unknown stream but
    then still indexed ``.values[0]`` of the empty selection, raising
    an IndexError; now it returns early instead.
    """
    if stream not in self.streams:
        log.error("Stream '{}' not found in the database.".format(stream))
        return
    params = self._stream_df[self._stream_df['STREAM'] == stream].values[0]
    self._print_stream_parameters(params)
Show the help for a given stream .
9,969
def _print_stream_parameters(self, values):
    """Print a coloured help block for a tuple of stream parameters.

    ``values`` is ordered as (name, formats, mandatory selectors,
    optional selectors, description).
    """
    cprint("{0}".format(*values), "magenta", attrs=["bold"])
    print("{4}".format(*values))
    for template, colour in ((" available formats: {1}", "blue"),
                             (" mandatory selectors: {2}", "red"),
                             (" optional selectors: {3}", "green")):
        cprint(template.format(*values), colour)
    print()
Print a coloured help for a given tuple of stream parameters .
9,970
def get(self, stream, fmt='txt', **kwargs):
    """Manually fetch data for a given stream.

    Keyword arguments become selector parameters of the query.
    Returns a parsed table for 'txt' format, the raw payload for any
    other format, and None on errors.
    """
    raw_sel = ''.join(["&{0}={1}".format(key, val) for (key, val) in kwargs.items()])
    query = "streamds/{0}.{1}?{2}".format(stream, fmt, raw_sel[1:])
    payload = self._db._get_content(query)
    if not payload:
        log.error("No data found at URL '%s'." % query)
        return
    if payload.startswith("ERROR"):
        log.error(payload)
        return
    return read_csv(payload) if fmt == "txt" else payload
Get the data for a given stream manually
9,971
def get_parameter(self, parameter):
    """Return the metadata dict for the given parameter."""
    canonical = self._get_parameter_name(parameter)
    return self._parameters[canonical]
Return a dict for given parameter
9,972
def get_converter(self, parameter):
    """Return (and cache) a unit-conversion function for *parameter*.

    The converter multiplies a value by the parameter's 'Scale'
    entry, defaulting to 1 (identity) when no scale is defined.

    Fixes: the original never stored anything in ``self._converters``
    and returned ``None`` whenever the parameter was already cached.
    """
    if parameter not in self._converters:
        param = self.get_parameter(parameter)
        try:
            scale = float(param['Scale'])
        except KeyError:
            scale = 1  # no scale defined -> identity conversion

        def convert(value):
            return value * scale

        self._converters[parameter] = convert
    return self._converters[parameter]
Generate unit conversion function for given parameter
9,973
def unit(self, parameter):
    """Return the unit string for the given parameter."""
    key = self._get_parameter_name(parameter).lower()
    return self._parameters[key]['Unit']
Get the unit for given parameter
9,974
def oid2name(self, oid):
    """Look up the parameter name for a given OID.

    The OID -> name reverse index is built lazily on first use.
    """
    if not self._oid_lookup:
        self._oid_lookup.update(
            (entry['OID'], entry['Name'])
            for entry in self._parameters.values())
    return self._oid_lookup[oid]
Look up the parameter name for a given OID
9,975
def via_dom_id(self, dom_id, det_id):
    """Return the DOM for a given DOM ID within a detector."""
    matches = [entry for entry in self._json
               if entry["DOMId"] == dom_id and entry["DetOID"] == det_id]
    try:
        return DOM.from_json(matches[0])
    except IndexError:
        log.critical("No DOM found for DOM ID '{0}'".format(dom_id))
Return DOM for given dom_id
9,976
def via_clb_upi(self, clb_upi, det_id):
    """Return the DOM for a given CLB UPI within a detector."""
    candidates = [entry for entry in self._json
                  if entry["CLBUPI"] == clb_upi and entry["DetOID"] == det_id]
    try:
        return DOM.from_json(candidates[0])
    except IndexError:
        log.critical("No DOM found for CLB UPI '{0}'".format(clb_upi))
Return DOM for given CLB UPI .
9,977
def upi(self):
    """A dict of CLBs keyed by UPI (populated on first access)."""
    key = 'UPI'
    if key not in self._by:
        self._populate(by=key)
    return self._by[key]
A dict of CLBs with UPI as key
9,978
def dom_id(self):
    """A dict of CLBs keyed by DOM ID (populated on first access)."""
    key = 'DOMID'
    if key not in self._by:
        self._populate(by=key)
    return self._by[key]
A dict of CLBs with DOM ID as key
9,979
def base(self, du):
    """Return the base CLB (the one on floor 0) for the given DU."""
    if 'base' not in self._by:
        # Index the floor-0 CLB of every DU once, then cache it.
        bases = {}
        for clb in self.upi.values():
            if clb.floor == 0:
                bases[clb.du] = clb
        self._by['base'] = bases
    return self._by['base'][du]
Return the base CLB for a given DU
9,980
def get_results(self, stream, time_interval):
    """Yield StreamInstances for *stream* within *time_interval*.

    Queries the 'hyperstream' MongoDB database for documents matching
    the stream id whose datetime lies in the half-open interval
    (start, end], re-wrapping each document as a StreamInstance.
    """
    query = stream.stream_id.as_raw()
    # Strictly after start, up to and including end.
    query['datetime'] = {'$gt': time_interval.start, '$lte': time_interval.end}
    with switch_db(StreamInstanceModel, 'hyperstream'):
        for instance in StreamInstanceModel.objects(__raw__=query):
            yield StreamInstance(timestamp=instance.datetime, value=instance.value)
Get the results for a given stream
9,981
def seek_to_packet(self, index):
    """Position the file pointer at the start of packet *index*."""
    self.blob_file.seek(self.packet_positions[index], 0)
Move file pointer to the packet with given index .
9,982
def next_blob(self):
    """Decode and return the next blob from the CLB raw-data file.

    Raises StopIteration when no further length prefix can be read
    (end of file).
    """
    try:
        # Each blob starts with a little-endian int32 byte length.
        length = struct.unpack('<i', self.blob_file.read(4))[0]
    except struct.error:
        raise StopIteration
    header = CLBHeader(file_obj=self.blob_file)
    blob = {'CLBHeader': header}
    remaining_length = length - header.size
    pmt_data = []
    pmt_raw_data = self.blob_file.read(remaining_length)
    pmt_raw_data_io = BytesIO(pmt_raw_data)
    # Each hit is 6 bytes, big-endian: 1-byte channel id,
    # 4-byte timestamp, 1-byte time-over-threshold.
    for _ in range(int(remaining_length / 6)):
        channel_id, time, tot = struct.unpack('>cic', pmt_raw_data_io.read(6))
        pmt_data.append(PMTData(ord(channel_id), time, ord(tot)))
    blob['PMTData'] = pmt_data
    blob['PMTRawData'] = pmt_raw_data
    return blob
Generate next blob in file
9,983
def getKendallTauScore(myResponse, otherResponse):
    """Return the normalized Kendall tau distance of two rankings.

    Both arguments are dicts mapping item -> rank.  The result is
    the number of discordant pairs scaled into [0, 1]: 0 for
    identical order, 1 for fully reversed.  Rankings with fewer than
    two items score 0.

    Fix: the original indexed ``dict.values()`` views directly,
    which raises TypeError on Python 3; the views are now
    materialized as lists.

    NOTE(review): the comparison relies on both dicts iterating their
    values in the same key order — presumably both are built over the
    same candidate set; confirm with callers.
    """
    ranks_a = list(myResponse.values())
    ranks_b = list(otherResponse.values())
    n = len(ranks_a)
    if n <= 1:
        return 0
    discordant = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            if ((ranks_a[i] > ranks_a[j] and ranks_b[i] < ranks_b[j]) or
                    (ranks_a[i] < ranks_a[j] and ranks_b[i] > ranks_b[j])):
                discordant += 1
    return (discordant * 2) / (n * (n - 1))
Returns the Kendall Tau Score
9,984
def getCandScoresMap(self, profile):
    """Map each candidate to its total positional score in *profile*.

    Supports 'soc' and 'toc' election types; each distinct ranking
    is weighted by its multiplicity.
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported election type")
        exit()
    candScoresMap = {cand: 0.0 for cand in profile.candMap.keys()}
    scoringVector = self.getScoringVector(profile)
    rankMaps = profile.getRankMaps()
    rankMapCounts = profile.getPreferenceCounts()
    for idx in range(0, len(rankMaps)):
        weight = rankMapCounts[idx]
        for cand, position in rankMaps[idx].items():
            candScoresMap[cand] += scoringVector[position - 1] * weight
    return candScoresMap
Returns a dictionary that associates the integer representation of each candidate with the score they received in the profile .
9,985
def getMov(self, profile):
    """Return the margin of victory of *profile* under this rule."""
    import mov
    scoring_vector = self.getScoringVector(profile)
    return mov.MoVScoring(profile, scoring_vector)
Returns an integer that is equal to the margin of victory of the election profile .
9,986
def getCandScoresMap(self, profile):
    """Map every candidate to its Bucklin score.

    The Bucklin score of a candidate is the smallest rank t such
    that a majority (ceil(numVoters / 2)) of voters rank the
    candidate at position t or better.
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported profile type")
        exit()
    bucklinScores = dict()
    rankMaps = profile.getRankMaps()
    preferenceCounts = profile.getPreferenceCounts()
    for cand in profile.candMap.keys():
        numTimesRanked = 0
        # Accumulate how many voters rank `cand` at position t or
        # better; stop at the first t reaching a majority.
        for t in range(1, profile.numCands + 1):
            for i in range(0, len(rankMaps)):
                if (rankMaps[i][cand] == t):
                    numTimesRanked += preferenceCounts[i]
            if numTimesRanked >= math.ceil(float(profile.numVoters) / 2):
                bucklinScores[cand] = t
                break
    return bucklinScores
Returns a dictionary that associates integer representations of each candidate with their Bucklin score .
9,987
def getCandScoresMap(self, profile):
    """Map each candidate to its maximin score.

    A candidate's maximin score is the worst pairwise value it
    achieves against any opponent in the weighted majority graph.
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported election type")
        exit()
    wmg = profile.getWmg()
    maximinScores = {cand: float("inf") for cand in wmg.keys()}
    for first, second in itertools.combinations(wmg.keys(), 2):
        if second not in wmg[first].keys():
            continue
        maximinScores[first] = min(maximinScores[first], wmg[first][second])
        maximinScores[second] = min(maximinScores[second], wmg[second][first])
    return maximinScores
Returns a dictionary that associates integer representations of each candidate with their maximin score .
9,988
def computeStrongestPaths(self, profile, pairwisePreferences):
    """Compute the Schulze strongest paths between all candidate pairs.

    Returns strongestPaths[i][j], the strength of the strongest path
    from candidate i to candidate j, via a Floyd-Warshall style
    widest-path relaxation over the pairwise-preference graph.

    NOTE(review): the loops index candidates as 1..numCands, so this
    assumes candidates are numbered consecutively from 1 -- confirm
    against profile.candMap.
    """
    cands = profile.candMap.keys()
    numCands = len(cands)
    strongestPaths = dict()
    for cand in cands:
        strongestPaths[cand] = dict()
    # Direct path strengths: only strict pairwise wins count.
    for i in range(1, numCands + 1):
        for j in range(1, numCands + 1):
            if (i == j):
                continue
            if pairwisePreferences[i][j] > pairwisePreferences[j][i]:
                strongestPaths[i][j] = pairwisePreferences[i][j]
            else:
                strongestPaths[i][j] = 0
    # Relaxation: a path j -> i -> k is as strong as its weakest link.
    for i in range(1, numCands + 1):
        for j in range(1, numCands + 1):
            if (i == j):
                continue
            for k in range(1, numCands + 1):
                if (i == k or j == k):
                    continue
                strongestPaths[j][k] = max(strongestPaths[j][k], min(strongestPaths[j][i], strongestPaths[i][k]))
    return strongestPaths
Returns a two - dimensional dictionary that associates every pair of candidates cand1 and cand2 with the strongest path from cand1 to cand2 .
9,989
def computePairwisePreferences(self, profile):
    """Count how many voters prefer cand1 to cand2 for every pair.

    Returns pairwisePreferences[cand1][cand2] = weighted number of
    voters preferring cand1 over cand2.  Partially specified votes
    are handled: a candidate missing from a vote's wmgMap is treated
    as ranked below every candidate that is present.  Each vote is
    weighted by its multiplicity (preference.count).
    """
    cands = profile.candMap.keys()
    pairwisePreferences = dict()
    for cand in cands:
        pairwisePreferences[cand] = dict()
    for cand1 in cands:
        for cand2 in cands:
            if cand1 != cand2:
                pairwisePreferences[cand1][cand2] = 0
    for preference in profile.preferences:
        wmgMap = preference.wmgMap
        for cand1, cand2 in itertools.combinations(cands, 2):
            if cand1 not in wmgMap.keys():
                # cand1 unranked: cand2 wins the pair if it is ranked.
                if cand2 in wmgMap.keys():
                    pairwisePreferences[cand2][cand1] += 1 * preference.count
            elif cand2 not in wmgMap.keys():
                if cand1 in wmgMap.keys():
                    pairwisePreferences[cand1][cand2] += 1 * preference.count
            elif wmgMap[cand1][cand2] == 1:
                # +1 encodes "cand1 ranked above cand2" in the wmgMap.
                pairwisePreferences[cand1][cand2] += 1 * preference.count
            elif wmgMap[cand1][cand2] == - 1:
                pairwisePreferences[cand2][cand1] += 1 * preference.count
    return pairwisePreferences
Returns a two - dimensional dictionary that associates every pair of candidates cand1 and cand2 with number of voters who prefer cand1 to cand2 .
9,990
def getCandScoresMap(self, profile):
    """Map each candidate to her Schulze score.

    The score counts the other candidates against whom her strongest
    path is at least as strong as the reverse path.
    """
    cands = profile.candMap.keys()
    pairwisePreferences = self.computePairwisePreferences(profile)
    strongestPaths = self.computeStrongestPaths(profile, pairwisePreferences)
    betterCount = {cand: 0 for cand in cands}
    for challenger in cands:
        for opponent in cands:
            if challenger == opponent:
                continue
            if strongestPaths[challenger][opponent] >= strongestPaths[opponent][challenger]:
                betterCount[challenger] += 1
    return betterCount
Returns a dictionary that associates integer representations of each candidate with the number of other candidates for which her strongest path to the other candidate is greater than the other candidate's strongest path to her .
9,991
def STVsocwinners(self, profile):
    """Return the sorted list of all possible STV winners of *profile*.

    Performs a DFS over elimination states: in each state the
    candidate(s) with the lowest plurality score are candidates for
    elimination, and every tie is branched on, so every winner
    reachable under some tie-breaking is found.
    """
    ordering = profile.getOrderVectors()
    prefcounts = profile.getPreferenceCounts()
    m = profile.numCands
    # Candidates may be numbered 0..m-1 or 1..m depending on input.
    if min(ordering[0]) == 0:
        startstate = set(range(m))
    else:
        startstate = set(range(1, m + 1))
    ordering, startstate = self.preprocessing(ordering, prefcounts, m, startstate)
    m_star = len(startstate)
    known_winners = set()
    hashtable2 = set()  # visited states, avoids re-expanding duplicates
    root = Node(value=startstate)
    stackNode = []
    stackNode.append(root)
    while stackNode:
        node = stackNode.pop()
        state = node.value.copy()
        # A singleton state yields a winner.
        if len(state) == 1 and list(state)[0] not in known_winners:
            known_winners.add(list(state)[0])
            continue
        # Prune when every remaining candidate is already a winner.
        if state <= known_winners:
            continue
        plural_score = self.get_plurality_scores3(prefcounts, ordering, state, m_star)
        minscore = min(plural_score.values())
        # Branch on every candidate tied for the lowest plurality score.
        for to_be_deleted in state:
            if plural_score[to_be_deleted] == minscore:
                child_state = state.copy()
                child_state.remove(to_be_deleted)
                tpc = tuple(sorted(child_state))
                if tpc in hashtable2:
                    continue
                else:
                    hashtable2.add(tpc)
                child_node = Node(value=child_state)
                stackNode.append(child_node)
    return sorted(known_winners)
Returns an integer list that represents all possible winners of a profile under STV rule .
9,992
def baldwinsoc_winners(self, profile):
    """Return the sorted list of all possible Baldwin-rule winners.

    Baldwin repeatedly eliminates the candidate with the lowest
    Borda score (computed here from the weighted majority graph
    restricted to the remaining candidates); every elimination tie
    is branched on via DFS so all co-winners are found.
    """
    ordering = profile.getOrderVectors()
    m = profile.numCands
    prefcounts = profile.getPreferenceCounts()
    # Candidates may be numbered 0..m-1 or 1..m depending on input.
    if min(ordering[0]) == 0:
        startstate = set(range(m))
    else:
        startstate = set(range(1, m + 1))
    wmg = self.getWmg2(prefcounts, ordering, startstate, normalize=False)
    known_winners = set()
    hashtable2 = set()  # visited states, avoids duplicate expansion
    root = Node(value=startstate)
    stackNode = []
    stackNode.append(root)
    while stackNode:
        node = stackNode.pop()
        state = node.value.copy()
        if len(state) == 1 and list(state)[0] not in known_winners:
            known_winners.add(list(state)[0])
            continue
        if state <= known_winners:
            continue
        # Borda-style score of each remaining candidate within `state`.
        plural_score = dict()
        for cand in state:
            plural_score[cand] = 0
        for cand1, cand2 in itertools.permutations(state, 2):
            plural_score[cand1] += wmg[cand1][cand2]
        minscore = min(plural_score.values())
        # Branch on every candidate tied for the minimum score.
        for to_be_deleted in state:
            if plural_score[to_be_deleted] == minscore:
                child_state = state.copy()
                child_state.remove(to_be_deleted)
                tpc = tuple(sorted(child_state))
                if tpc in hashtable2:
                    continue
                else:
                    hashtable2.add(tpc)
                child_node = Node(value=child_state)
                stackNode.append(child_node)
    return sorted(known_winners)
Returns an integer list that represents all possible winners of a profile under baldwin rule .
9,993
def getWmg2(self, prefcounts, ordering, state, normalize=False):
    """Build a weighted majority graph restricted to *state*.

    wmgMap[c1][c2] counts, weighted by prefcounts, how often c1 is
    ranked above c2 in the order vectors.  With normalize=True every
    edge is divided by the largest edge weight.
    """
    wmgMap = {cand: dict() for cand in state}
    for first, second in itertools.combinations(state, 2):
        wmgMap[first][second] = 0
        wmgMap[second][first] = 0
    # Order vectors list candidates best-to-worst, so every pair from
    # combinations() has the first element ranked above the second.
    for idx in range(0, len(prefcounts)):
        for first, second in itertools.combinations(ordering[idx], 2):
            wmgMap[first][second] += prefcounts[idx]
    if normalize == True:
        maxEdge = float('-inf')
        for cand in wmgMap.keys():
            maxEdge = max(maxEdge, max(wmgMap[cand].values()))
        for first in wmgMap.keys():
            for second in wmgMap[first].keys():
                wmgMap[first][second] = float(wmgMap[first][second]) / maxEdge
    return wmgMap
Generate a weighted majority graph that represents the whole profile . The function will return a two - dimensional dictionary that associates integer representations of each pair of candidates cand1 and cand2 with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1 .
9,994
def PluRunOff_single_winner(self, profile):
    """Return the single winner under Plurality with Runoff.

    The two candidates with the highest plurality scores advance to
    a runoff; each ballot then credits whichever finalist it ranks
    higher (both, when they share the same rank position).
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    ranking = MechanismPlurality().getRanking(profile)
    # Pick the top two candidates from the tiered plurality ranking;
    # the first tier may already contain both of them.
    max_cand = ranking[0][0][0]
    if len(ranking[0][0]) > 1:
        second_max_cand = ranking[0][0][1]
    else:
        second_max_cand = ranking[0][1][0]
    top_2 = [max_cand, second_max_cand]
    dict_top2 = {max_cand: 0, second_max_cand: 0}
    for i in range(len_prefcounts):
        # Restrict ballot i to the two finalists.
        vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
        top_position = min(vote_top2.values())
        keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
        for key in keys:
            dict_top2[key] += prefcounts[i]
    winner = max(dict_top2.items(), key=lambda x: x[1])[0]
    return winner
Returns a number that associates the winner of a profile under Plurality with Runoff rule .
9,995
def PluRunOff_cowinners(self, profile):
    """Return all co-winners under Plurality with Runoff.

    Every possible pair of runoff finalists (accounting for ties in
    the plurality ranking) is evaluated, and every candidate who
    wins some runoff is collected.
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    ranking = MechanismPlurality().getRanking(profile)
    known_winners = set()
    # Enumerate every possible pair of finalists given ties in the
    # first (and possibly second) tier of the plurality ranking.
    top_2_combinations = []
    if len(ranking[0][0]) > 1:
        for cand1, cand2 in itertools.combinations(ranking[0][0], 2):
            top_2_combinations.append([cand1, cand2])
    else:
        max_cand = ranking[0][0][0]
        if len(ranking[0][1]) > 1:
            for second_max_cand in ranking[0][1]:
                top_2_combinations.append([max_cand, second_max_cand])
        else:
            second_max_cand = ranking[0][1][0]
            top_2_combinations.append([max_cand, second_max_cand])
    for top_2 in top_2_combinations:
        dict_top2 = {top_2[0]: 0, top_2[1]: 0}
        for i in range(len_prefcounts):
            # Restrict ballot i to the two finalists and credit the
            # higher-ranked one (both when tied in rank position).
            vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
            top_position = min(vote_top2.values())
            keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
            for key in keys:
                dict_top2[key] += prefcounts[i]
        max_value = max(dict_top2.values())
        winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
        known_winners = known_winners | set(winners)
    return sorted(known_winners)
Returns a list that associates all the winners of a profile under Plurality with Runoff rule .
9,996
def SNTV_winners(self, profile, K):
    """Return the top-K plurality candidates (SNTV winners)."""
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()
    m = profile.numCands
    candScoresMap = MechanismPlurality().getCandScoresMap(profile)
    # With at least as many seats as candidates, everybody wins.
    if K >= m:
        return list(candScoresMap.keys())
    ranked = sorted(candScoresMap.items(), key=lambda item: item[1], reverse=True)
    return [cand for cand, score in ranked[0:K]]
Returns a list that associates all the winners of a profile under Single non - transferable vote rule .
9,997
def Borda_mean_winners(self, profile):
    """Return a 0/1 indicator list of Borda-mean winners.

    Entry i is 1 iff candidate i's Borda score is at least the mean
    Borda score over all candidates.
    """
    n_candidates = profile.numCands
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    values = zeros([len_prefcounts, n_candidates], dtype=int)
    # Candidates may be numbered from 0 or 1; `delta` aligns them
    # to 0-based matrix columns.
    if min(list(rankmaps[0].keys())) == 0:
        delta = 0
    else:
        delta = 1
    for i in range(len_prefcounts):
        for j in range(delta, n_candidates + delta):
            values[i][j - delta] = rankmaps[i][j]
    # _build_mat presumably builds the pairwise comparison matrix
    # from the rank matrix -- TODO confirm against its definition.
    mat0 = self._build_mat(values, n_candidates, prefcounts)
    borda = [0 for i in range(n_candidates)]
    for i in range(n_candidates):
        borda[i] = sum([mat0[i, j] for j in range(n_candidates)])
    borda_mean = mean(borda)
    bin_winners_list = [int(borda[i] >= borda_mean) for i in range(n_candidates)]
    return bin_winners_list
Returns a list that associates all the winners of a profile under The Borda - mean rule .
9,998
def apply_t0(self, hits):
    """Apply only the t0 offsets to the hit times of *hits*, in place.

    Uses the numba-accelerated kernel when available; otherwise a
    plain Python loop over the per-DOM/per-channel calibration
    lookup.  Returns the (mutated) hits for convenience.
    """
    if HAVE_NUMBA:
        apply_t0_nb(hits.time, hits.dom_id, hits.channel_id, self._lookup_tables)
    else:
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
            # Index 6 of a calibration entry is presumably the t0
            # value -- TODO confirm against the calibration layout.
            cal[i] = calib[6]
        hits.time += cal
    return hits
Apply only t0s
9,999
def _get_file_index_str ( self ) : file_index = str ( self . file_index ) if self . n_digits is not None : file_index = file_index . zfill ( self . n_digits ) return file_index
Create a string out of the current file_index