idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
10,000
def prepare_blobs(self):
    """Populate the blobs: parse the header and optionally cache event offsets."""
    self.raw_header = self.extract_header()
    if self.cache_enabled:
        self._cache_offsets()
Populate the blobs
10,001
def extract_header(self):
    """Create a dictionary with the EVT header information.

    Reads tag:value lines from the start of the file until the first
    'end_event:' marker, records the offset of the first real event and
    (optionally) auto-registers parsers listed under the 'physics' tag.
    Raises ValueError if the header is never terminated.
    """
    self.log.info("Extracting the header")
    raw_header = self.raw_header = defaultdict(list)
    first_line = try_decode_string(self.blob_file.readline())
    self.blob_file.seek(0, 0)  # rewind; the header is re-read line by line below
    if not first_line.startswith(str('start_run')):
        self.log.warning("No header found.")
        return raw_header
    for line in iter(self.blob_file.readline, ''):
        line = try_decode_string(line).strip()
        try:
            tag, value = str(line).split(':')
        except ValueError:
            continue  # not a tag:value line, skip it
        raw_header[tag].append(str(value).split())
        if line.startswith(str('end_event:')):
            self._record_offset()
            if self._auto_parse and 'physics' in raw_header:
                parsers = [p[0].lower() for p in raw_header['physics']]
                self._register_parsers(parsers)
            return raw_header
    raise ValueError("Incomplete header, no 'end_event' tag found!")
Create a dictionary with the EVT header information
10,002
def get_blob(self, index):
    """Return a blob with the event at the given index.

    Caches more offsets on demand when *index* is beyond the cached range,
    then seeks to the event, builds the blob and runs all parsers on it.
    Raises IndexError when no blob could be created at that position.
    """
    self.log.info("Retrieving blob #{}".format(index))
    if index > len(self.event_offsets) - 1:
        self.log.info("Index not in cache, caching offsets")
        self._cache_offsets(index, verbose=False)
    self.blob_file.seek(self.event_offsets[index], 0)
    blob = self._create_blob()
    if blob is None:
        self.log.info("Empty blob created...")
        raise IndexError
    self.log.debug("Applying parsers...")
    for parser in self.parsers:
        parser(blob)
    self.log.debug("Returning the blob")
    return blob
Return a blob with the event at the given index
10,003
def process(self, blob=None):
    """Pump the next blob to the modules.

    On IndexError (current file exhausted) it rolls over to the next file,
    if one is configured, and retries; otherwise it stops the pipeline.
    """
    try:
        blob = self.get_blob(self.index)
    except IndexError:
        self.log.info("Got an IndexError, trying the next file")
        if (self.basename or self.filenames) and self.file_index < self.index_stop:
            self.file_index += 1
            self.log.info("Now at file_index={}".format(self.file_index))
            self._reset()
            self.blob_file.close()
            self.log.info("Resetting blob index to 0")
            self.index = 0
            file_index = self._get_file_index_str()
            if self.filenames:
                self.filename = self.filenames[self.file_index - 1]
            elif self.basename:
                self.filename = "{}{}{}.evt".format(self.basename, file_index, self.suffix)
            self.log.info("Next filename: {}".format(self.filename))
            self.print("Opening {0}".format(self.filename))
            self.open_file(self.filename)
            self.prepare_blobs()
            try:
                blob = self.get_blob(self.index)
            except IndexError:
                self.log.warning("No blob found in file {}".format(self.filename))
            else:
                return blob
        self.log.info("No files left, terminating the pipeline")
        raise StopIteration
    self.index += 1
    return blob
Pump the next blob to the modules
10,004
def _cache_offsets(self, up_to_index=None, verbose=True):
    """Cache event offsets, either for the whole file or up to *up_to_index*.

    Without *up_to_index* the scan starts from the beginning (offset 0 is
    pre-seeded when there is no header); otherwise it resumes from the
    last cached offset.
    """
    if not up_to_index:
        if verbose:
            self.print("Caching event file offsets, this may take a bit.")
        self.blob_file.seek(0, 0)
        self.event_offsets = []
        if not self.raw_header:
            self.event_offsets.append(0)
    else:
        self.blob_file.seek(self.event_offsets[-1], 0)
    for line in iter(self.blob_file.readline, ''):
        line = try_decode_string(line)
        if line.startswith('end_event:'):
            self._record_offset()
            if len(self.event_offsets) % 100 == 0:
                if verbose:
                    print('.', end='')
                    sys.stdout.flush()
            if up_to_index and len(self.event_offsets) >= up_to_index + 1:
                return
    # the final recorded offset points past the last event, drop it
    self.event_offsets.pop()
    if not up_to_index:
        self.whole_file_cached = True
        self.print("\n{0} events indexed.".format(len(self.event_offsets)))
Cache all event offsets .
10,005
def _record_offset ( self ) : offset = self . blob_file . tell ( ) self . event_offsets . append ( offset )
Stores the current file pointer position
10,006
def _create_blob(self):
    """Parse the next event from the current file position.

    Returns a Blob on success, or None (implicitly) when no 'start_event'
    is found before the file ends.
    """
    blob = None
    for line in self.blob_file:
        line = try_decode_string(line).strip()
        if line == '':
            self.log.info("Ignoring empty line...")
            continue
        if line.startswith('end_event:') and blob:
            blob['raw_header'] = self.raw_header
            return blob
        try:
            tag, values = line.split(':')
        except ValueError:
            self.log.warning("Ignoring corrupt line: {}".format(line))
            continue
        try:
            values = tuple(split(values.strip(), callback=float))
        except ValueError:
            self.log.info("Empty value: {}".format(values))
        if line.startswith('start_event:'):
            blob = Blob()
            blob[tag] = tuple(int(v) for v in values)
            continue
        if tag not in blob:
            blob[tag] = []
        blob[tag].append(values)
Parse the next event from the current file position
10,007
def runserver(project_name):
    """Run a python cgi server in a subprocess.

    Expects *project_name* to contain a ``settings.py``; creates the
    ``cgi/cgi-bin`` layout if missing, chdirs into ``cgi`` and launches
    ``python -m http.server --cgi 8000`` in the background.

    Raises NotImplementedError when no settings.py is present.
    """
    entries = os.listdir(project_name)
    if 'settings.py' not in entries:
        raise NotImplementedError('No file called: settings.py found in %s' % project_name)
    cgi_bin_folder = os.path.join(project_name, 'cgi', 'cgi-bin')
    cgi_folder = os.path.join(project_name, 'cgi')
    if not os.path.exists(cgi_bin_folder):
        os.makedirs(cgi_bin_folder)
    # NOTE: this changes the process-wide working directory as a side effect.
    os.chdir(cgi_folder)
    # Bug fix: pass an argv list (shell=False).  The original single-string
    # form is only interpreted as a command line on Windows; on POSIX it is
    # taken as the program name and raises FileNotFoundError.
    subprocess.Popen(["python", "-m", "http.server", "--cgi", "8000"])
Runs a python cgi server in a subprocess .
10,008
def getUtility(self, decision, sample, aggregationMode="avg"):
    """Get the utility of a given decision given a preference.

    :param decision: passed through to ``self.getUtilities``
    :param sample: preference sample passed through to ``self.getUtilities``
    :param aggregationMode: 'avg', 'min' or 'max' aggregation of the
        per-candidate utilities
    :returns: the aggregated utility
    :raises ValueError: on an unrecognized aggregation mode
    """
    utilities = self.getUtilities(decision, sample)
    if aggregationMode == "avg":
        return numpy.mean(utilities)
    if aggregationMode == "min":
        return min(utilities)
    if aggregationMode == "max":
        return max(utilities)
    # Bug fix: the original print()ed and called exit(), killing the whole
    # process from library code; raise a descriptive exception instead.
    raise ValueError("aggregation mode not recognized: {}".format(aggregationMode))
Get the utility of a given decision given a preference .
10,009
def getUtilities(self, decision, orderVector):
    """Return a list with the utility of every candidate in *decision*.

    Each candidate's utility is the scoring-vector entry at its position
    in *orderVector*, negated when ``self.isLoss`` is set.
    """
    scoringVector = self.getScoringVector(orderVector)
    sign = -1.0 if self.isLoss == True else 1.0
    return [sign * float(scoringVector[orderVector.index(alt)]) for alt in decision]
Returns a list of floats containing the utilities of every candidate in the decision.
10,010
def getUtilities(self, decision, binaryRelations):
    """Return a list with the utility of every candidate in *decision*.

    For each candidate, computes the transitive set of alternatives that
    beat it (positive entries in *binaryRelations*); the candidate scores
    1.0 (or -1.0 for losses) if that set fits within the top ``self.k``,
    else 0.0.  Adapted from code written by Lirong Xia.
    """
    m = len(binaryRelations)
    utilities = []
    for cand in decision:
        # BFS over the "beats cand" relation, starting from cand itself
        tops = [cand - 1]
        index = 0
        while index < len(tops):
            s = tops[index]
            for j in range(m):
                if j == s:
                    continue
                if binaryRelations[j][s] > 0 and j not in tops:
                    tops.append(j)
            index += 1
        if len(tops) <= self.k:
            if self.isLoss == False:
                utilities.append(1.0)
            elif self.isLoss == True:
                utilities.append(-1.0)
        else:
            utilities.append(0.0)
    return utilities
Returns a list of floats containing the utilities of every candidate in the decision. This was adapted from code written by Lirong Xia.
10,011
def db_credentials(self):
    """Return username and password for the KM3NeT WebDB.

    Read from the config's [DB] section; fall back to an interactive
    prompt when the config lookup fails.
    """
    try:
        username = self.config.get('DB', 'username')
        password = self.config.get('DB', 'password')
    except Error:
        username = input("Please enter your KM3NeT DB username: ")
        password = getpass.getpass("Password: ")
    return username, password
Return username and password for the KM3NeT WebDB .
10,012
def get_path(src):
    """Prompt the user for a local path to a clone/release of *src*.

    Keeps asking until an existing directory is given; returns the
    resolved Path.
    """
    res = None
    while not res:
        if res is False:
            # previous answer was rejected
            print(colored('You must provide a path to an existing directory!', 'red'))
        print('You need a local clone or release of (a fork of) '
              'https://github.com/{0}'.format(src))
        res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
        if res and Path(res).exists():
            return Path(res).resolve()
        res = False
Prompts the user to input a local path .
10,013
def execute_all(self):
    """Execute every online workflow over each of its requested intervals."""
    for workflow_id, workflow in self.workflows.items():
        if not workflow.online:
            continue
        for interval in workflow.requested_intervals:
            logging.info("Executing workflow {} over interval {}".format(workflow_id, interval))
            workflow.execute(interval)
Execute all workflows
10,014
def execute(self, sources, sink, interval, alignment_stream=None):
    """Execute the tool over the given time interval.

    If an alignment stream is given, the output instances will be aligned
    to this stream.  Only the sub-intervals not already calculated on the
    sink are computed; results are written to the sink and recorded in
    its calculated intervals.
    """
    if not isinstance(interval, TimeInterval):
        raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
    if interval.end > sink.channel.up_to_timestamp:
        raise StreamNotAvailableError(sink.channel.up_to_timestamp)
    required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
    if not required_intervals.is_empty:
        document_count = 0
        for interval in required_intervals:
            for stream_instance in self._execute(
                    sources=sources, alignment_stream=alignment_stream, interval=interval):
                sink.writer(stream_instance)
                document_count += 1
            sink.calculated_intervals += interval
        # sanity check: everything requested should now be covered
        required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
        if not required_intervals.is_empty:
            logging.error("{} execution error for time interval {} on stream {}".format(
                self.name, interval, sink))
        if not document_count:
            logging.debug("{} did not produce any data for time interval {} on stream {}".format(
                self.name, interval, sink))
        self.write_to_history(interval=interval, tool=self.name, document_count=document_count)
Execute the tool over the given time interval . If an alignment stream is given the output instances will be aligned to this stream
10,015
def create_stream(self, stream_id, sandbox=None):
    """Create an in-memory stream for *stream_id* and return it.

    Raises StreamAlreadyExistsError for duplicate ids and ValueError if a
    sandbox is requested (memory channels do not support sandboxes).
    """
    if stream_id in self.streams:
        raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id))
    if sandbox is not None:
        raise ValueError("Cannot use sandboxes with memory streams")
    stream = Stream(channel=self, stream_id=stream_id, calculated_intervals=None, sandbox=None)
    self.streams[stream_id] = stream
    self.data[stream_id] = StreamInstanceCollection()
    return stream
Must be overridden by deriving classes; creates the stream according to the tool and returns its unique identifier stream_id.
10,016
def purge_all(self, remove_definitions=False):
    """Clear all streams in the channel - use with caution!"""
    # iterate over a snapshot of the keys since purging mutates self.streams
    for stream_id in list(self.streams.keys()):
        self.purge_stream(stream_id, remove_definition=remove_definitions)
Clears all streams in the channel - use with caution!
10,017
def update_state(self, up_to_timestamp):
    """Mark every stream as fully calculated up to *up_to_timestamp*.

    Call this to ensure the channel is up to date at that time, i.e. all
    streams created before then are treated as computed until it.
    """
    full_interval = TimeIntervals([(MIN_DATE, up_to_timestamp)])
    for stream_id in self.streams:
        self.streams[stream_id].calculated_intervals = full_interval
    self.up_to_timestamp = up_to_timestamp
Call this function to ensure that the channel is up to date at the time of timestamp . I . e . all the streams that have been created before or at that timestamp are calculated exactly until up_to_timestamp .
10,018
def compile_regex(self, pattern, flags=0):
    """Compile a regex from *pattern*, expanding ``%{name}`` / ``%{name:sub}``
    references via ``self.pattern_dict``.

    Substitution repeats until no references remain.  Returns the compiled
    pattern, or None when a referenced pattern name is unknown.
    """
    # Bug fix: use raw strings — the original non-raw '\(' '\{' '\w' escapes
    # raise DeprecationWarning and will become a SyntaxError in future Pythons.
    pattern_re = regex.compile(r'(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})')
    while True:
        matches = [md.groupdict() for md in pattern_re.finditer(pattern)]
        if not matches:
            break
        for md in matches:
            if md['patname'] not in self.pattern_dict:
                return  # unknown pattern name: keep the original None contract
            if md['subname']:
                if '(?P<' in self.pattern_dict[md['patname']]:
                    # rename the first named group to the requested subname
                    repl = regex.sub(r'\(\?P<(\w+)>', '(?P<%s>' % md['subname'],
                                     self.pattern_dict[md['patname']], 1)
                else:
                    repl = '(?P<%s>%s)' % (md['subname'], self.pattern_dict[md['patname']])
            else:
                repl = self.pattern_dict[md['patname']]
            pattern = pattern.replace(md['substr'], repl)
    return regex.compile(pattern, flags)
Compile regex from pattern and pattern_dict
10,019
def _load_patterns(self, folders, pattern_dict=None):
    """Load all patterns from all the files in *folders* into one dict.

    Only files whose names match ``[\\w-]+`` are considered.  Returns the
    (possibly caller-supplied) pattern dict.
    """
    if pattern_dict is None:
        pattern_dict = {}
    for folder in folders:
        # 'file_name' instead of 'file': avoids shadowing the py2 builtin
        for file_name in os.listdir(folder):
            # Bug fix: raw string — '\w' in a non-raw literal is a
            # deprecated escape sequence.
            if regex.match(r'^[\w-]+$', file_name):
                self._load_pattern_file(os.path.join(folder, file_name), pattern_dict)
    return pattern_dict
Load all pattern from all the files in folders
10,020
def load_pkl(filenames):
    """Unpickle Times objects from one or more files.

    Accepts a single filename or a list/tuple of them; returns a single
    Times object for one file, otherwise a list.  Raises TypeError when a
    loaded object is not a Times instance.
    """
    if not isinstance(filenames, (list, tuple)):
        filenames = [filenames]
    times = []
    for name in filenames:
        with open(str(name), 'rb') as file:
            loaded_obj = pickle.load(file)
        if not isinstance(loaded_obj, Times):
            raise TypeError("At least one loaded object is not a Times data object.")
        times.append(loaded_obj)
    return times if len(times) > 1 else times[0]
Unpickle file contents .
10,021
async def retrieve(self, url, **kwargs):
    """Issue an API GET request to *url*.

    Returns parsed JSON for 'application/json' responses, otherwise the
    response text.  Client errors are logged; the method then implicitly
    returns None.
    """
    try:
        async with self.websession.request('GET', url, **kwargs) as res:
            if res.status != 200:
                raise Exception("Could not retrieve information from API")
            if res.content_type == 'application/json':
                return await res.json()
            return await res.text()
    except aiohttp.ClientError as err:
        logging.error(err)
Issue API requests .
10,022
def _to_number ( cls , string ) : try : if float ( string ) - int ( string ) == 0 : return int ( string ) return float ( string ) except ValueError : try : return float ( string ) except ValueError : return string
Convert string to int or float .
10,023
async def stations(self):
    """Retrieve the list of stations as Station namedtuples."""
    data = await self.retrieve(API_DISTRITS)
    Station = namedtuple(
        'Station',
        ['latitude', 'longitude', 'idAreaAviso', 'idConselho',
         'idDistrito', 'idRegiao', 'globalIdLocal', 'local'])
    _stations = []
    for station in data['data']:
        _stations.append(Station(
            self._to_number(station['latitude']),
            self._to_number(station['longitude']),
            station['idAreaAviso'],
            # NOTE(review): the tuple field is spelled 'idConselho' but the
            # API key read here is 'idConcelho' — looks intentional for the
            # upstream payload, confirm against the API schema.
            station['idConcelho'],
            station['idDistrito'],
            station['idRegiao'],
            # round down to the station group id
            station['globalIdLocal'] // 100 * 100,
            station['local'],
        ))
    return _stations
Retrieve stations .
10,024
async def weather_type_classe(self):
    """Retrieve the Portuguese translation for each weather type id."""
    data = await self.retrieve(url=API_WEATHER_TYPE)
    self.weather_type = {
        _type['idWeatherType']: _type['descIdWeatherTypePT']
        for _type in data['data']
    }
    return self.weather_type
Retrieve translation for weather type .
10,025
async def wind_type_classe(self):
    """Retrieve the Portuguese translation for each wind speed class."""
    data = await self.retrieve(url=API_WIND_TYPE)
    self.wind_type = {
        int(_type['classWindSpeed']): _type['descClassWindSpeedDailyPT']
        for _type in data['data']
    }
    return self.wind_type
Retrieve translation for wind type .
10,026
def register(self, plugin):
    """Register *plugin*: subscribe it to every message it listens to,
    hand it our messages Queue for communication, and start it up."""
    for listener in plugin.listeners:
        self.listeners[listener].add(plugin)
    self.plugins.add(plugin)
    plugin.messenger = self.messages
    plugin.start()
Add the plugin to our set of listeners for each message that it listens to, tell it to use our messages Queue for communication, and start it up.
10,027
def start(self):
    """Send APP_START, then dispatch queued messages to listeners until
    an APP_STOP message shuts everything down."""
    self.recieve('APP_START')
    self.alive = True
    while self.alive:
        message, payload = self.messages.get()
        if message != 'APP_STOP':
            self.recieve(message, payload)
            continue
        for plugin in self.plugins:
            plugin.recieve('SHUTDOWN')
        self.alive = False
Send APP_START to any plugins that listen for it, then loop around waiting for messages and sending them to their listening plugins until it's time to shut down.
10,028
def choose(self, palette):
    """Pick a palette by name; raises KeyError listing the available ones
    when the name is unknown."""
    try:
        colours = self.colours[palette]
    except KeyError:
        raise KeyError("Chose one of the following colour palettes: {0}".format(self.available))
    self._cycler = cycle(colours)
Pick a palette
10,029
def refresh_styles(self):
    """Load all available matplotlib styles plus the km3pipe palette."""
    import matplotlib.pyplot as plt
    self.colours = {}
    for style in plt.style.available:
        try:
            style_colours = plt.style.library[style]['axes.prop_cycle']
        except KeyError:
            # style defines no colour cycle, skip it
            continue
        self.colours[style] = [c['color'] for c in list(style_colours)]
    self.colours['km3pipe'] = [
        "#ff7869", "#4babe1", "#96ad3e", "#e4823d",
        "#5d72b2", "#e2a3c2", "#fd9844", "#e480e7",
    ]
Load all available styles
10,030
def get_file_object(username, password, utc_start=None, utc_stop=None):
    """Make the connection to Schedules Direct and return a file-like object.

    Defaults to the range [now, now + 1 day] when no bounds are given.
    Transparently wraps gzip-encoded responses.  (Python 2 code: urllib2.)
    """
    if not utc_start:
        utc_start = datetime.now()
    if not utc_stop:
        utc_stop = utc_start + timedelta(days=1)
    logging.info("Downloading schedules for username [%s] in range [%s] to "
                 "[%s]." % (username, utc_start, utc_stop))
    replacements = {
        'start_time': utc_start.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'stop_time': utc_stop.strftime('%Y-%m-%dT%H:%M:%SZ'),
    }
    soap_message_xml = soap_message_xml_template % replacements
    authinfo = urllib2.HTTPDigestAuthHandler()
    authinfo.add_password(realm, url, username, password)
    try:
        request = urllib2.Request(url, soap_message_xml, request_headers)
        response = urllib2.build_opener(authinfo).open(request)
        if response.headers['Content-Encoding'] == 'gzip':
            response = GzipStream(response)
    except:
        logging.exception("Could not acquire connection to Schedules Direct.")
        raise
    return response
Make the connection . Return a file - like object .
10,031
def process_file_object(file_obj, importer, progress):
    """Parse the schedule data from the connected file-like object with SAX."""
    logging.info("Processing schedule data.")
    try:
        handler = XmlCallbacks(importer, progress)
        parser = sax.make_parser()
        parser.setContentHandler(handler)
        parser.setErrorHandler(handler)
        parser.parse(file_obj)
    except:
        logging.exception("Parse failed.")
        raise
    logging.info("Schedule data processed.")
Parse the data using the connected file - like object .
10,032
def parse_schedules(username, password, importer, progress, utc_start=None, utc_stop=None):
    """Marry the connecting and reading steps: download, then process."""
    process_file_object(
        get_file_object(username, password, utc_start, utc_stop),
        importer,
        progress)
A utility function to marry the connecting and reading functions .
10,033
def km3h5concat(input_files, output_file, n_events=None, **kwargs):
    """Concatenate KM3HDF5 files via a pump -> sink pipeline."""
    from km3pipe import Pipeline
    from km3pipe.io import HDF5Pump, HDF5Sink

    pipe = Pipeline()
    pipe.attach(HDF5Pump, filenames=input_files, **kwargs)
    pipe.attach(StatusBar, every=250)
    pipe.attach(HDF5Sink, filename=output_file, **kwargs)
    pipe.drain(n_events)
Concatenate KM3HDF5 files via pipeline .
10,034
def get_data(stream, parameters, fmt):
    """Retrieve and print data for the given stream and parameters.

    *parameters* is an iterable of 'key=value' strings; malformed entries
    are logged and skipped.  Prints the data frame, or the stream help if
    nothing was found.  Returns None when the stream does not exist.
    """
    sds = kp.db.StreamDS()
    if stream not in sds.streams:
        log.error("Stream '{}' not found in the database.".format(stream))
        return
    params = {}
    if parameters:
        for parameter in parameters:
            if '=' not in parameter:
                log.error("Invalid parameter syntax '{}'\n"
                          "The correct syntax is 'parameter=value'".format(parameter))
                continue
            # Bug fix: maxsplit=1 so values containing '=' (e.g. base64)
            # no longer raise ValueError on unpacking.
            key, value = parameter.split('=', 1)
            params[key] = value
    data = sds.get(stream, fmt, **params)
    if data is not None:
        with pd.option_context('display.max_rows', None, 'display.max_columns', None):
            print(data)
    else:
        sds.help(stream)
Retrieve data for given stream and parameters or None if not found
10,035
def available_streams():
    """Print a short list of the streams available in the database."""
    sds = kp.db.StreamDS()
    print("Available streams: ")
    print(', '.join(sorted(sds.streams)))
Show a short list of available streams .
10,036
def upload_runsummary(csv_filename, dryrun=False):
    """Read the CSV file and upload its contents to the runsummary table.

    Performs consistency checks (file exists, required columns present,
    at least one parameter column, non-empty data) before converting to
    JSON and POSTing to the DB.  With *dryrun*, parameter names get a
    'TEST_' prefix.
    """
    print("Checking '{}' for consistency.".format(csv_filename))
    if not os.path.exists(csv_filename):
        log.critical("{} -> file not found.".format(csv_filename))
        return
    try:
        df = pd.read_csv(csv_filename, sep='\t')
    except pd.errors.EmptyDataError as e:
        log.error(e)
        return
    cols = set(df.columns)
    if not REQUIRED_COLUMNS.issubset(cols):
        log.error("Missing columns: {}.".format(
            ', '.join(str(c) for c in REQUIRED_COLUMNS - cols)))
        return
    parameters = cols - REQUIRED_COLUMNS
    if len(parameters) < 1:
        log.error("No parameter columns found.")
        return
    if len(df) == 0:
        log.critical("Empty dataset.")
        return
    print("Found data for parameters: {}.".format(', '.join(str(c) for c in parameters)))
    print("Converting CSV data into JSON")
    if dryrun:
        log.warn("Dryrun: adding 'TEST_' prefix to parameter names")
        prefix = "TEST_"
    else:
        prefix = ""
    data = convert_runsummary_to_json(df, prefix=prefix)
    print("We have {:.3f} MB to upload.".format(len(data) / 1024 ** 2))
    print("Requesting database session.")
    db = kp.db.DBManager()  # noqa: keeps the DB session alive
    if kp.db.we_are_in_lyon():
        session_cookie = "sid=_kmcprod_134.158_lyo7783844001343100343mcprod1223user"
    else:
        session_cookie = kp.config.Config().get('DB', 'session_cookie')
    if session_cookie is None:
        raise SystemExit("Could not restore DB session.")
    log.debug("Using the session cookie: {}".format(session_cookie))
    cookie_key, sid = session_cookie.split('=')
    print("Uploading the data to the database.")
    r = requests.post(RUNSUMMARY_URL, cookies={cookie_key: sid}, files={'datafile': data})
    if r.status_code == 200:
        log.debug("POST request status code: {}".format(r.status_code))
        print("Database response:")
        db_answer = json.loads(r.text)
        for key, value in db_answer.items():
            print(" -> {}: {}".format(key, value))
        if db_answer['Result'] == 'OK':
            print("Upload successful.")
        else:
            log.critical("Something went wrong.")
    else:
        log.error("POST request status code: {}".format(r.status_code))
        log.critical("Something went wrong...")
        return
Reads the CSV file and uploads its contents to the runsummary table
10,037
def convert_runsummary_to_json(df, comment='Uploaded via km3pipe.StreamDS', prefix='TEST_'):
    """Convert a Pandas DataFrame with runsummary data to JSON for DB upload.

    Groups rows by detector and run, turning every non-required column
    into a named parameter whose values (tagged with their 'source') must
    be floats.  Raises ValueError otherwise.
    """
    data_field = []
    comment += ", by {}".format(getpass.getuser())
    for det_id, det_data in df.groupby('det_id'):
        runs_field = []
        data_field.append({"DetectorId": det_id, "Runs": runs_field})
        for run, run_data in det_data.groupby('run'):
            parameters_field = []
            runs_field.append({"Run": int(run), "Parameters": parameters_field})
            parameter_dict = {}
            for row in run_data.itertuples():
                for parameter_name in run_data.columns:
                    if parameter_name in REQUIRED_COLUMNS:
                        continue
                    if parameter_name not in parameter_dict:
                        parameter_dict[parameter_name] = {
                            'Name': prefix + parameter_name,
                            'Data': [],
                        }
                    data_value = getattr(row, parameter_name)
                    try:
                        data_value = float(data_value)
                    except ValueError as e:
                        log.critical("Data values has to be floats!")
                        raise ValueError(e)
                    parameter_dict[parameter_name]['Data'].append(
                        {'S': str(getattr(row, 'source')), 'D': data_value})
            for parameter_data in parameter_dict.values():
                parameters_field.append(parameter_data)
    data_to_upload = {"Comment": comment, "Data": data_field}
    return json.dumps(data_to_upload)
Convert a Pandas DataFrame with runsummary to JSON for DB upload
10,038
def calcAcceptanceRatio(self, V, W):
    """Return the MCMC acceptance ratio for moving from order vector V to W.

    For every candidate pair, compares their relative order in V and W and
    weights the difference by phi raised to the WMG entry.
    """
    acceptanceRatio = 1.0
    for a, b in itertools.combinations(V, 2):
        v_a_before_b = 0 if V.index(a) > V.index(b) else 1
        w_a_before_b = 0 if W.index(a) > W.index(b) else 1
        exponent = self.wmg[a][b] * (v_a_before_b - w_a_before_b)
        acceptanceRatio *= self.phi ** exponent
    return acceptanceRatio
Given a order vector V and a proposed order vector W calculate the acceptance ratio for changing to W when using MCMC .
10,039
def getNextSample(self, V):
    """Generate the next MCMC sample by flipping two adjacent candidates.

    Picks a random adjacent pair, swaps it, and accepts the swap with a
    Metropolis-style probability driven by phi and the WMG entry.
    """
    randPos = random.randint(0, len(V) - 2)
    W = copy.deepcopy(V)
    d, c = V[randPos], V[randPos + 1]
    W[randPos], W[randPos + 1] = c, d
    # proposal distribution is symmetric, so prMW / prMV == 1
    prMW = 1
    prMV = 1
    prob = min(1.0, (prMW / prMV) * pow(self.phi, self.wmg[d][c])) / 2
    return W if random.random() <= prob else V
Generate the next sample by randomly flipping two adjacent candidates .
10,040
def getNextSample(self, V):
    """Generate the next MCMC sample by randomly shuffling candidates.

    Draws ``shuffleSize`` positions, permutes the candidates found there,
    and accepts the shuffled ranking with the computed acceptance ratio.
    """
    positions = range(0, len(self.wmg))
    randPoss = random.sample(positions, self.shuffleSize)
    flipSet = copy.deepcopy(randPoss)  # drawn order = source positions
    randPoss.sort()                    # sorted order = target positions
    W = copy.deepcopy(V)
    for j in range(0, self.shuffleSize):
        W[randPoss[j]] = V[flipSet[j]]
    # symmetric proposal: prMW / prMV == 1
    prMW = 1.0
    prMV = 1.0
    acceptanceRatio = self.calcAcceptanceRatio(V, W)
    prob = min(1.0, (prMW / prMV) * acceptanceRatio)
    return W if random.random() <= prob else V
Generate the next sample by randomly shuffling candidates .
10,041
def getNextSample(self, V):
    """Generate a new ranking from a Mallows-based jumping distribution.

    Candidates are inserted one by one at a position drawn with weight
    phi^(distance), as described in "Bayesian Ordinal Peer Grading"
    (Raman and Joachims); the proposal is then accepted with the
    computed acceptance ratio.
    """
    phi = self.phi
    wmg = self.wmg
    W = [V[0]]
    for j in range(2, len(V) + 1):
        randomSelect = random.random()
        threshold = 0.0
        # normalization over the j possible insertion positions
        denom = 1.0
        for k in range(1, j):
            denom = denom + phi ** k
        for k in range(1, j + 1):
            threshold = threshold + (phi ** (j - k)) / denom
            if randomSelect <= threshold:
                W.insert(k - 1, V[j - 1])
                break
    acceptanceRatio = self.calcAcceptanceRatio(V, W)
    prob = min(1.0, acceptanceRatio)
    return W if random.random() <= prob else V
We generate a new ranking based on a Mallows - based jumping distribution . The algorithm is described in Bayesian Ordinal Peer Grading by Raman and Joachims .
10,042
def getNextSample(self, V):
    """Propose a new ranking via a Plackett-Luce draw and accept it with
    the Metropolis-Hastings probability (acceptance ratio corrected by
    the forward/backward proposal probabilities)."""
    W, WProb = self.drawRankingPlakettLuce(V)
    VProb = self.calcProbOfVFromW(V, W)
    acceptanceRatio = self.calcAcceptanceRatio(V, W)
    prob = min(1.0, acceptanceRatio * (VProb / WProb))
    return W if random.random() <= prob else V
Given a ranking over the candidates generate a new ranking by assigning each candidate at position i a Plakett - Luce weight of phi^i and draw a new ranking .
10,043
def calcDrawingProbs(self):
    """Return normalized drawing probabilities per position.

    Position i gets weight phi^i; the vector is normalized to sum to 1.
    """
    phi = self.phi
    n = len(self.wmg.keys())
    weights = [phi ** i for i in range(0, n)]
    totalWeight = sum(weights)
    return [w / totalWeight for w in weights]
Returns a vector that contains the probability of an item being drawn from each position. We say that every item in an order vector is drawn with weight phi^i, where i is its position.
10,044
def drawRankingPlakettLuce(self, rankList):
    """Draw a new order vector from *rankList* with Plackett-Luce sampling.

    Candidates are removed one by one with probability proportional to
    their (position-based) weight.  Returns the new ranking together with
    the probability of having drawn it.
    """
    numCands = len(rankList)
    newRanking = []
    remainingCands = copy.deepcopy(rankList)
    probsCopy = copy.deepcopy(self.plakettLuceProbs)
    totalProb = sum(self.plakettLuceProbs)
    prob = 1.0
    while len(newRanking) < numCands:
        rand = random.random()
        threshold = 0.0
        for i in range(0, len(probsCopy)):
            threshold = threshold + probsCopy[i] / totalProb
            if rand <= threshold:
                prob = prob * probsCopy[i] / totalProb
                newRanking.append(remainingCands[i])
                remainingCands.pop(i)
                totalProb = totalProb - probsCopy[i]
                probsCopy.pop(i)
                break
    return newRanking, prob
Given an order vector over the candidates draw candidates to generate a new order vector .
10,045
def calcProbOfVFromW(self, V, W):
    """Probability of generating order vector V as the next sample when the
    current sample is W.

    Each candidate gets weight phi^i from its position i in W; the
    probability of V is the product of sequential normalized draws.
    """
    # Bug fix: the original used ``weights = range(0, len(V))`` and then
    # item-assigned into it, which raises TypeError on Python 3 (range
    # objects are immutable).  Every slot is overwritten below since W is
    # a full order vector.
    weights = [0.0] * len(V)
    for i, alt in enumerate(W):
        weights[alt - 1] = self.phi ** i
    prob = 1.0
    totalWeight = sum(weights)
    for alt in V:
        prob = prob * weights[alt - 1] / totalWeight
        totalWeight = totalWeight - weights[alt - 1]
    return prob
Given a order vector V and an order vector W calculate the probability that we generate V as our next sample if our current sample was W .
10,046
def get_hist(rfile, histname, get_overflow=False):
    """Read a 1D histogram from a ROOT file.

    Returns (bin_values, xlims); with *get_overflow* the under/overflow
    bins are included in the values.
    """
    import root_numpy as rnp

    rfile = open_rfile(rfile)
    hist = rfile[histname]
    xlims = np.array(list(hist.xedges()))
    bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
    rfile.close()
    return bin_values, xlims
Read a 1D Histogram .
10,047
def interpol_hist2d(h2d, oversamp_factor=10):
    """Sample the interpolator of a ROOT 2D histogram.

    Returns (mat, x, y) where x/y are oversampled axes.

    NOTE(review): the output matrix is only (xn, yn) and the loops read
    just the first xn/yn entries of the oversampled axes — presumably the
    matrix and loops were meant to use the oversampled sizes; confirm
    against the original intent before relying on oversamp_factor > 1.
    """
    from rootpy import ROOTError

    xlim = h2d.bins(axis=0)
    ylim = h2d.bins(axis=1)
    xn = h2d.nbins(0)
    yn = h2d.nbins(1)
    x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor)
    y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor)
    mat = np.zeros((xn, yn))
    for xi in range(xn):
        for yi in range(yn):
            try:
                mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
            except ROOTError:
                continue
    return mat, x, y
Sample the interpolator of a root 2d hist .
10,048
def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:
    """Create the main window.

    :param size: (width, height) tuple; defaults to 1280x720
    :param samples: MSAA sample count, must be non-negative and a power
        of two (or zero)
    :param fullscreen: open in fullscreen mode
    :param title: window title
    :param threaded: run the window loop in its own thread
    :returns: the new Window
    :raises ValueError: on an invalid sample count
    """
    if size is None:
        width, height = 1280, 720
    else:
        width, height = size
    if samples < 0 or (samples & (samples - 1)) != 0:
        # ValueError instead of bare Exception: more precise and still
        # backward compatible for callers catching Exception.
        raise ValueError('Invalid number of samples: %d' % samples)
    window = Window.__new__(Window)
    window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)
    return window
Create the main window .
10,049
def clear(self, red=0.0, green=0.0, blue=0.0, alpha=0.0) -> None:
    """Clear the window with the given RGBA colour (defaults to black)."""
    self.wnd.clear(red, green, blue, alpha)
Clear the window .
10,050
def windowed(self, size) -> None:
    """Set the window to windowed mode with the given (width, height)."""
    width, height = size
    self.wnd.windowed(width, height)
Set the window to windowed mode .
10,051
def product_metadata(product, dst_folder, counter=None, writers=None, geometry_check=None):
    """Extract metadata for a specific product and write it per tile.

    :param product: dict with 'metadata' path and a 'tiles' list
    :param dst_folder: destination folder passed to the writers
    :param counter: running statistics dict, created if None
    :param writers: list of writer callables; defaults to [file_writer]
        (Bug fix: the mutable list default was moved out of the signature.)
    :param geometry_check: forwarded to tile_metadata
    :returns: the updated counter
    """
    if writers is None:
        writers = [file_writer]
    if not counter:
        counter = {
            'products': 0,
            'saved_tiles': 0,
            'skipped_tiles': 0,
            'skipped_tiles_paths': [],
        }
    s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com'
    product_meta_link = '{0}/{1}'.format(s3_url, product['metadata'])
    product_info = requests.get(product_meta_link, stream=True)
    product_metadata = metadata_to_dict(product_info.raw)
    product_metadata['product_meta_link'] = product_meta_link
    counter['products'] += 1
    for tile in product['tiles']:
        tile_info = requests.get('{0}/{1}'.format(s3_url, tile))
        try:
            metadata = tile_metadata(tile_info.json(), copy(product_metadata), geometry_check)
            for w in writers:
                w(dst_folder, metadata)
            logger.info('Saving to disk: %s' % metadata['tile_name'])
            counter['saved_tiles'] += 1
        except JSONDecodeError:
            logger.warning('Tile: %s was not found and skipped' % tile)
            counter['skipped_tiles'] += 1
            counter['skipped_tiles_paths'].append(tile)
    return counter
Extract metadata for a specific product
10,052
def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None, num_worker_threads=1):
    """Extract metadata for all products on a specific date.

    With num_worker_threads > 1 the per-product work is distributed over a
    queue-fed thread pool; otherwise products are processed sequentially.
    Returns the accumulated counter dict.
    """
    threaded = num_worker_threads > 1
    counter = {
        'products': 0,
        'saved_tiles': 0,
        'skipped_tiles': 0,
        'skipped_tiles_paths': [],
    }
    if threaded:
        queue = Queue()
    year_dir = os.path.join(dst_folder, str(year))
    month_dir = os.path.join(year_dir, str(month))
    day_dir = os.path.join(month_dir, str(day))
    product_list = get_products_metadata_path(year, month, day)
    logger.info('There are %s products in %s-%s-%s' % (
        len(list(iterkeys(product_list))), year, month, day))
    for name, product in iteritems(product_list):
        product_dir = os.path.join(day_dir, name)
        if threaded:
            queue.put([product, product_dir, counter, writers, geometry_check])
        else:
            counter = product_metadata(product, product_dir, counter, writers, geometry_check)
    if threaded:
        def worker():
            # drain the queue; errors only skip the offending product
            while not queue.empty():
                args = queue.get()
                try:
                    product_metadata(*args)
                except Exception:
                    exc = sys.exc_info()
                    logger.error('%s tile skipped due to error: %s' % (
                        threading.current_thread().name, exc[1].__str__()))
                    args[2]['skipped_tiles'] += 1
                queue.task_done()

        threads = []
        for i in range(num_worker_threads):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)
        queue.join()
    return counter
Extra metadata for all products in a specific date
10,053
def range_metadata(start, end, dst_folder, num_worker_threads=0, writers=[file_writer], geometry_check=None):
    """Extract metadata for all products in a date range.

    Parameters
    ----------
    start, end: datetime.date
        Inclusive date range to process.
    dst_folder: str
        Root folder where metadata is written.
    num_worker_threads: int
        Passed through to ``daily_metadata``.

    Returns
    -------
    dict
        Aggregated counters over all days in the range.
    """
    # explicit checks instead of ``assert`` -- asserts are stripped when
    # Python runs with -O, silently disabling the validation
    if not isinstance(start, date):
        raise TypeError("start must be a datetime.date")
    if not isinstance(end, date):
        raise TypeError("end must be a datetime.date")

    delta = end - start
    dates = [start + timedelta(days=i) for i in range(delta.days + 1)]
    days = len(dates)

    total_counter = {
        'days': days,
        'products': 0,
        'saved_tiles': 0,
        'skipped_tiles': 0,
        'skipped_tiles_paths': []
    }

    def update_counter(counter):
        # fold a per-day counter into the running totals
        for key in iterkeys(total_counter):
            if key in counter:
                total_counter[key] += counter[key]

    for d in dates:
        logger.info('Getting metadata of {0}-{1}-{2}'.format(d.year, d.month, d.day))
        update_counter(daily_metadata(d.year, d.month, d.day, dst_folder, writers, geometry_check, num_worker_threads))

    return total_counter
Extract metadata for all products in a date range
10,054
def get_on_tmdb(uri, **kwargs):
    """Get a resource on TMDB.

    Adds the configured API key to the query parameters, raises on
    HTTP errors and returns the decoded JSON payload.
    """
    kwargs['api_key'] = app.config['TMDB_API_KEY']
    url = (TMDB_API_URL + uri).encode('utf8')
    response = requests_session.get(url, params=kwargs)
    response.raise_for_status()
    return json.loads(response.text)
Get a resource on TMDB .
10,055
def search():
    """Search a movie on TMDB.

    Flask view: reads the query from ``request.args['query']``, serves a
    cached JSON response from Redis when available, otherwise queries
    TMDB, caches the result and returns it.
    """
    # cache key: 's_' + lowercased query string
    redis_key = 's_%s' % request.args['query'].lower()
    cached = redis_ro_conn.get(redis_key)
    if cached:
        return Response(cached)
    else:
        try:
            found = get_on_tmdb(u'/search/movie', query=request.args['query'])
            movies = []
            for movie in found['results']:
                # one extra TMDB request per result to get the crew list
                cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])
                year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None
                movies.append({
                    'title': movie['original_title'],
                    'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
                    'year': year,
                    '_tmdb_id': movie['id']
                })
        except requests.HTTPError as err:
            # propagate TMDB's HTTP status code to the client
            return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
        json_response = json.dumps({'movies': movies})
        # write-through cache with TTL
        redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
        return Response(json_response)
Search a movie on TMDB .
10,056
def get_movie(tmdb_id):
    """Get informations about a movie using its tmdb id.

    Flask view: serves a cached JSON response from Redis when available,
    otherwise aggregates details, credits and alternative titles from
    TMDB, caches the JSON and returns it.
    """
    redis_key = 'm_%s' % tmdb_id
    cached = redis_ro_conn.get(redis_key)
    if cached:
        return Response(cached)
    else:
        try:
            # three TMDB calls: details, credits, alternative titles
            details = get_on_tmdb(u'/movie/%d' % tmdb_id)
            cast = get_on_tmdb(u'/movie/%d/casts' % tmdb_id)
            alternative = get_on_tmdb(u'/movie/%d/alternative_titles' % tmdb_id)
        except requests.HTTPError as err:
            return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
        movie = {
            'title': details['original_title'],
            'score': details['popularity'],
            'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
            'writers': [x['name'] for x in cast['crew'] if x['department'] == 'Writing'],
            'cast': [x['name'] for x in cast['cast']],
            'genres': [x['name'] for x in details['genres']],
            'countries': [x['name'] for x in details['production_countries']],
            # TMDB votes are on a 0-10 scale; map to 0-5
            'tmdb_votes': int(round(details.get('vote_average', 0) * 0.5)),
            '_tmdb_id': tmdb_id
        }
        if details.get('release_date'):
            movie['year'] = datetime.strptime(details['release_date'], '%Y-%m-%d').year
        if details.get('belongs_to_collection'):
            movie['collection'] = details['belongs_to_collection']['name']
        # expose localized titles as e.g. 'title_us', 'title_fr', ...
        for alt in alternative['titles']:
            movie['title_%s' % alt['iso_3166_1'].lower()] = alt['title']
        json_response = json.dumps({'movie': movie})
        redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
        return Response(json_response)
Get informations about a movie using its tmdb id .
10,057
def _handle_response_error(self, response, retries, **kwargs):
    r"""Provides a way for each connection wrapper to handle error responses.

    Converts the response to an error; if the error is retryable and the
    retry budget is not exhausted, sleeps with capped exponential backoff
    and re-issues the request. Otherwise the response is returned as-is.
    """
    error = self._convert_response_to_error(response)
    if error is None:
        return response

    max_retries = self._max_retries_for_error(error)
    if max_retries is None or retries >= max_retries:
        return response

    # exponential backoff starting at 62.5 ms, capped at 1 second
    delay = min(0.0625 * 2 ** retries, 1.0)
    self.logger.warning("Sleeping for %r before retrying failed request...", delay)
    time.sleep(delay)

    retries += 1
    self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, max_retries)
    return self.request(retries=retries, **kwargs)
r Provides a way for each connection wrapper to handle error responses .
10,058
def _convert_response_to_error(self, response):
    """Subclasses may override this method in order to influence how errors are parsed from the response.

    Returns an error dict decoded from a protobuf or JSON body, or None
    when the response does not carry a recognizable error payload.
    """
    content_type = response.headers.get("content-type", "")

    if "application/x-protobuf" in content_type:
        self.logger.debug("Decoding protobuf response.")
        payload = status_pb2.Status.FromString(response.content)
        # map the numeric protobuf code to a symbolic status
        return {"status": self._PB_ERROR_CODES.get(payload.code)}

    if "application/json" in content_type:
        self.logger.debug("Decoding json response.")
        payload = response.json()
        error = payload.get("error")
        if error and isinstance(error, dict):
            return error
        self.logger.warning("Unexpected error response: %r", payload)
        return None

    self.logger.warning("Unexpected response: %r", response.text)
    return None
Subclasses may override this method in order to influence how errors are parsed from the response .
10,059
def parse_pattern(format_string, env, wrapper=lambda x, y: y):
    """Parse the format_string and return prepared data according to the env.

    Each replacement field may contain '|'-separated alternatives; the
    first alternative found in ``env`` (or a quoted literal) wins.
    Values are normalized to lists and passed through ``wrapper``.
    """
    fields = [spec[1] for spec in Formatter().parse(format_string) if spec[1] is not None]

    prepared_env = {}
    for field in fields:
        for candidate in (alt.strip() for alt in field.split('|')):
            if candidate[0] in '\'"' and candidate[-1] in '\'"':
                # quoted alternative: use the literal text itself
                values = candidate[1:-1]
            else:
                values = env.get(candidate)
            if values is not None:
                break
        else:
            # no alternative resolved -- fall back to an empty list
            values = []
        if not isinstance(values, list):
            values = [values]
        prepared_env[field] = wrapper(candidate, values)
    return prepared_env
Parse the format_string and return prepared data according to the env .
10,060
def perc(arr, p=95, **kwargs):
    """Create symmetric percentiles with p coverage.

    Returns the (lower, upper) percentiles of ``arr`` such that the
    central interval covers ``p`` percent of the data.
    """
    tail = (100 - p) / 2
    return np.percentile(arr, (tail, 100 - tail), **kwargs)
Create symmetric percentiles with p coverage .
10,061
def resample_1d(arr, n_out=None, random_state=None):
    """Resample an array with replacement.

    Parameters
    ----------
    arr: array-like
        Data to draw from.
    n_out: int, optional
        Number of samples to draw (default: ``len(arr)``).
    random_state: np.random.RandomState, optional
        Source of randomness; a fresh one is created if omitted.

    Returns
    -------
    np.ndarray of length ``n_out``.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    arr = np.atleast_1d(arr)
    n = len(arr)
    if n_out is None:
        n_out = n
    # bug fix: n_out was computed but ignored (size=n was used), so
    # requesting a different sample size had no effect
    idx = random_state.randint(0, n, size=n_out)
    return arr[idx]
Resample an array with replacement .
10,062
def bootstrap_params(rv_cont, data, n_iter=5, **kwargs):
    """Bootstrap the fit params of a distribution.

    Fits ``rv_cont`` to ``n_iter`` bootstrap resamples of ``data`` and
    returns the fitted parameter tuples as a 2D array (one row per
    iteration). Extra kwargs go to ``resample_1d``.
    """
    results = [
        rv_cont.fit(resample_1d(data, **kwargs))
        for _ in range(n_iter)
    ]
    return np.array(results)
Bootstrap the fit params of a distribution .
10,063
def param_describe(params, quant=95, axis=0):
    """Get mean + quantile range from bootstrapped params.

    Parameters
    ----------
    params: array-like
        Bootstrapped parameters (iterations along ``axis``).
    quant: number
        Central coverage in percent (e.g. 95 -> [2.5, 97.5] percentiles).
    axis: int
        Axis to aggregate over.

    Returns
    -------
    (mean, lower, upper) along ``axis``.
    """
    par = np.mean(params, axis=axis)
    # bug fix: this previously called ``perc(quant)``, i.e. took the
    # percentiles *of the scalar quant itself*, so both bounds collapsed
    # to ``quant`` and lower == upper percentile of params.
    tail = (100 - quant) / 2
    lo, up = tail, 100 - tail
    p_up = np.percentile(params, up, axis=axis)
    p_lo = np.percentile(params, lo, axis=axis)
    return par, p_lo, p_up
Get mean + quantile range from bootstrapped params .
10,064
def bootstrap_fit(rv_cont, data, n_iter=10, quant=95, print_params=True, **kwargs):
    """Bootstrap a distribution fit + get confidence intervals for the params.

    Parameters
    ----------
    rv_cont: scipy.stats.rv_continuous instance
        Distribution to fit.
    data: array-like
        Sample to bootstrap from.
    n_iter: int
        Number of bootstrap iterations.
    quant: number
        Central coverage (percent) for the confidence interval.
    print_params: bool
        NOTE(review): currently unused -- the summary is always printed.

    Returns
    -------
    dict with 'mean', 'lower limit' and 'upper limit' arrays.
    """
    # bug fix: **kwargs was accepted but silently dropped; forward it to
    # bootstrap_params (and thus to resample_1d)
    fit_params = bootstrap_params(rv_cont, data, n_iter, **kwargs)
    par, lo, up = param_describe(fit_params, quant=quant)
    names = param_names(rv_cont)
    maxlen = max([len(s) for s in names])
    print("--------------")
    print(rv_cont.name)
    print("--------------")
    for i, name in enumerate(names):
        print(
            "{nam:>{fill}}: {mean:+.3f} ∈ "
            "[{lo:+.3f}, {up:+.3f}] ({q}%)".format(
                nam=name, fill=maxlen, mean=par[i], lo=lo[i], up=up[i], q=quant
            )
        )
    out = {
        'mean': par,
        'lower limit': lo,
        'upper limit': up,
    }
    return out
Bootstrap a distribution fit + get confidence intervals for the params .
10,065
def rvs(self, *args, **kwargs):
    """Draw Random Variates.

    Parameters
    ----------
    size: int, optional
        Number of samples to draw (default: 1).
    random_state: optional
        Randomness source forwarded to the underlying KDE.
    """
    size = kwargs.pop('size', 1)
    # bug fix: this popped 'size' a second time, so any user-supplied
    # random_state was silently ignored
    random_state = kwargs.pop('random_state', None)
    return self._kde.sample(n_samples=size, random_state=random_state)
Draw Random Variates .
10,066
def main():
    """Entry point when running as script from commandline."""
    from docopt import docopt
    arguments = docopt(__doc__)
    i3extract(arguments['INFILE'], arguments['OUTFILE'])
Entry point when running as script from commandline .
10,067
def connect(self, server_config):
    """Connect using the configuration given.

    Opens a pymongo client (either from a full connection string or
    from host/port), optionally authenticates, then creates a
    mongoengine session aliased "hyperstream" and mirrors it under the
    "default" alias so plain mongoengine calls work too.
    """
    if 'connection_string' in server_config:
        # full MongoDB URI given: host/port/credentials are encoded in it
        self.client = pymongo.MongoClient(server_config['connection_string'])
        self.db = self.client[server_config['db']]
    else:
        self.client = pymongo.MongoClient(
            server_config['host'],
            server_config['port'],
            tz_aware=self.get_config_value('tz_aware', True))
        self.db = self.client[server_config['db']]
        if ('authentication_database' in server_config
                and server_config['authentication_database']):
            # authenticate against a dedicated auth database
            self.db.authenticate(
                server_config['username'],
                server_config['password'],
                source=server_config['authentication_database'])
        else:
            if 'username' in server_config:
                if 'password' in server_config:
                    self.db.authenticate(server_config['username'], server_config['password'])
                else:
                    # NOTE(review): password-less authenticate -- presumably
                    # relies on an external auth mechanism; confirm.
                    self.db.authenticate(server_config['username'])
    # strip keys mongoengine.connect does not understand
    d = dict((k, v) for k, v in server_config.items() if k not in ['modalities', 'summaries'])
    if 'authentication_database' in d:
        # mongoengine expects 'authentication_source' instead
        d['authentication_source'] = d['authentication_database']
        del d['authentication_database']
    # NOTE: this calls the module-level mongoengine ``connect`` --
    # shadowed by this method's own name inside the class namespace
    self.session = connect(alias="hyperstream", **d)
    if "default" not in connection._connections:
        # mirror the hyperstream connection under the default alias
        connection._connections["default"] = connection._connections["hyperstream"]
        connection._connection_settings["default"] = connection._connection_settings["hyperstream"]
Connect using the configuration given
10,068
def ptconcat(output_file, input_files, overwrite=False):
    """Concatenate HDF5 Files.

    Replicates the table layout of the first input file into
    ``output_file`` and then appends the rows of every input file
    (including the first).

    Parameters
    ----------
    output_file: str
        Path of the file to write.
    input_files: list of str
        Input paths; the first one defines the table structure.
    overwrite: bool
        Truncate an existing output file instead of appending to it.
    """
    # zlib compression with shuffling and checksums for all new tables
    filt = tb.Filters(complevel=5, shuffle=True, fletcher32=True, complib='zlib')
    out_tabs = {}
    dt_file = input_files[0]
    log.info("Reading data struct '%s'..." % dt_file)
    h5struc = tb.open_file(dt_file, 'r')
    log.info("Opening output file '%s'..." % output_file)
    if overwrite:
        outmode = 'w'
    else:
        outmode = 'a'
    h5out = tb.open_file(output_file, outmode)
    # first pass: create an empty table in the output for every table
    # found in the structure file, with identical dtype
    for node in h5struc.walk_nodes('/', classname='Table'):
        path = node._v_pathname
        log.debug(path)
        dtype = node.dtype
        p, n = os.path.split(path)
        out_tabs[path] = h5out.create_table(
            p, n, description=dtype, filters=filt, createparents=True)
    h5struc.close()
    # second pass: append the actual rows from every input file
    for fname in input_files:
        log.info('Reading %s...' % fname)
        h5 = tb.open_file(fname)
        for path, out in out_tabs.items():
            tab = h5.get_node(path)
            out.append(tab[:])
        h5.close()
    h5out.close()
Concatenate HDF5 Files
10,069
def calibrate_dom(
        dom_id, data, detector, livetime=None, fit_ang_dist=False,
        scale_mc_to_data=True, ad_fit_shape='pexp', fit_background=True,
        ctmin=-1.):
    """Calibrate intra DOM PMT time offsets, efficiencies and sigmas.

    Parameters
    ----------
    dom_id: int
        DOM to calibrate.
    data: str or array
        Either a filename ('.h5' or '.root') to load coincidences from,
        or the coincidence data itself.
    detector: detector instance
        Used to compute PMT pair angles.
    livetime: float, optional
        Required when ``data`` is passed directly.
    fit_ang_dist: bool
        Fit the angular distribution instead of using the MC shape.
    scale_mc_to_data: bool
        Scale the MC angular distribution to the measured rates.
    ad_fit_shape: str
        'exp' or 'pexp' shape for the angular-distribution fit.
    fit_background: bool
        Whether the delta-t fit includes a background term.
    ctmin: float
        Minimum cos(angle) of PMT pairs to keep.

    Returns
    -------
    dict with fit results, or 0 when the delta-t fit fails.
    """
    if isinstance(data, str):
        filename = data
        loaders = {
            '.h5': load_k40_coincidences_from_hdf5,
            '.root': load_k40_coincidences_from_rootfile
        }
        try:
            loader = loaders[os.path.splitext(filename)[1]]
        except KeyError:
            log.critical('File format not supported.')
            raise IOError
        else:
            data, livetime = loader(filename, dom_id)

    # all 465 PMT pair combinations, restricted by the ctmin angle cut
    combs = np.array(list(combinations(range(31), 2)))
    angles = calculate_angles(detector, combs)
    cos_angles = np.cos(angles)
    angles = angles[cos_angles >= ctmin]
    data = data[cos_angles >= ctmin]
    combs = combs[cos_angles >= ctmin]

    try:
        fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
        rates, means, sigmas, popts, pcovs = fit_res
    except Exception:
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; a failed fit still signals failure with 0
        log.error("Delta-t fit failed for DOM %s" % dom_id)
        return 0

    rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
    scale_factor = None
    if fit_ang_dist:
        fit_res = fit_angular_distribution(
            angles, rates, rate_errors, shape=ad_fit_shape)
        fitted_rates, exp_popts, exp_pcov = fit_res
    else:
        # use the Monte Carlo angular distribution, optionally scaled
        # to the measured rates (mean over angles < 1.5 rad)
        mc_fitted_rates = exponential_polinomial(np.cos(angles), *MC_ANG_DIST)
        if scale_mc_to_data:
            scale_factor = np.mean(rates[angles < 1.5]) / \
                np.mean(mc_fitted_rates[angles < 1.5])
        else:
            scale_factor = 1.
        fitted_rates = mc_fitted_rates * scale_factor
        exp_popts = []
        exp_pcov = []
        print('Using angular distribution from Monte Carlo')

    if not fit_background:
        minimize_weights = calculate_weights(fitted_rates, data)
    else:
        minimize_weights = fitted_rates

    opt_t0s = minimize_t0s(means, minimize_weights, combs)
    opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
    opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
    corrected_means = correct_means(means, opt_t0s.x, combs)
    corrected_rates = correct_rates(rates, opt_qes.x, combs)
    rms_means, rms_corrected_means = calculate_rms_means(means, corrected_means)
    rms_rates, rms_corrected_rates = calculate_rms_rates(
        rates, fitted_rates, corrected_rates)
    cos_angles = np.cos(angles)

    return_data = {
        'opt_t0s': opt_t0s,
        'opt_qes': opt_qes,
        'data': data,
        'means': means,
        'rates': rates,
        'fitted_rates': fitted_rates,
        'angles': angles,
        'corrected_means': corrected_means,
        'corrected_rates': corrected_rates,
        'rms_means': rms_means,
        'rms_corrected_means': rms_corrected_means,
        'rms_rates': rms_rates,
        'rms_corrected_rates': rms_corrected_rates,
        'gaussian_popts': popts,
        'livetime': livetime,
        'exp_popts': exp_popts,
        'exp_pcov': exp_pcov,
        'scale_factor': scale_factor,
        'opt_sigmas': opt_sigmas,
        'sigmas': sigmas,
        'combs': combs
    }
    return return_data
Calibrate intra DOM PMT time offsets efficiencies and sigmas
10,070
def load_k40_coincidences_from_hdf5(filename, dom_id):
    """Load k40 coincidences from hdf5 file.

    Returns the coincidence counts for the given DOM as a numpy array
    together with the livetime stored in the dataset attributes.
    """
    with h5py.File(filename, 'r') as h5f:
        dataset = h5f['/k40counts/{0}'.format(dom_id)]
        livetime = dataset.attrs['livetime']
        counts = np.array(dataset)
    return counts, livetime
Load k40 coincidences from hdf5 file
10,071
def load_k40_coincidences_from_rootfile(filename, dom_id):
    """Load k40 coincidences from JMonitorK40 ROOT file.

    Parameters
    ----------
    filename: str
        ROOT file written by JMonitorK40.
    dom_id: int
        DOM ID; its 2D histogram is named '<dom_id>.2S'.

    Returns
    -------
    data: np.ndarray
        2D histogram contents (one row per PMT combination).
    dom_weight: float
        Weight for this DOM from 'weights_hist', or 1.0 when that
        histogram is missing or broken.
    """
    from ROOT import TFile
    root_file_monitor = TFile(filename, "READ")
    dom_name = str(dom_id) + ".2S"
    histo_2d_monitor = root_file_monitor.Get(dom_name)
    data = []
    # ROOT bin indices are 1-based; copy the histogram bin by bin
    for c in range(1, histo_2d_monitor.GetNbinsX() + 1):
        combination = []
        for b in range(1, histo_2d_monitor.GetNbinsY() + 1):
            combination.append(histo_2d_monitor.GetBinContent(c, b))
        data.append(combination)

    weights = {}
    weights_histo = root_file_monitor.Get('weights_hist')
    try:
        for i in range(1, weights_histo.GetNbinsX() + 1):
            weight = weights_histo.GetBinContent(i)
            label = weights_histo.GetXaxis().GetBinLabel(i)
            # NOTE(review): label[3:] presumably strips a fixed prefix in
            # front of the DOM ID -- confirm against JMonitorK40 output
            weights[label[3:]] = weight
        dom_weight = weights[str(dom_id)]
    except AttributeError:
        # Get() returned nothing usable -> fall back to unit weight
        log.info("Weights histogram broken or not found, setting weight to 1.")
        dom_weight = 1.
    return np.array(data), dom_weight
Load k40 coincidences from JMonitorK40 ROOT file
10,072
def calculate_angles(detector, combs):
    """Calculates angles between PMT combinations according to positions in detector_file.

    Parameters
    ----------
    detector: detector instance providing ``pmt_angles``.
    combs: iterable of (i, j) PMT index pairs.

    Returns
    -------
    np.ndarray of pairwise angles in radians.
    """
    pmt_angles = detector.pmt_angles
    return np.array([
        kp.math.angle_between(np.array(pmt_angles[first]),
                              np.array(pmt_angles[second]))
        for first, second in combs
    ])
Calculates angles between PMT combinations according to positions in detector_file
10,073
def fit_angular_distribution(angles, rates, rate_errors, shape='pexp'):
    """Fits angular distribution of rates.

    Parameters
    ----------
    angles: array of PMT pair angles (radians).
    rates: measured rates per pair.
    rate_errors: errors on the rates.
        NOTE(review): currently not used by the fit -- consider passing
        them as ``sigma`` to curve_fit.
    shape: 'exp' or 'pexp'
        Functional form used for the fit.

    Returns
    -------
    fitted_rates, popt, pcov
    """
    if shape == 'exp':
        fit_function = exponential
    elif shape == 'pexp':
        fit_function = exponential_polinomial
    else:
        # previously an unknown shape fell through and raised a confusing
        # NameError on the unbound ``fit_function``
        raise ValueError("Unknown fit shape '{}' (use 'exp' or 'pexp')".format(shape))
    cos_angles = np.cos(angles)
    popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
    fitted_rates = fit_function(cos_angles, *popt)
    return fitted_rates, popt, pcov
Fits angular distribution of rates .
10,074
def minimize_t0s(means, weights, combs):
    """Varies t0s to minimize the deviation of the gaussian means from zero.

    The first PMT is pinned to t0 = 0 via its bounds; all other t0s may
    move within [-10, 10] ns.
    """
    def quality_function(t0s):
        # weighted squared deviation of every pair's mean from the
        # difference of its two t0s
        deviation = 0
        for mean, comb, weight in zip(means, combs, weights):
            deviation += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight) ** 2
        return deviation

    initial_t0s = np.random.rand(31)
    bounds = [(0, 0)] + [(-10., 10.)] * 30
    return optimize.minimize(quality_function, initial_t0s, bounds=bounds)
Varies t0s to minimize the deviation of the gaussian means from zero .
10,075
def minimize_qes(fitted_rates, rates, weights, combs):
    """Varies QEs to minimize the deviation of the rates from the fitted_rates.

    Every QE is constrained to [0.1, 2.0]; the optimization starts at
    unit efficiency for all 31 PMTs.
    """
    def quality_function(qes):
        # weighted squared residual of the QE-corrected rate vs the fit
        total = 0
        for fitted_rate, comb, rate, weight in zip(fitted_rates, combs, rates, weights):
            total += ((rate / qes[comb[0]] / qes[comb[1]] - fitted_rate) * weight) ** 2
        return total

    start_qes = np.ones(31)
    bounds = [(0.1, 2.)] * 31
    return optimize.minimize(quality_function, start_qes, bounds=bounds)
Varies QEs to minimize the deviation of the rates from the fitted_rates .
10,076
def correct_means(means, opt_t0s, combs):
    """Applies optimal t0s to gaussian means.

    Returns the residual of each pair's mean after subtracting the
    fitted t0 difference.
    """
    corrected = [
        (opt_t0s[second] - opt_t0s[first]) - mean
        for mean, (first, second) in zip(means, combs)
    ]
    return np.array(corrected)
Applies optimal t0s to gaussian means .
10,077
def correct_rates(rates, opt_qes, combs):
    """Applies optimal qes to rates.

    Divides each pair's rate by the fitted efficiencies of both PMTs.
    """
    corrected = [
        rate / opt_qes[first] / opt_qes[second]
        for rate, (first, second) in zip(rates, combs)
    ]
    return np.array(corrected)
Applies optimal qes to rates .
10,078
def calculate_rms_means(means, corrected_means):
    """Calculates RMS of means from zero before and after correction."""
    # RMS around zero, before and after applying the t0 correction
    rms_before = np.sqrt(np.mean((means - 0) ** 2))
    rms_after = np.sqrt(np.mean((corrected_means - 0) ** 2))
    return rms_before, rms_after
Calculates RMS of means from zero before and after correction
10,079
def calculate_rms_rates(rates, fitted_rates, corrected_rates):
    """Calculates RMS of rates from fitted_rates before and after correction."""
    rms_before = np.sqrt(np.mean((rates - fitted_rates) ** 2))
    rms_after = np.sqrt(np.mean((corrected_rates - fitted_rates) ** 2))
    return rms_before, rms_after
Calculates RMS of rates from fitted_rates before and after correction
10,080
def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
    """Add counts to twofold coincidences for a given tmax.

    Scans the (time-sorted) hits and, for every window of exactly two
    hits within ``tmax``, increments the corresponding cell of ``mat``
    (indexed by PMT pair via ``get_comb_index`` and signed time
    difference shifted by ``tmax``).

    Parameters
    ----------
    times: sequence of hit times (assumed sorted ascending).
    tdcs: sequence of TDC/PMT channel ids, parallel to ``times``.
    mat: 2D array of shape (465, 2 * tmax + 1), modified in place.
    tmax: coincidence window.
    """
    h_idx = 0
    n_hits = len(times)
    # was ``while h_idx <= n_hits`` which could index one past the end
    while h_idx < n_hits:
        # bug fix: multiplicity must be reset per window -- a stale value
        # from a previous coincidence made the code fall through with
        # h_idx == c_idx and loop forever (or count bogus pairs)
        multiplicity = 0
        c_idx = h_idx + 1
        if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
            multiplicity = 2
            c_idx += 1
            # extend the window over all hits within tmax of the head hit
            while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
                c_idx += 1
                multiplicity += 1
        if multiplicity != 2:
            # no pair, or a higher-multiplicity burst: skip the window
            h_idx = c_idx
            continue
        c_idx -= 1
        h_tdc = tdcs[h_idx]
        c_tdc = tdcs[c_idx]
        h_time = times[h_idx]
        c_time = times[c_idx]
        if h_tdc != c_tdc:
            dt = int(c_time - h_time)
            # order the pair so the combination index is canonical; the
            # sign of dt encodes which PMT fired first
            if h_tdc > c_tdc:
                mat[get_comb_index(c_tdc, h_tdc), -dt + tmax] += 1
            else:
                mat[get_comb_index(h_tdc, c_tdc), dt + tmax] += 1
        h_idx = c_idx
Add counts to twofold coincidences for a given tmax .
10,081
def reset(self):
    """Reset coincidence counter.

    Re-initializes the per-DOM count matrices (465 PMT pairs x the
    2*tmax+1 time-difference bins) and the timeslice tallies.
    """
    matrix_shape = (465, self.tmax * 2 + 1)
    self.counts = defaultdict(partial(np.zeros, matrix_shape))
    self.n_timeslices = defaultdict(int)
Reset coincidence counter
10,082
def dump(self):
    """Write coincidence counts into a Python pickle.

    Dumps a dict with the raw counts under 'data' and the accumulated
    livetime under 'livetime' to ``self.dump_filename``.
    """
    self.print("Dumping data to {}".format(self.dump_filename))
    # bug fix: the file handle from open() was never closed; use a
    # context manager so it is closed even if pickling fails
    with open(self.dump_filename, "wb") as fobj:
        pickle.dump({
            'data': self.counts,
            'livetime': self.get_livetime()
        }, fobj)
Write coincidence counts into a Python pickle
10,083
def get_named_by_definition(cls, element_list, string_def):
    """Attempts to get an IOOS definition from a list of xml elements.

    Returns the ``value`` of the first element whose ``definition``
    matches ``string_def``, or None when no element matches.
    """
    # was: next() without a default wrapped in a broad ``except
    # Exception`` -- use next's default instead of swallowing every
    # error (a malformed element now surfaces instead of hiding)
    return next(
        (st.value for st in element_list if st.definition == string_def),
        None
    )
Attempts to get an IOOS definition from a list of xml elements
10,084
def get_ioos_def(self, ident, elem_type, ont):
    """Gets a definition given an identifier and where to search for it.

    ``elem_type`` selects whether identifiers or classifiers are
    searched; the definition URL is the ontology joined with the
    identifier.
    """
    if elem_type == "identifier":
        lookup = self.system.get_identifiers_by_name
    elif elem_type == "classifier":
        lookup = self.system.get_classifiers_by_name
    else:
        raise ValueError("Unknown element type '{}'".format(elem_type))
    definition_url = urljoin(ont, ident)
    return DescribeSensor.get_named_by_definition(lookup(ident), definition_url)
Gets a definition given an identifier and where to search for it
10,085
def get_sentence(start=None, depth=7):
    """Follow the grammatical patterns to generate a random sentence.

    Parameters
    ----------
    start: grammar symbol, optional
        Symbol to expand (default: the grammar's start symbol).
    depth: int
        Remaining recursion budget; at 0, terminal-only productions are
        preferred to stop the expansion.

    Returns
    -------
    list of terminal tokens (or an error string if no grammar is set).
    """
    if not GRAMMAR:
        return 'Please set a GRAMMAR file'
    start = start if start else GRAMMAR.start()
    if isinstance(start, Nonterminal):
        productions = GRAMMAR.productions(start)
        if not depth:
            # bug fix: the terminal preference was dead code -- the filter
            # never looked at ``p`` and the chosen list was immediately
            # overwritten. Prefer productions whose right-hand side is
            # fully terminal so recursion can actually bottom out.
            terminals = [
                p for p in productions
                if not any(isinstance(piece, Nonterminal) for piece in p.rhs())
            ]
            if len(terminals):
                productions = terminals
        production = random.choice(productions)
        sentence = []
        for piece in production.rhs():
            sentence += get_sentence(start=piece, depth=depth - 1)
        return sentence
    else:
        return [start]
follow the grammatical patterns to generate a random sentence
10,086
def format_sentence(sentence):
    """Fix display formatting of a sentence array.

    Turns 'a' into 'an' before vowel-initial words (except 'uni...'),
    joins the words, capitalizes the first letter, tidies comma spacing
    and appends a period. The input list is modified in place.
    """
    for pos, token in enumerate(sentence):
        if token != 'a' or pos + 1 >= len(sentence):
            continue
        follower = sentence[pos + 1]
        # 'a' -> 'an' before a vowel sound ('uni...' is pronounced /ju/)
        if re.match(r'^[aeiou]', follower) and not re.match(r'^uni', follower):
            sentence[pos] = 'an'
    joined = ' '.join(sentence)
    capitalized = joined[0].upper() + joined[1:]
    return capitalized.replace(' ,', ',') + '.'
fix display formatting of a sentence array
10,087
def new_station(self, _id, callSign, name, affiliate, fccChannelNumber):
    """Callback run for each new station.

    Prints the station details only when station verbosity is enabled.
    """
    if not self.__v_station:
        return
    print("[Station: %s, %s, %s, %s, %s]" % (_id, callSign, name, affiliate, fccChannelNumber))
Callback run for each new station
10,088
def new_lineup(self, name, location, device, _type, postalCode, _id):
    """Callback run for each new lineup.

    Prints the lineup details only when lineup verbosity is enabled.
    """
    if not self.__v_lineup:
        return
    print("[Lineup: %s, %s, %s, %s, %s, %s]" % (name, location, device, _type, postalCode, _id))
Callback run for each new lineup
10,089
def new_genre(self, program, genre, relevance):
    """Callback run for each new program genre entry.

    Prints the genre details only when genre verbosity is enabled.
    """
    if not self.__v_genre:
        return
    print("[Genre: %s, %s, %s]" % (program, genre, relevance))
Callback run for each new program genre entry
10,090
def qsub(script, job_name, dryrun=False, *args, **kwargs):
    """Submit a job via qsub.

    Generates the job script (extra args go to ``gen_job``) and pipes it
    into ``qsub -V``. With ``dryrun=True`` the script is only printed.
    """
    print("Preparing job script...")
    job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
    env = os.environ.copy()
    if dryrun:
        print("This is a dry run! Here is the generated job file, which will "
              "not be submitted:")
        print(job_string)
        return
    print("Calling qsub with the generated job script.")
    proc = subprocess.Popen('qsub -V', stdin=subprocess.PIPE, env=env, shell=True)
    proc.communicate(input=job_string.encode('ascii'))
Submit a job via qsub .
10,091
def gen_job(script, job_name, log_path='qlogs', group='km3net', platform='cl7',
            walltime='00:10:00', vmem='8G', fsize='8G', shell=None, email=None,
            send_mail='n', job_array_start=1, job_array_stop=None,
            job_array_step=1, irods=False, sps=True, hpss=False, xrootd=False,
            dcache=False, oracle=False, split_array_logs=False):
    """Generate a job script.

    Fills the module-level JOB_TEMPLATE with the given resources,
    notification settings and (optional) job-array directive, and
    returns the resulting script as a string.
    """
    if shell is None:
        shell = os.environ['SHELL']
    if email is None:
        email = os.environ['USER'] + '@km3net.de'
    if isinstance(script, Script):
        script = str(script)
    # logs always go below the current working directory
    log_path = os.path.join(os.getcwd(), log_path)
    if job_array_stop is None:
        # '#' yields a harmless comment line in the rendered template
        job_array_option = "#"
    else:
        job_array_option = "#$ -t {}-{}:{}".format(
            job_array_start, job_array_stop, job_array_step)
    task_name = '_$TASK_ID' if split_array_logs else ''
    return JOB_TEMPLATE.format(
        script=script, email=email, send_mail=send_mail, log_path=log_path,
        job_name=job_name, group=group, walltime=walltime, vmem=vmem,
        fsize=fsize, irods=irods, sps=sps, hpss=hpss, xrootd=xrootd,
        dcache=dcache, oracle=oracle, shell=shell, platform=platform,
        job_array_option=job_array_option, task_name=task_name)
Generate a job script .
10,092
def get_jpp_env(jpp_dir):
    """Return the environment dict of a loaded Jpp env.

    Sources ``setenv.sh`` from the given Jpp directory in a subshell and
    parses the resulting ``env`` output into a dict.

    NOTE(review): ``jpp_dir`` is interpolated unquoted into a shell
    command -- only pass trusted paths.
    """
    raw = os.popen("source {0}/setenv.sh {0} && env".format(jpp_dir)).read()
    env = {}
    for line in raw.split('\n'):
        if '=' not in line:
            continue
        parts = line.split('=')
        # values containing '=' are glued back together (separator lost,
        # matching the original behavior)
        env[parts[0]] = ''.join(parts[1:])
    return env
Return the environment dict of a loaded Jpp env .
10,093
def iget(self, irods_path, attempts=1, pause=15):
    """Add an iget command to retrieve a file from iRODS.

    Parameters
    ----------
    irods_path: str
        Filepath to fetch with iget.
    attempts: int
        Number of tries if the iRODS access fails.
    pause: int
        Seconds to sleep between failed attempts.
    """
    if attempts > 1:
        # bug fix: the retry-loop template string was missing, so ``cmd``
        # was referenced before assignment (NameError for attempts > 1);
        # reconstructed as a shell retry loop
        cmd = ('for i in $(seq 1 {0}); do\n'
               '    iget -v "{1}" && break\n'
               '    sleep {2}\n'
               'done').format(attempts, irods_path, pause)
        self.add(cmd)
    else:
        self.add('iget -v "{}"'.format(irods_path))
Add an iget command to retrieve a file from iRODS .
10,094
def _add_two_argument_command ( self , command , arg1 , arg2 ) : self . lines . append ( "{} {} {}" . format ( command , arg1 , arg2 ) )
Helper function for two - argument commands
10,095
def get_devices(self):
    """List all garage door devices.

    Logs in via ``make_request`` and scrapes the XML response for up to
    three doors, recording their attributes and mapped states.

    Returns a list of device dicts, or False when the request or the
    XML parsing fails.
    """
    devices = self.make_request('["{username}","{password}","info","",""]'.format(
        username=self.username, password=self.password))
    if devices != False:
        garage_doors = []
        try:
            self.apicode = devices.find('apicode').text
            self._device_states = {}
            # the API exposes at most three doors: door1..door3
            for doorNum in range(1, 4):
                door = devices.find('door' + str(doorNum))
                doorName = door.find('name').text
                if doorName:
                    dev = {'door': doorNum, 'name': doorName}
                    # optional per-door attributes, copied verbatim when present
                    for id in ['mode', 'sensor', 'status', 'sensorid', 'temperature',
                               'voltage', 'camera', 'events', 'permission']:
                        item = door.find(id)
                        if item is not None:
                            dev[id] = item.text
                    # map the raw status through DOOR_STATE and cache it
                    garage_state = door.find('status').text
                    dev['status'] = self.DOOR_STATE[garage_state]
                    self._device_states[doorNum] = self.DOOR_STATE[garage_state]
                    garage_doors.append(dev)
            return garage_doors
        except TypeError as ex:
            # a missing XML node makes .text fail -> treat as request failure
            print(ex)
            return False
    else:
        return False
List all garage door devices .
10,096
def get_status(self, device_id):
    """List only MyQ garage door devices.

    Returns the cached status string of the door with the given id, or
    False when the device list is unavailable or the id is unknown.
    """
    all_devices = self.get_devices()
    if all_devices == False:
        return False
    for dev in all_devices:
        if dev['door'] == device_id:
            return dev['status']
    return False
List only MyQ garage door devices .
10,097
def analyze(segments, analysis, lookup=dict(bipa={}, dolgo={})):
    """Test a sequence for compatibility with CLPA and LingPy.

    Maps each segment to its BIPA sound and Dolgopolsky sound class,
    tallying unknown sounds and replacements on ``analysis``.

    NOTE(review): the mutable default ``lookup`` is shared across calls
    -- presumably an intentional memoization cache of BIPA/Dolgo
    lookups; confirm before refactoring.
    """
    if not segments:
        raise ValueError('Empty sequence.')
    if not [segment for segment in segments if segment.strip()]:
        raise ValueError('No information in the sequence.')
    try:
        bipa_analysis, sc_analysis = [], []
        for s in segments:
            # cache BIPA lookups; BIPA[s] is only computed on a miss
            a = lookup['bipa'].get(s)
            if a is None:
                a = lookup['bipa'].setdefault(s, BIPA[s])
            bipa_analysis.append(a)
            # same caching for the Dolgopolsky sound-class translation
            sc = lookup['dolgo'].get(s)
            if sc is None:
                sc = lookup['dolgo'].setdefault(s, BIPA.translate(s, DOLGO))
            sc_analysis.append(sc)
    except:
        # dump the offending sequence for debugging, then re-raise
        print(segments)
        raise
    # first pass: count segments unknown to either system
    for sound_bipa, sound_class in zip(bipa_analysis, sc_analysis):
        if isinstance(sound_bipa, pyclts.models.UnknownSound) or sound_class == '?':
            analysis.general_errors += 1
    # second pass: per-segment bookkeeping of errors and replacements
    for segment, sound_bipa, sound_class in zip(segments, bipa_analysis, sc_analysis):
        analysis.segments.update([segment])
        if isinstance(sound_bipa, pyclts.models.UnknownSound):
            analysis.bipa_errors.add(segment)
        else:
            analysis.replacements[sound_bipa.source].add(sound_bipa.__unicode__())
        if sound_class == '?':
            analysis.sclass_errors.add(segment)
    return segments, bipa_analysis, sc_analysis, analysis
Test a sequence for compatibility with CLPA and LingPy .
10,098
def most_energetic(df):
    """Grab most energetic particle from mc_tracks dataframe.

    Keeps, per event_id, the row(s) whose energy equals the event's
    maximum energy.

    NOTE(review): ``.reindex()`` with no arguments returns a copy with
    the same index -- possibly ``reset_index()`` was intended; kept
    as-is to preserve behavior.
    """
    max_per_event = df.groupby(['event_id'])['energy'].transform(max)
    mask = max_per_event == df['energy']
    return df[mask].reindex()
Grab most energetic particle from mc_tracks dataframe .
10,099
def _connect(self):
    """Connect to JLigier.

    Opens a TCP socket to the dispatcher at (self.host, self.port) and
    stores it on the instance.
    """
    log.debug("Connecting to JLigier")
    self.socket = socket.socket()
    self.socket.connect((self.host, self.port))
Connect to JLigier