idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
14,400
def median_filter(tr, multiplier=10, windowlength=0.5, interp_len=0.05, debug=0):
    """
    Filter out spikes in data above a multiple of MAD of the data.

    :param tr: obspy Trace to despike (modified in place and returned)
    :param multiplier: spike threshold as a multiple of the window MAD
    :param windowlength: window length in seconds for MAD computation
    :param interp_len: length in seconds to interpolate around each spike
    :param debug: debug level; >= 1 plots raw vs. despiked data
    :returns: the despiked Trace
    """
    num_cores = cpu_count()
    if debug >= 1:
        # Keep an untouched copy only when we will plot a comparison later.
        data_in = tr.copy()
    # Spike detection runs on a detrended, high-pass filtered copy; the
    # interpolation is applied to the original tr.data.
    filt = tr.copy()
    filt.detrend('linear')
    try:
        # Band-pass from 10 Hz up to just below Nyquist.
        filt.filter('bandpass', freqmin=10.0,
                    freqmax=(tr.stats.sampling_rate / 2) - 1)
    except Exception as e:
        # Best-effort: if filtering fails we detect spikes on unfiltered data.
        print("Could not filter due to error: {0}".format(e))
    data = filt.data
    del filt
    # Convert second-based lengths to sample counts.
    _windowlength = int(windowlength * tr.stats.sampling_rate)
    _interp_len = int(interp_len * tr.stats.sampling_rate)
    peaks = []
    with Timer() as t:
        # Each non-overlapping window is despiked by a separate worker.
        pool = Pool(processes=num_cores)
        results = [
            pool.apply_async(
                _median_window,
                args=(data[chunk * _windowlength:(chunk + 1) * _windowlength],
                      chunk * _windowlength, multiplier,
                      # NOTE(review): the same starttime offset (windowlength)
                      # is passed for every chunk — looks like it should depend
                      # on `chunk`; only used for debug plotting. TODO confirm.
                      tr.stats.starttime + windowlength,
                      tr.stats.sampling_rate, debug))
            for chunk in range(int(len(data) / _windowlength))]
        pool.close()
        for p in results:
            peaks += p.get()
        pool.join()
        # peak[1] is the absolute sample index of the spike in the trace.
        for peak in peaks:
            tr.data = _interp_gap(tr.data, peak[1], _interp_len)
    print("Despiking took: %s s" % t.secs)
    if debug >= 1:
        plt.plot(data_in.data, 'r', label='raw')
        plt.plot(tr.data, 'k', label='despiked')
        plt.legend()
        plt.show()
    return tr
Filter out spikes in data above a multiple of MAD of the data .
14,401
def _median_window(window, window_start, multiplier, starttime, sampling_rate,
                   debug=0):
    """
    Internal helper for parallel despiking: find spikes in one data window.

    :param window: numpy array of data for this window
    :param window_start: absolute sample index of the window start in the trace
    :param multiplier: threshold as a multiple of the window MAD
    :param starttime: window start time (used only for debug plotting)
    :param sampling_rate: sampling rate (used only for debug plotting)
    :param debug: debug level
    :returns: list of (amplitude, absolute-sample-index) tuples
    """
    mad = np.median(np.abs(window))
    threshold = multiplier * mad
    if debug >= 2:
        print('Threshold for window is: ' + str(threshold) +
              '\nMedian is: ' + str(mad) +
              '\nMax is: ' + str(np.max(window)))
    found = find_peaks2_short(arr=window, thresh=threshold, trig_int=5,
                              debug=0)
    if debug >= 4 and found:
        peaks_plot(window, starttime, sampling_rate, save=False, peaks=found)
    if not found:
        return []
    # Shift peak indices from window-relative to trace-relative positions.
    return [(amplitude, index + window_start) for amplitude, index in found]
Internal function to aid parallel processing
14,402
def _interp_gap ( data , peak_loc , interp_len ) : start_loc = peak_loc - int ( 0.5 * interp_len ) end_loc = peak_loc + int ( 0.5 * interp_len ) if start_loc < 0 : start_loc = 0 if end_loc > len ( data ) - 1 : end_loc = len ( data ) - 1 fill = np . linspace ( data [ start_loc ] , data [ end_loc ] , end_loc - start_loc ) data [ start_loc : end_loc ] = fill return data
Internal function for filling gap with linear interpolation
14,403
def template_remove(tr, template, cc_thresh, windowlength, interp_len,
                    debug=0):
    """
    Look for instances of template in the trace and remove the matches.

    :param tr: obspy Trace to clean (modified in place and returned)
    :param template: obspy Trace or numpy array to match against
    :param cc_thresh: cross-correlation threshold for a match
    :param windowlength: minimum separation between matches in seconds
    :param interp_len: length in seconds to interpolate over each match
    :param debug: debug level; > 2 plots raw vs. despiked, > 3 also plots cc
    :returns: the cleaned Trace
    """
    data_in = tr.copy()
    _interp_len = int(tr.stats.sampling_rate * interp_len)
    # BUG FIX: the Trace -> ndarray conversion previously happened *after*
    # template.data was accessed; convert first so both input types are
    # handled uniformly.
    if isinstance(template, Trace):
        template = template.data
    if _interp_len < len(template):
        # BUG FIX: warning message had a missing space and a typo
        # ("template,will used").
        warnings.warn('Interp_len is less than the length of the template, '
                      'will use the length of the template!')
        _interp_len = len(template)
    with Timer() as t:
        cc = normxcorr2(image=tr.data.astype(np.float32),
                        template=template.astype(np.float32))
        if debug > 3:
            plt.plot(cc.flatten(), 'k', label='cross-correlation')
            plt.legend()
            plt.show()
        peaks = find_peaks2_short(
            arr=cc.flatten(), thresh=cc_thresh,
            trig_int=windowlength * tr.stats.sampling_rate)
        for peak in peaks:
            # peak[1] is the correlation lag; centre the gap on the match.
            tr.data = _interp_gap(data=tr.data,
                                  peak_loc=peak[1] + int(0.5 * _interp_len),
                                  interp_len=_interp_len)
    print("Despiking took: %s s" % t.secs)
    if debug > 2:
        plt.plot(data_in.data, 'r', label='raw')
        plt.plot(tr.data, 'k', label='despiked')
        plt.legend()
        plt.show()
    return tr
Looks for instances of template in the trace and removes the matches .
14,404
def read_data(archive, arc_type, day, stachans, length=86400):
    """
    Read the appropriate data from an archive for a day.

    :param archive: archive address (path for day_vols, URL for clients)
    :param arc_type: one of 'seishub', 'FDSN' or 'day_vols'
    :param day: datetime.date (or similar) giving the day to read
    :param stachans: list of (station, channel) tuples to read
    :param length: length of data to read in seconds (default one day)
    :returns: obspy Stream of all data found
    """
    st = []
    available_stations = _check_available_data(archive, arc_type, day)
    for station in stachans:
        if len(station[1]) == 2:
            # Two-character channel codes are expanded with a '*' wildcard
            # for the middle (band/instrument) character.
            station_map = (station[0], station[1][0] + '*' + station[1][1])
            available_stations_map = [
                (sta[0], sta[1][0] + '*' + sta[1][-1])
                for sta in available_stations]
        else:
            station_map = station
            available_stations_map = available_stations
        if station_map not in available_stations_map:
            msg = ' '.join([station[0], station_map[1],
                            'is not available for',
                            day.strftime('%Y/%m/%d')])
            warnings.warn(msg)
            continue
        if arc_type.lower() == 'seishub':
            client = SeishubClient(archive)
            st += client.get_waveforms(
                network='*', station=station_map[0], location='*',
                channel=station_map[1], starttime=UTCDateTime(day),
                endtime=UTCDateTime(day) + length)
        elif arc_type.upper() == "FDSN":
            client = FDSNClient(archive)
            try:
                st += client.get_waveforms(
                    network='*', station=station_map[0], location='*',
                    channel=station_map[1], starttime=UTCDateTime(day),
                    endtime=UTCDateTime(day) + length)
            except FDSNException:
                # Station listed in the inventory but no waveform data.
                warnings.warn('No data on server despite station being ' +
                              'available...')
                continue
        elif arc_type.lower() == 'day_vols':
            # Archive layout assumed: <archive>/Y<year>/R<julday>.01/
            wavfiles = _get_station_file(
                os.path.join(archive,
                             day.strftime('Y%Y' + os.sep + 'R%j.01')),
                station_map[0], station_map[1])
            for wavfile in wavfiles:
                st += read(wavfile, starttime=day, endtime=day + length)
    st = Stream(st)
    return st
Function to read the appropriate data from an archive for a day .
14,405
def _get_station_file(path_name, station, channel, debug=0):
    """
    Helper function to find the files containing a station/channel in a day
    directory.

    :param path_name: directory to search (glob of path_name/*)
    :param station: station code to match
    :param channel: channel code to match
    :param debug: debug level, passed through to _check_data
    :returns: list of unique matching file paths
    """
    wavfiles = glob.glob(path_name + os.sep + '*')
    # BUG FIX: _check_data returns None for files without a match; those
    # Nones used to leak into the returned list and break downstream read()
    # calls. Filter them out while de-duplicating.
    out_files = {
        _check_data(wavfile, station, channel, debug=debug)
        for wavfile in wavfiles}
    out_files.discard(None)
    return list(out_files)
Helper function to find the correct file .
14,406
def _check_data(wavfile, station, channel, debug=0):
    """
    Inner loop for parallel checks: return *wavfile* if it contains a trace
    for the given station and channel, otherwise None.

    :param wavfile: path of the waveform file to inspect
    :param station: station code to match
    :param channel: channel code to match
    :param debug: debug level; > 1 prints the file being checked
    """
    if debug > 1:
        print('Checking ' + wavfile)
    # Header-only read: we only need the metadata, not the samples.
    header = read(wavfile, headonly=True)
    if any(tr.stats.station == station and tr.stats.channel == channel
           for tr in header):
        return wavfile
    return None
Inner loop for parallel checks .
14,407
def _check_available_data(archive, arc_type, day):
    """
    Check what stations are available in the archive for a given day.

    :param archive: archive address (path for day_vols, URL for clients)
    :param arc_type: one of 'day_vols', 'seishub' or 'fdsn'
    :param day: the day to query
    :returns: list of (station, channel) tuples available for that day
    """
    available_stations = []
    if arc_type.lower() == 'day_vols':
        # Archive layout assumed: <archive>/Y<year>/R<julday>.01/*
        wavefiles = glob.glob(os.path.join(archive, day.strftime('Y%Y'),
                                           day.strftime('R%j.01'), '*'))
        for wavefile in wavefiles:
            # Header-only read: only metadata is needed.
            header = read(wavefile, headonly=True)
            # NOTE(review): only the first trace of each file is inspected —
            # multi-trace files would under-report. TODO confirm intended.
            available_stations.append((header[0].stats.station,
                                       header[0].stats.channel))
    elif arc_type.lower() == 'seishub':
        client = SeishubClient(archive)
        st = client.get_previews(starttime=UTCDateTime(day),
                                 endtime=UTCDateTime(day) + 86400)
        for tr in st:
            available_stations.append((tr.stats.station, tr.stats.channel))
    elif arc_type.lower() == 'fdsn':
        client = FDSNClient(archive)
        inventory = client.get_stations(starttime=UTCDateTime(day),
                                        endtime=UTCDateTime(day) + 86400,
                                        level='channel')
        for network in inventory:
            for station in network:
                for channel in station:
                    available_stations.append((station.code, channel.code))
    return available_stations
Function to check what stations are available in the archive for a given day.
14,408
def rt_time_log(logfile, startdate):
    """
    Open and read a RefTek raw log-file, extracting clock phase errors.

    :param logfile: path to the raw log-file
    :param startdate: object with a .year attribute giving the year the
        recording started (log lines carry only julian day, no year)
    :returns: list of (datetime, error-in-seconds) tuples; if no phase
        errors were found but external-clock-power-off events were, those
        are returned instead (flagged with the value 999)
    """
    phase_err = []
    lock = []
    # BUG FIX: the original opened the file identically in both branches of
    # an os.name check and never closed it on error; read binary and decode
    # per-line so a single undecodable line can be skipped.
    with io.open(logfile, 'rb') as f:
        for line_binary in f:
            try:
                line = line_binary.decode("utf8", "ignore")
            except UnicodeDecodeError:
                warnings.warn('Cannot decode line, skipping')
                continue
            # The timestamp (%j:%H:%M:%S, 12 chars) sits 13 characters
            # before the matched keyword.
            phase_match = re.search("INTERNAL CLOCK PHASE ERROR", line)
            if phase_match:
                d_start = phase_match.start() - 13
                phase_err.append(
                    (dt.datetime.strptime(
                        str(startdate.year) + ':' +
                        line[d_start:d_start + 12], '%Y:%j:%H:%M:%S'),
                     # Second-to-last token is the error in microseconds.
                     float(line.rstrip().split()[-2]) * 0.000001))
            else:
                lock_match = re.search(
                    "EXTERNAL CLOCK POWER IS TURNED OFF", line)
                if lock_match:
                    d_start = lock_match.start() - 13
                    lock.append(
                        (dt.datetime.strptime(
                            str(startdate.year) + ':' +
                            line[d_start:d_start + 12], '%Y:%j:%H:%M:%S'),
                         999))
    if len(phase_err) == 0 and len(lock) > 0:
        phase_err = lock
    return phase_err
Open and read reftek raw log - file .
14,409
def rt_location_log(logfile):
    """
    Extract location information from a RefTek raw log-file.

    :param logfile: path to the raw log-file
    :returns: list of (latitude, longitude, elevation-in-km) tuples, with
        south latitudes and west longitudes negative
    :raises NotImplementedError: if the elevation unit is not metres
    """
    locations = []
    # BUG FIX: the original opened the file identically in both branches of
    # an os.name check and never closed it on error.
    with open(logfile, 'rb') as f:
        for line_binary in f:
            try:
                line = line_binary.decode("utf8", "ignore")
            except UnicodeDecodeError:
                warnings.warn('Cannot decode line, skipping')
                print(line_binary)
                continue
            match = re.search("GPS: POSITION:", line)
            if not match:
                continue
            # Tokens: <hemisphere><deg:min:sec> x2, then <sign><elev><unit>.
            loc = line[match.end() + 1:].rstrip().split(' ')
            lat_parts = loc[0][1:].split(':')
            lat = (int(lat_parts[0]) + (int(lat_parts[1]) / 60.0) +
                   (float(lat_parts[2]) / 3600.0))
            if loc[0][0] == 'S':
                lat *= -1
            lon_parts = loc[1][1:].split(':')
            lon = (int(lon_parts[0]) + (int(lon_parts[1]) / 60.0) +
                   (float(lon_parts[2]) / 3600.0))
            if loc[1][0] == 'W':
                lon *= -1
            elev_unit = loc[2][-1]
            if not elev_unit == 'M':
                raise NotImplementedError('Elevation is not in M: unit=' +
                                          elev_unit)
            elev = int(loc[2][1:-1])
            if loc[2][0] == '-':
                elev *= -1
            # Convert metres to kilometres.
            elev /= 1000
            locations.append((lat, lon, elev))
    return locations
Extract location information from a RefTek raw log - file .
14,410
def flag_time_err(phase_err, time_thresh=0.02):
    """
    Find large time errors in a list of (timestamp, error) tuples.

    :param phase_err: list of (timestamp, error-in-seconds) tuples
    :param time_thresh: absolute error threshold in seconds
    :returns: list of timestamps whose error magnitude exceeds the threshold
    """
    return [stamp[0] for stamp in phase_err if abs(stamp[1]) > time_thresh]
Find large time errors in list .
14,411
def check_all_logs(directory, time_thresh):
    """
    Check all the log-files in a directory tree for timing errors.

    :param directory: root of the RefTek directory tree
    :param time_thresh: threshold in seconds passed to flag_time_err
    :returns: (sorted list of flagged timestamps, all phase errors found)
    """
    log_files = glob.glob(directory + '/*/0/000000000_00000000')
    print('I have ' + str(len(log_files)) + ' log files to scan')
    total_phase_errs = []
    n_files = len(log_files)
    for i, log_file in enumerate(log_files):
        # The recording start day (%Y%j) is encoded in an ancestor dir name.
        day_code = log_file.split('/')[-4][0:7]
        startdate = dt.datetime.strptime(day_code, '%Y%j').date()
        total_phase_errs.extend(rt_time_log(log_file, startdate))
        # Simple in-place progress indicator.
        sys.stdout.write("\r" + str(float(i) / n_files * 100) + "% \r")
        sys.stdout.flush()
    time_errs = flag_time_err(total_phase_errs, time_thresh)
    time_errs.sort()
    return time_errs, total_phase_errs
Check all the log - files in a directory tree for timing errors .
14,412
def _cc_round ( num , dp ) : num = round ( num , dp ) num = '{0:.{1}f}' . format ( num , dp ) return num
Convenience function to round a float to dp decimal places, padding with zeros, and return it as a string.
14,413
def readSTATION0(path, stations):
    """
    Read a Seisan STATION0.HYP file on the path given.

    Also writes a hypoDD-style 'station.dat' file to the current working
    directory as a side effect.

    :param path: directory containing STATION0.HYP
    :param stations: iterable of station codes to extract
    :returns: list of (station, latitude, longitude, elevation-in-m) tuples
    """
    stalist = []
    f = open(path + '/STATION0.HYP', 'r')
    for line in f:
        # Fixed-column format: cols 1-5 station, 6-13 latitude,
        # 14-22 longitude, 23- elevation.
        if line[1:6].strip() in stations:
            station = line[1:6].strip()
            lat = line[6:14]
            # Trailing hemisphere character gives the sign.
            if lat[-1] == 'S':
                NS = -1
            else:
                NS = 1
            # Two layouts: explicit decimal point at col 4, or implied
            # decimal minutes split across the field.
            if lat[4] == '.':
                lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
            else:
                lat = (int(lat[0:2]) +
                       float(lat[2:4] + '.' + lat[4:-1]) / 60) * NS
            lon = line[14:23]
            if lon[-1] == 'W':
                EW = -1
            else:
                EW = 1
            if lon[5] == '.':
                lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
            else:
                lon = (int(lon[0:3]) +
                       float(lon[3:5] + '.' + lon[5:-1]) / 60) * EW
            elev = float(line[23:-1].strip())
            # A '-' in column 0 marks a below-sea-level elevation.
            if line[0] == '-':
                elev *= -1
            stalist.append((station, lat, lon, elev))
    f.close()
    # Side effect: write the stations out in hypoDD station.dat format,
    # elevation converted from m to km.
    f = open('station.dat', 'w')
    for sta in stalist:
        line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
                        _cc_round(sta[2], 4).ljust(10),
                        _cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
        f.write(line)
    f.close()
    return stalist
Read a Seisan STATION0 . HYP file on the path given .
14,414
def sfiles_to_event(sfile_list):
    """
    Write an event.dat file from a list of Seisan s-files.

    :param sfile_list: list of paths to Seisan s-files
    :returns: list of (index, sfile) tuples in origin-time order
    """
    # Order the s-files by their origin time before numbering them.
    ordered = sorted(
        ((readheader(sfile).origins[0].time, sfile) for sfile in sfile_list),
        key=lambda pair: pair[0])
    event_list = []
    catalog = Catalog()
    for index, (_, sfile) in enumerate(ordered):
        event_list.append((index, sfile))
        catalog.append(readheader(sfile))
    # Side effect: writes event.dat in the current working directory.
    write_event(catalog)
    return event_list
Write an event.dat file from a list of Seisan events.
14,415
def write_event(catalog):
    """
    Write an obspy.core.event.Catalog to a hypoDD format event.dat file
    in the current working directory.

    :param catalog: obspy Catalog of events with origins
    :raises IOError: if an event has no origin
    """
    f = open('event.dat', 'w')
    for i, event in enumerate(catalog):
        try:
            evinfo = event.origins[0]
        except IndexError:
            raise IOError('No origin')
        # Magnitude and time residual default to 0.0 when absent.
        try:
            Mag_1 = event.magnitudes[0].mag
        except IndexError:
            Mag_1 = 0.0
        try:
            t_RMS = event.origins[0].quality['standard_error']
        except AttributeError:
            print('No time residual in header')
            t_RMS = 0.0
        # Fixed-width hypoDD event line: date, time (centiseconds),
        # lat, lon, depth (km), magnitude, placeholder errors, RMS, ID.
        f.write(str(evinfo.time.year) + str(evinfo.time.month).zfill(2) +
                str(evinfo.time.day).zfill(2) + ' ' +
                str(evinfo.time.hour).rjust(2) +
                str(evinfo.time.minute).zfill(2) +
                str(evinfo.time.second).zfill(2) +
                str(evinfo.time.microsecond)[0:2].zfill(2) + ' ' +
                str(evinfo.latitude).ljust(8, str('0')) + ' ' +
                str(evinfo.longitude).ljust(8, str('0')) + ' ' +
                str(evinfo.depth / 1000).rjust(7).ljust(9, str('0')) + ' ' +
                str(Mag_1) + ' 0.00 0.00 ' +
                str(t_RMS).ljust(4, str('0')) +
                str(i).rjust(11) + '\n')
    f.close()
    return
Write obspy . core . event . Catalog to a hypoDD format event . dat file .
14,416
def read_phase(ph_file):
    """
    Read a hypoDD phase file into an obspy Catalog.

    :param ph_file: path to the hypoDD phase file; '#' lines start a new
        event header, other lines are picks for the current event
    :returns: obspy Catalog of the parsed events
    """
    ph_catalog = Catalog()
    # BUG FIX: replaced the "'event_text' not in locals()" anti-pattern
    # with an explicit sentinel, used a context manager so the file is
    # always closed, and made an empty file return an empty catalog
    # instead of raising NameError.
    event_text = None
    with open(ph_file, 'r') as f:
        for line in f:
            if line[0] == '#':
                # A header line flushes the previous event (if any) and
                # starts a new one.
                if event_text is not None:
                    ph_catalog.append(_phase_to_event(event_text))
                event_text = {'header': line.rstrip(), 'picks': []}
            else:
                event_text['picks'].append(line.rstrip())
    # Flush the final event.
    if event_text is not None:
        ph_catalog.append(_phase_to_event(event_text))
    return ph_catalog
Read hypoDD phase files into Obspy catalog class .
14,417
def _phase_to_event(event_text):
    """
    Convert the text for one event in hypoDD phase format to an Event object.

    :param event_text: dict with 'header' (the '#' line) and 'picks'
        (list of pick lines) for one event
    :returns: obspy Event with origin, magnitude, picks and arrivals
    """
    ph_event = Event()
    header = event_text['header'].split()
    ph_event.origins.append(Origin())
    # Header fields: 1-6 date/time, 7-9 lat/lon/depth, 10 magnitude,
    # 13 RMS residual.
    ph_event.origins[0].time = UTCDateTime(
        year=int(header[1]), month=int(header[2]), day=int(header[3]),
        hour=int(header[4]), minute=int(header[5]),
        second=int(header[6].split('.')[0]),
        # Fractional seconds converted to microseconds.
        microsecond=int(
            float(('0.' + header[6].split('.')[1])) * 1000000))
    ph_event.origins[0].latitude = float(header[7])
    ph_event.origins[0].longitude = float(header[8])
    # hypoDD depth is in km; obspy uses metres.
    ph_event.origins[0].depth = float(header[9]) * 1000
    ph_event.origins[0].quality = OriginQuality(
        standard_error=float(header[13]))
    ph_event.magnitudes.append(Magnitude())
    ph_event.magnitudes[0].mag = float(header[10])
    ph_event.magnitudes[0].magnitude_type = 'M'
    for i, pick_line in enumerate(event_text['picks']):
        # Pick fields: 0 station, 1 travel time, 2 weight, 3 phase.
        pick = pick_line.split()
        _waveform_id = WaveformStreamID(station_code=pick[0])
        pick_time = ph_event.origins[0].time + float(pick[1])
        ph_event.picks.append(Pick(waveform_id=_waveform_id,
                                   phase_hint=pick[3], time=pick_time))
        # NOTE(review): Arrival.phase is given the Pick object itself rather
        # than the phase-hint string — looks suspect, TODO confirm against
        # obspy's Arrival definition.
        ph_event.origins[0].arrivals.append(
            Arrival(phase=ph_event.picks[i],
                    pick_id=ph_event.picks[i].resource_id))
        ph_event.origins[0].arrivals[i].time_weight = float(pick[2])
    return ph_event
Function to convert the text for one event in hypoDD phase format to an event object.
14,418
def extract_from_stack(stack, template, length, pre_pick, pre_pad,
                       Z_include=False, pre_processed=True, samp_rate=None,
                       lowcut=None, highcut=None, filt_order=3):
    """
    Extract a multiplexed template from a stack of detections.

    :param stack: obspy Stream stacked over detections
    :param template: the original template Stream (provides channel delays)
    :param length: new template length in seconds
    :param pre_pick: seconds before the pick to include
    :param pre_pad: seconds of padding at the start of the stack
    :param Z_include: if True, fall back to station-only delay matching
    :param pre_processed: if False, filter/resample the stack first
    :param samp_rate, lowcut, highcut, filt_order: processing parameters
        used when pre_processed is False
    :returns: obspy Stream holding the extracted template
    """
    new_template = stack.copy()
    mintime = min([tr.stats.starttime for tr in template])
    # Per-channel delay of each template trace relative to the earliest one.
    delays = [(tr.stats.station, tr.stats.channel[-1],
               tr.stats.starttime - mintime) for tr in template]
    if not pre_processed:
        new_template = pre_processing.shortproc(
            st=new_template, lowcut=lowcut, highcut=highcut,
            filt_order=filt_order, samp_rate=samp_rate, debug=0)
    out = Stream()
    # BUG FIX: iterate over a snapshot — the original iterated new_template
    # while calling new_template.remove(tr), which skips the trace after
    # each removal.
    for tr in list(new_template):
        delay = [d[2] for d in delays if d[0] == tr.stats.station and
                 d[1] == tr.stats.channel[-1]]
        if Z_include and len(delay) == 0:
            # Fall back to matching on station alone (e.g. vertical chans).
            delay = [d[2] for d in delays if d[0] == tr.stats.station]
        if len(delay) == 0:
            debug_print("No matching template channel found for stack channel"
                        " {0}.{1}".format(tr.stats.station,
                                          tr.stats.channel), 2, 3)
            new_template.remove(tr)
        else:
            for d in delay:
                out += tr.copy().trim(
                    starttime=tr.stats.starttime + d + pre_pad - pre_pick,
                    endtime=tr.stats.starttime + d + pre_pad + length -
                    pre_pick)
    return out
Extract a multiplexed template from a stack of detections .
14,419
def _group_events(catalog, process_len, template_length, data_pad):
    """
    Internal function to group events into sub-catalogs based on process_len.

    :param catalog: obspy Catalog to split (its events are sorted in place)
    :param process_len: length of a processing chunk in seconds
    :param template_length: template length in seconds
    :param data_pad: padding in seconds applied at both chunk ends
    :returns: list of Catalogs, each fitting within one processing chunk
    """
    if len(catalog) == 1:
        return [catalog]
    # Work through the events in origin-time order.
    catalog.events = sorted(
        catalog.events,
        key=lambda e: (e.preferred_origin() or e.origins[0]).time)
    groups = []
    current = Catalog([catalog[0]])
    for event in catalog[1:]:
        origin_time = (event.preferred_origin() or event.origins[0]).time
        latest_pick = max(event.picks, key=lambda p: p.time)
        # Maximum origin-time spread that still lets this event's picks and
        # template fit in the same chunk, minus padding at both ends.
        window = (process_len - (latest_pick.time - origin_time) -
                  template_length - 2 * data_pad)
        if origin_time - current[0].origins[0].time < window:
            current.append(event)
        else:
            groups.append(current)
            current = Catalog([event])
    groups.append(current)
    return groups
Internal function to group events into sub - catalogs based on process_len .
14,420
def multi_template_gen(catalog, st, length, swin='all', prepick=0.05,
                       all_horiz=False, delayed=True, plot=False, debug=0,
                       return_event=False, min_snr=None):
    """
    Generate multiple templates from one stream of data.

    .. deprecated:: use template_gen.template_gen instead.

    :returns: whatever template_gen returns for method "from_meta_file"
    """
    # BUG FIX: the deprecation warning was only instantiated, never emitted;
    # route it through warnings.warn so callers actually see it.
    warnings.warn(
        "Function is deprecated and will be removed soon. Use "
        "template_gen.template_gen instead.", EQcorrscanDeprecationWarning)
    temp_list = template_gen(
        method="from_meta_file", process=False, meta_file=catalog, st=st,
        lowcut=None, highcut=None, samp_rate=st[0].stats.sampling_rate,
        filt_order=None, length=length, prepick=prepick, swin=swin,
        all_horiz=all_horiz, delayed=delayed, plot=plot, debug=debug,
        return_event=return_event, min_snr=min_snr, parallel=False)
    return temp_list
Generate multiple templates from one stream of data .
14,421
def from_client(catalog, client_id, lowcut, highcut, samp_rate, filt_order,
                length, prepick, swin, process_len=86400, data_pad=90,
                all_horiz=False, delayed=True, plot=False, debug=0,
                return_event=False, min_snr=None):
    """
    Generate a multiplexed template from an FDSN client.

    .. deprecated:: use template_gen.template_gen instead.

    :returns: whatever template_gen returns for method "from_client"
    """
    # BUG FIX: the deprecation warning was only instantiated, never emitted;
    # route it through warnings.warn so callers actually see it.
    warnings.warn(
        "Function is deprecated and will be removed soon. Use "
        "template_gen.template_gen instead.", EQcorrscanDeprecationWarning)
    temp_list = template_gen(
        method="from_client", catalog=catalog, client_id=client_id,
        lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
        filt_order=filt_order, length=length, prepick=prepick, swin=swin,
        process_len=process_len, data_pad=data_pad, all_horiz=all_horiz,
        delayed=delayed, plot=plot, debug=debug, return_event=return_event,
        min_snr=min_snr)
    return temp_list
Generate multiplexed template from FDSN client .
14,422
def from_sac(sac_files, lowcut, highcut, samp_rate, filt_order, length, swin,
             prepick, all_horiz=False, delayed=True, plot=False, debug=0,
             return_event=False, min_snr=None):
    """
    Generate a multiplexed template from a list of SAC files.

    .. deprecated:: use template_gen.template_gen instead.

    :returns: whatever template_gen returns for method "from_sac"
    """
    # BUG FIX: the deprecation warning was only instantiated, never emitted;
    # route it through warnings.warn so callers actually see it.
    warnings.warn(
        "Function is deprecated and will be removed soon. Use "
        "template_gen.template_gen instead.", EQcorrscanDeprecationWarning)
    temp_list = template_gen(
        method="from_sac", sac_files=sac_files, lowcut=lowcut,
        highcut=highcut, samp_rate=samp_rate, filt_order=filt_order,
        length=length, prepick=prepick, swin=swin, all_horiz=all_horiz,
        delayed=delayed, plot=plot, debug=debug, return_event=return_event,
        min_snr=min_snr, parallel=False)
    return temp_list
Generate a multiplexed template from a list of SAC files .
14,423
def time_func(func, name, *args, **kwargs):
    """
    Call *func* with args and kwargs, print *name* and how long the call
    took, and return the result.

    :param func: callable to invoke
    :param name: label printed with the timing
    :returns: whatever func returns
    """
    start = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - start
    print('%s took %0.2f seconds' % (name, elapsed))
    return result
call a func with args and kwargs print name of func and how long it took .
14,424
def seis_sim(sp, amp_ratio=1.5, flength=False, phaseout='all'):
    """
    Generate a simulated seismogram from a given S-P time (in samples).

    :param sp: S-P time in samples
    :param amp_ratio: S amplitude relative to the P spike
    :param flength: fixed output length in samples, or False for automatic
    :param phaseout: 'all', 'P' or 'S' — which part to return when a fixed
        length is requested
    :returns: 1-D numpy array, normalised to unit peak amplitude
    """
    # Pad so the S coda fits; honour a fixed length when it is big enough.
    if flength and 2.5 * sp < flength and 100 < flength:
        pad = flength
    elif 2.5 * sp < 100.0:
        pad = 100
    else:
        pad = 2.5 * sp
    synth = np.zeros(int(sp + 10 + pad))
    synth[10] = 1.0  # P arrival spike
    # Decaying S-wave spike train starting sp samples after the P.
    s_len = 10 + int(sp // 3)
    s_spikes = np.arange(amp_ratio, 0, -(amp_ratio / s_len))
    # Zero every odd sample; flip the sign of every fourth from index 2
    # (vectorised form of the original per-index loop).
    s_spikes[1::2] = 0
    s_spikes[2::4] *= -1
    synth[10 + sp:10 + sp + len(s_spikes)] = s_spikes
    # Convolve with a damped sinusoid to give the spikes a wavelet shape.
    sine_x = np.arange(0, 10.0, 0.5)
    damped_sine = np.exp(-sine_x) * np.sin(2 * np.pi * sine_x)
    synth = np.convolve(synth, damped_sine)
    synth = synth / np.max(np.abs(synth))
    if not flength:
        return synth
    # Trim to the requested window, then pad/truncate to exactly flength.
    if phaseout in ['all', 'P']:
        synth = synth[0:flength]
    elif phaseout == 'S':
        synth = synth[sp:]
    if len(synth) < flength:
        synth = np.append(synth, np.zeros(flength - len(synth)))
    else:
        synth = synth[0:flength]
    return synth
Generate a simulated seismogram from a given S - P time .
14,425
def SVD_sim(sp, lowcut, highcut, samp_rate, amp_range=np.arange(-10, 10, 0.01)):
    """
    Generate basis vectors of a set of simulated seismograms.

    :param sp: S-P time in seconds (converted to samples internally)
    :param lowcut: band-pass low corner in Hz
    :param highcut: band-pass high corner in Hz
    :param samp_rate: sampling rate in Hz
    :param amp_range: amplitude ratios to simulate over
    :returns: (U, s, V, stachans) as produced by clustering.svd
    """
    sp_samples = int(sp * samp_rate)
    # One single-trace Stream per amplitude ratio.
    synthetics = [Stream(Trace(seis_sim(sp_samples, amp)))
                  for amp in amp_range]
    for stream in synthetics:
        for trace in stream:
            trace.stats.station = 'SYNTH'
            trace.stats.channel = 'SH1'
            trace.stats.sampling_rate = samp_rate
            trace.filter('bandpass', freqmin=lowcut, freqmax=highcut)
    U, s, V, stachans = clustering.svd(synthetics)
    return U, s, V, stachans
Generate basis vectors of a set of simulated seismograms .
14,426
def template_grid(stations, nodes, travel_times, phase, PS_ratio=1.68,
                  samp_rate=100, flength=False, phaseout='all'):
    """
    Generate a group of synthetic seismograms for a grid of sources.

    :param stations: list of station names
    :param nodes: list of source nodes
    :param travel_times: travel_times[j][i] is the time for station j from
        node i, for the phase given by `phase`
    :param phase: 'P' or 'S' — which phase the travel times are for
    :param PS_ratio: Vp/Vs ratio used to derive the other phase's time
    :param samp_rate: sampling rate in Hz
    :param flength: fixed template length in samples, or False
    :param phaseout: 'all', 'P', 'S' or 'both' — which phases to synthesise
    :returns: list of obspy Streams, one per node
    :raises IOError: if phase is neither 'P' nor 'S'
    """
    if phase not in ['S', 'P']:
        raise IOError('Phase is neither P nor S')
    templates = []
    for i, node in enumerate(nodes):
        st = []
        for j, station in enumerate(stations):
            tr = Trace()
            tr.stats.sampling_rate = samp_rate
            tr.stats.station = station
            tr.stats.channel = 'SYN'
            tt = travel_times[j][i]
            # Derive the S-P time and set the trace start so it begins at
            # the requested phase's arrival.
            if phase == 'P':
                SP_time = (tt * PS_ratio) - tt
                if phaseout == 'S':
                    tr.stats.starttime += tt + SP_time
                else:
                    tr.stats.starttime += tt
            elif phase == 'S':
                SP_time = tt - (tt / PS_ratio)
                if phaseout == 'S':
                    tr.stats.starttime += tt
                else:
                    tr.stats.starttime += tt - SP_time
            # Only synthesise when the S-P separation fits in the fixed
            # length (11-sample margin for the wavelet onset).
            if flength and SP_time * samp_rate < flength - 11 and \
                    phaseout == 'all':
                tr.data = seis_sim(sp=int(SP_time * samp_rate),
                                   amp_ratio=1.5, flength=flength,
                                   phaseout=phaseout)
                st.append(tr)
            elif flength and phaseout == 'all':
                warnings.warn('Cannot make a bulk synthetic with this fixed '
                              + 'length for station ' + station)
            elif phaseout == 'all':
                tr.data = seis_sim(sp=int(SP_time * samp_rate),
                                   amp_ratio=1.5, flength=flength,
                                   phaseout=phaseout)
                st.append(tr)
            elif phaseout in ['P', 'S']:
                tr.data = seis_sim(sp=int(SP_time * samp_rate),
                                   amp_ratio=1.5, flength=flength,
                                   phaseout=phaseout)
                st.append(tr)
            elif phaseout == 'both':
                # Emit separate vertical (P) and horizontal (S) traces.
                for _phaseout in ['P', 'S']:
                    _tr = tr.copy()
                    _tr.data = seis_sim(sp=int(SP_time * samp_rate),
                                        amp_ratio=1.5, flength=flength,
                                        phaseout=_phaseout)
                    if _phaseout == 'P':
                        _tr.stats.channel = 'SYN_Z'
                        # P arrives SP_time earlier than the S start.
                        _tr.stats.starttime = _tr.stats.starttime - SP_time
                    elif _phaseout == 'S':
                        _tr.stats.channel = 'SYN_H'
                    st.append(_tr)
        templates.append(Stream(st))
    return templates
Generate a group of synthetic seismograms for a grid of sources .
14,427
def generate_synth_data(nsta, ntemplates, nseeds, samp_rate, t_length,
                        max_amp, max_lag, debug=0):
    """
    Generate a synthetic dataset to be used for testing.

    :param nsta: number of stations (max 15, drawn from a fixed name list)
    :param ntemplates: number of synthetic templates / source nodes
    :param nseeds: number of seeded detections per template
    :param samp_rate: sampling rate in Hz
    :param t_length: template length in seconds
    :param max_amp: maximum seeded amplitude
    :param max_lag: maximum travel-time lag in seconds
    :param debug: debug level; > 1 prints the grid, > 2 prints templates
    :returns: (templates, day-long data Stream, list of seed dicts)
    """
    t_times = np.abs(np.random.random([nsta, ntemplates])) * max_lag
    lats = np.random.random(ntemplates) * 90.0
    lons = np.random.random(ntemplates) * 90.0
    depths = np.abs(np.random.random(ntemplates) * 40.0)
    # BUG FIX: zip() returns a one-shot iterator in Python 3 — printing it
    # below showed "<zip object>" and any second traversal would see it
    # exhausted; materialise the node list.
    nodes = list(zip(lats, lons, depths))
    stations = ['ALPH', 'BETA', 'GAMM', 'KAPP', 'ZETA', 'BOB', 'MAGG',
                'ALF', 'WALR', 'ALBA', 'PENG', 'BANA', 'WIGG', 'SAUS',
                'MALC']
    if debug > 1:
        print(nodes)
        print(t_times)
        print(stations[0:nsta])
    templates = template_grid(stations=stations[0:nsta], nodes=nodes,
                              travel_times=t_times, phase='S',
                              samp_rate=samp_rate,
                              flength=int(t_length * samp_rate))
    if debug > 2:
        for template in templates:
            print(template)
    seeds = []
    # Day-long zeroed data with the same channel layout as the templates.
    data = templates[0].copy()
    for tr in data:
        tr.data = np.zeros(86400 * int(samp_rate))
        tr.stats.starttime = UTCDateTime(0)
    for i, template in enumerate(templates):
        # Random impulse train: where/how strongly this template is seeded.
        impulses = np.zeros(86400 * int(samp_rate))
        impulse_times = np.random.randint(86400 * int(samp_rate),
                                          size=nseeds)
        impulse_amplitudes = np.random.randn(nseeds) * max_amp
        seeds.append({'SNR': impulse_amplitudes, 'time': impulse_times})
        for j in range(nseeds):
            impulses[impulse_times[j]] = impulse_amplitudes[j]
        mintime = min([template_tr.stats.starttime
                       for template_tr in template])
        for j, template_tr in enumerate(template):
            # Shift the impulse train by this channel's moveout, then
            # convolve with the template waveform.
            offset = int((template_tr.stats.starttime - mintime) * samp_rate)
            pad = np.zeros(offset)
            tr_impulses = np.append(pad, impulses)[0:len(impulses)]
            data[j].data += np.convolve(
                tr_impulses, template_tr.data)[0:len(impulses)]
    # Add normalised white noise to every channel.
    for tr in data:
        noise = np.random.randn(86400 * int(samp_rate))
        # PERF: noise.max() instead of builtin max() over 8.6M samples;
        # identical value for a 1-D array.
        tr.data += noise / noise.max()
    return templates, data, seeds
Generate a synthetic dataset to be used for testing .
14,428
def linstack(streams, normalize=True):
    """
    Compute the linear stack of a series of seismic streams of multiplexed
    data.

    :param streams: list of obspy Streams to stack
    :param normalize: if True, RMS-normalise each trace before summing
    :returns: a new Stream holding the stacked traces
    """
    # Use the stream with the most channels as the stacking base.
    base_index = np.argmax([len(stream) for stream in streams])
    stack = streams[base_index].copy()
    if normalize:
        for tr in stack:
            tr.data = tr.data / np.sqrt(np.mean(np.square(tr.data)))
            tr.data = np.nan_to_num(tr.data)
    # NOTE(review): streams[0] is always skipped here even when it was not
    # chosen as the base (and the base may be added twice) — preserved
    # as-is, TODO confirm intended.
    for other in streams[1:]:
        for tr in stack:
            matched = other.select(station=tr.stats.station,
                                   channel=tr.stats.channel)
            if matched:
                if normalize:
                    addition = matched[0].data / np.sqrt(
                        np.mean(np.square(matched[0].data)))
                    addition = np.nan_to_num(addition)
                else:
                    addition = matched[0].data
                tr.data = np.sum((addition, tr.data), axis=0)
    return stack
Compute the linear stack of a series of seismic streams of multiplexed data.
14,429
def PWS_stack(streams, weight=2, normalize=True):
    """
    Compute the phase weighted stack of a series of streams.

    :param streams: list of obspy Streams to stack
    :param weight: exponent applied to the phase-stack weighting
    :param normalize: passed through to linstack for the phase stack
    :returns: Stream of phase-weighted stacked traces
    """
    # Linear stack of the raw data, used as the amplitude term.
    Linstack = linstack(streams)
    instaphases = []
    print("Computing instantaneous phase")
    for stream in streams:
        instaphase = stream.copy()
        for tr in instaphase:
            analytic = hilbert(tr.data)
            # NOTE(review): envelope is computed from analytic**2 plus the
            # real data squared; the analytic signal from scipy's hilbert
            # already contains the real part, so this formula looks
            # non-standard — TODO confirm intended.
            envelope = np.sqrt(np.sum((np.square(analytic),
                                       np.square(tr.data)), axis=0))
            tr.data = analytic / envelope
        instaphases.append(instaphase)
    print("Computing the phase stack")
    # Stack of unit phasors: coherence weighting term.
    Phasestack = linstack(instaphases, normalize=normalize)
    for tr in Phasestack:
        # Weight the linear stack by the phase coherence.
        tr.data = Linstack.select(
            station=tr.stats.station)[0].data * np.abs(tr.data ** weight)
    return Phasestack
Compute the phase weighted stack of a series of streams .
14,430
def align_traces(trace_list, shift_len, master=False, positive=False,
                 plot=False):
    """
    Align traces relative to each other based on their cross-correlation
    value.

    :param trace_list: list of obspy Traces to align (not modified)
    :param shift_len: maximum shift in samples to search over
    :param master: Trace to align against; if False, the trace with the
        largest MAD amplitude is chosen automatically
    :param positive: if True, only accept positive correlations
    :param plot: if True, plot each cross-correlation
    :returns: (list of shifts in seconds, list of correlation values)
    :raises ValueError: if sampling rates differ
    """
    from eqcorrscan.core.match_filter import normxcorr2
    from eqcorrscan.utils.plotting import xcorr_plot
    traces = deepcopy(trace_list)
    if not master:
        # Pick the highest-energy trace (by MAD) as the master.
        master = traces[0]
        MAD_master = np.median(np.abs(master.data))
        for i in range(1, len(traces)):
            if np.median(np.abs(traces[i].data)) > MAD_master:
                master = traces[i]
                MAD_master = np.median(np.abs(master.data))
    else:
        print('Using master given by user')
    shifts = []
    ccs = []
    for i in range(len(traces)):
        if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
            raise ValueError('Sampling rates not the same')
        # Correlate the trimmed trace against the full master; trimming by
        # shift_len at both ends allows +/- shift_len lags.
        cc_vec = normxcorr2(
            template=traces[i].data.astype(np.float32)[shift_len:-shift_len],
            image=master.data.astype(np.float32))
        cc_vec = cc_vec[0]
        # Best lag by absolute correlation (allows polarity flips).
        shift = np.abs(cc_vec).argmax()
        cc = cc_vec[shift]
        if plot:
            xcorr_plot(
                template=traces[i].data.astype(
                    np.float32)[shift_len:-shift_len],
                image=master.data.astype(np.float32), shift=shift, cc=cc)
        # Convert lag to a signed shift about zero.
        shift -= shift_len
        if cc < 0 and positive:
            # Re-pick using the best strictly-positive correlation.
            cc = cc_vec.max()
            shift = cc_vec.argmax() - shift_len
        shifts.append(shift / master.stats.sampling_rate)
        ccs.append(cc)
    return shifts, ccs
Align traces relative to each other based on their cross - correlation value .
14,431
def temporary_directory():
    """
    Make a temporary directory, yield its name, and clean up on exit.

    Generator intended for use as a context manager (e.g. wrapped with
    contextlib.contextmanager at the definition site).
    """
    dir_name = tempfile.mkdtemp()
    # BUG FIX: cleanup was not in a finally block, so an exception raised
    # while the directory was in use would leak it on disk.
    try:
        yield dir_name
    finally:
        if os.path.exists(dir_name):
            shutil.rmtree(dir_name)
Make a temporary directory, yield its name, and clean up on exit.
14,432
def _total_microsec ( t1 , t2 ) : td = t1 - t2 return ( td . seconds + td . days * 24 * 3600 ) * 10 ** 6 + td . microseconds
Calculate difference between two datetime stamps in microseconds .
14,433
def _templates_match ( t , family_file ) : return t . name == family_file . split ( os . sep ) [ - 1 ] . split ( '_detections.csv' ) [ 0 ]
Return True if a tribe matches a family file path .
14,434
def _group_process(template_group, parallel, debug, cores, stream, daylong,
                   ignore_length, overlap):
    """Process continuous data into chunks based on template processing.

    All templates in ``template_group`` are assumed to share processing
    parameters - only the first template is inspected.

    :param template_group: list of Template objects with common processing.
    :param parallel: whether to pre-process in parallel.
    :param debug: debug level passed to the processing functions.
    :param cores: number of cores for parallel processing.
    :param stream: obspy Stream of continuous data.
    :param daylong: if True use dayproc (fixed 86400 s day chunks).
    :param ignore_length: passed to dayproc to allow short data.
    :param overlap: overlap between successive chunks, in seconds.
    :returns: list of processed obspy Streams, one per chunk.
    """
    master = template_group[0]
    processed_streams = []
    kwargs = {
        'filt_order': master.filt_order, 'highcut': master.highcut,
        'lowcut': master.lowcut, 'samp_rate': master.samp_rate,
        'debug': debug, 'parallel': parallel, 'num_cores': cores}
    if daylong:
        if not master.process_length == 86400:
            warnings.warn(
                'Processing day-long data, but template was cut from %i s long'
                ' data, will reduce correlations' % master.process_length)
        func = dayproc
        kwargs.update({'ignore_length': ignore_length})
        # All traces should start on the same calendar day for dayproc.
        starttimes = [tr.stats.starttime.date for tr in stream]
        if not len(list(set(starttimes))) == 1:
            warnings.warn('Data start on different days, setting to last day')
            starttime = UTCDateTime(
                stream.sort(['starttime'])[-1].stats.starttime.date)
        else:
            starttime = stream.sort(['starttime'])[0].stats.starttime
    else:
        func = shortproc
        starttime = stream.sort(['starttime'])[0].stats.starttime
    endtime = stream.sort(['endtime'])[-1].stats.endtime
    data_len_samps = round((endtime - starttime) * master.samp_rate) + 1
    chunk_len_samps = (master.process_length - overlap) * master.samp_rate
    n_chunks = int(data_len_samps / chunk_len_samps)
    if n_chunks == 0:
        print('Data must be process_length or longer, not computing')
    for i in range(n_chunks):
        # Advance each window by process_length minus the overlap.
        kwargs.update(
            {'starttime': starttime + (i * (master.process_length - overlap))})
        if not daylong:
            kwargs.update(
                {'endtime': kwargs['starttime'] + master.process_length})
            chunk_stream = stream.slice(
                starttime=kwargs['starttime'],
                endtime=kwargs['endtime']).copy()
        else:
            chunk_stream = stream.copy()
            for tr in chunk_stream:
                # Trim day-long data to exactly process_length samples.
                tr.data = tr.data[0:int(
                    master.process_length * tr.stats.sampling_rate)]
        processed_streams.append(func(st=chunk_stream, **kwargs))
    return processed_streams
Process data into chunks based on template processing length .
14,435
def _par_read(dirname, compressed=True):
    """Internal function to read a formatted template parameter file.

    :param dirname: archive filename (when compressed) or directory
        containing ``template_parameters.csv``.
    :param compressed: if True, read the file out of a tar archive.
    :returns: list of Template objects with parameters (no waveforms) set.
    :raises MatchFilterError: if the archive has no parameter file.
    """
    templates = []
    if compressed:
        arc = tarfile.open(dirname, "r:*")
        members = arc.getmembers()
        _parfile = [member for member in members
                    if member.name.split(os.sep)[-1] ==
                    'template_parameters.csv']
        if len(_parfile) == 0:
            arc.close()
            raise MatchFilterError('No template parameter file in archive')
        parfile = arc.extractfile(_parfile[0])
    else:
        parfile = open(dirname + '/' + 'template_parameters.csv', 'r')
    # One template per line; each line is comma-separated "key: value"
    # pairs written by Tribe._par_write.
    for line in parfile:
        t_in = Template()
        for key_pair in line.rstrip().split(','):
            if key_pair.split(':')[0].strip() == 'name':
                t_in.__dict__[key_pair.split(':')[0].strip()] = \
                    key_pair.split(':')[-1].strip()
            elif key_pair.split(':')[0].strip() == 'filt_order':
                try:
                    t_in.__dict__[key_pair.split(':')[0].strip()] = \
                        int(key_pair.split(':')[-1])
                except ValueError:
                    # Leave the attribute unset if not parsable.
                    pass
            else:
                try:
                    t_in.__dict__[key_pair.split(':')[0].strip()] = \
                        float(key_pair.split(':')[-1])
                except ValueError:
                    pass
        templates.append(t_in)
    parfile.close()
    if compressed:
        arc.close()
    return templates
Internal function to read a formatted parameter file .
14,436
def _badpath(path, base):
    """Return True if ``path`` would escape ``base`` when joined.

    ``os.path.join`` discards ``base`` entirely when ``path`` is
    absolute, so the resolved result must be checked against ``base``.
    """
    resolved = _resolved(os.path.join(base, path))
    return not resolved.startswith(base)
joinpath will ignore base if path is absolute .
14,437
def _badlink(info, base):
    """Return True if a tar member's link target escapes ``base``.

    Link targets are interpreted relative to the directory that
    contains the link itself.
    """
    link_dir = _resolved(os.path.join(base, os.path.dirname(info.name)))
    return _badpath(info.linkname, base=link_dir)
Links are interpreted relative to the directory containing the link
14,438
def _safemembers(members):
    """Check members of a tar archive for safety and yield safe ones.

    Ensure members do not contain paths or links pointing outside the
    extraction directory - this would only happen if the archive was
    not made by eqcorrscan.

    :param members: iterable of tarfile.TarInfo members.

    .. note:: The block messages were swapped in the original:
        ``issym()`` identifies symbolic links and ``islnk()`` hard
        links, but the printed labels said the opposite.
    """
    base = _resolved(".")
    for finfo in members:
        if _badpath(finfo.name, base):
            print(finfo.name, "is blocked (illegal path)")
        elif finfo.issym() and _badlink(finfo, base):
            # issym() marks symbolic links.
            print(finfo.name, "is blocked: Symlink to", finfo.linkname)
        elif finfo.islnk() and _badlink(finfo, base):
            # islnk() marks hard links.
            print(finfo.name, "is blocked: Hard link to", finfo.linkname)
        else:
            yield finfo
Check members of a tar archive for safety . Ensure that they do not contain paths or links outside of where we need them - this would only happen if the archive wasn t made by eqcorrscan .
14,439
def _write_family ( family , filename ) : with open ( filename , 'w' ) as f : for detection in family . detections : det_str = '' for key in detection . __dict__ . keys ( ) : if key == 'event' and detection . __dict__ [ key ] is not None : value = str ( detection . event . resource_id ) elif key in [ 'threshold' , 'detect_val' , 'threshold_input' ] : value = format ( detection . __dict__ [ key ] , '.32f' ) . rstrip ( '0' ) else : value = str ( detection . __dict__ [ key ] ) det_str += key + ': ' + value + '; ' f . write ( det_str + '\n' ) return
Write a family to a csv file .
14,440
def _read_family(fname, all_cat, template):
    """Internal function to read csv family files.

    :param fname: filename of the family csv file.
    :param all_cat: obspy Catalog of detection events; if empty, events
        are regenerated from the template instead.
    :param template: Template used to regenerate events when needed.
    :returns: list of Detection objects.
    """
    detections = []
    with open(fname, 'r') as f:
        for line in f:
            det_dict = {}
            gen_event = False
            # Each line is a set of "key: value" pairs separated by ';'.
            for key_pair in line.rstrip().split(';'):
                key = key_pair.split(': ')[0].strip()
                value = key_pair.split(': ')[-1].strip()
                if key == 'event':
                    if len(all_cat) == 0:
                        # No catalog available - rebuild the event below.
                        gen_event = True
                        continue
                    el = [e for e in all_cat
                          if str(e.resource_id).split('/')[-1] == value][0]
                    det_dict.update({'event': el})
                elif key == 'detect_time':
                    det_dict.update({'detect_time': UTCDateTime(value)})
                elif key == 'chans':
                    det_dict.update({'chans': ast.literal_eval(value)})
                elif key in ['template_name', 'typeofdet', 'id',
                             'threshold_type']:
                    det_dict.update({key: value})
                elif key == 'no_chans':
                    det_dict.update({key: int(float(value))})
                elif len(key) == 0:
                    # Trailing ';' leaves an empty field - ignore it.
                    continue
                else:
                    det_dict.update({key: float(value)})
            detection = Detection(**det_dict)
            if gen_event:
                detection._calculate_event(template=template)
            detections.append(detection)
    return detections
Internal function to read csv family files .
14,441
def read_party(fname=None, read_detection_catalog=True):
    """Read detections and metadata from a tar archive into a Party.

    :param fname: archive path; if None a packaged test file is read.
    :param read_detection_catalog: whether to read the detection
        catalog from the archive.
    :returns: the populated Party.
    """
    loaded = Party()
    loaded.read(filename=fname,
                read_detection_catalog=read_detection_catalog)
    return loaded
Read detections and metadata from a tar archive .
14,442
def read_detections(fname):
    """Read detections from a csv file into a list of Detection objects.

    :param fname: path of a file written by ``Detection.write``.
    :returns: list of Detection objects.

    .. note:: The file is now opened with a context manager so the
        handle is closed even when a line fails to parse (the original
        leaked the handle on error).
    """
    detections = []
    with open(fname, 'r') as f:
        for index, line in enumerate(f):
            if index == 0:
                continue  # Skip the header line.
            if line.rstrip().split('; ')[0] == 'Template name':
                # Skip repeated headers from appended files.
                continue
            detection = line.rstrip().split('; ')
            detection[1] = UTCDateTime(detection[1])
            detection[2] = int(float(detection[2]))
            detection[3] = ast.literal_eval(detection[3])
            detection[4] = float(detection[4])
            detection[5] = float(detection[5])
            if len(detection) < 9:
                # Old file format without threshold information.
                detection.extend(['Unset', float('NaN')])
            else:
                detection[7] = float(detection[7])
            detections.append(Detection(
                template_name=detection[0], detect_time=detection[1],
                no_chans=detection[2], detect_val=detection[4],
                threshold=detection[5], threshold_type=detection[6],
                threshold_input=detection[7], typeofdet=detection[8],
                chans=detection[3]))
    return detections
Read detections from a file to a list of Detection objects .
14,443
def write_catalog(detections, fname, format="QUAKEML"):
    """Write events contained within detections to a catalog file.

    :param detections: list of Detection objects with events attached.
    :param fname: output filename.
    :param format: any obspy-supported event file format.
    """
    get_catalog(detections).write(filename=fname, format=format)
Write events contained within detections to a catalog file .
14,444
def extract_from_stream(stream, detections, pad=5.0, length=30.0):
    """Extract waveforms for a list of detections from a stream.

    :param stream: obspy Stream holding the continuous data.
    :param detections: detections with picked events attached.
    :param pad: seconds to cut before each pick.
    :param length: total seconds of each cut window.
    :returns: list of obspy Streams, one per detection.
    """
    streams = []
    for detection in detections:
        extracted = Stream()
        for pick in detection.event.picks:
            matched = stream.select(
                station=pick.waveform_id.station_code,
                channel=pick.waveform_id.channel_code)
            if len(matched) == 0:
                print('No data in stream for pick:')
                print(pick)
                continue
            window_start = pick.time - pad
            extracted += matched.slice(
                starttime=window_start,
                endtime=window_start + length).copy()
        streams.append(extracted)
    return streams
Extract waveforms for a list of detections from a stream .
14,445
def normxcorr2(template, image):
    """Thin wrapper to eqcorrscan.utils.correlate functions.

    :param template: numpy array of the (usually shorter) series.
    :param image: numpy array of the (usually longer) series.
    :returns: 2D numpy array of normalised cross-correlation values of
        shape ``(1, len(image) - len(template) + 1)``, or the string
        'NaN' if the inputs are not numpy arrays (kept for backwards
        compatibility, although raising TypeError would be clearer).
    """
    array_xcorr = get_array_xcorr()
    # isinstance is the idiomatic, subclass-friendly type check.
    if not isinstance(template, np.ndarray) or \
            not isinstance(image, np.ndarray):
        print('You have not provided numpy arrays, I will not convert them')
        return 'NaN'
    if len(template) > len(image):
        # Swap roles so the template is always the shorter series.
        ccc = array_xcorr(
            templates=np.array([image]).astype(np.float32),
            stream=template.astype(np.float32), pads=[0],
            threaded=False)[0][0]
    else:
        ccc = array_xcorr(
            templates=np.array([template]).astype(np.float32),
            stream=image.astype(np.float32), pads=[0],
            threaded=False)[0][0]
    ccc = ccc.reshape((1, len(ccc)))
    return ccc
Thin wrapper to eqcorrscan . utils . correlate functions .
14,446
def select(self, template_name):
    """Select a specific family from the party by template name.

    :param template_name: name of the family's template.
    :returns: the first matching Family.
    :raises IndexError: if no family matches.
    """
    matches = [family for family in self.families
               if family.template.name == template_name]
    return matches[0]
Select a specific family from the party .
14,447
def sort(self):
    """Sort the families in place by their template name.

    :returns: self, to allow chaining.
    """
    def _name_key(family):
        # Order alphabetically by the owning template's name.
        return family.template.name
    self.families.sort(key=_name_key)
    return self
Sort the families by template name .
14,448
def filter(self, dates=None, min_dets=1):
    """Return a new Party filtered by date range and detection count.

    :param dates: two-element sequence of (start, end) time limits.
    :param min_dets: minimum detections for a family to be retained.
    :returns: a new, filtered Party (self is unchanged).
    :raises MatchFilterError: if no date range is given.
    """
    if dates is None:
        raise MatchFilterError('Need a list defining a date range')
    start, end = dates[0], dates[1]
    filtered = Party()
    for family in self.families:
        kept = [detection for detection in family
                if start < detection.detect_time < end]
        candidate = Family(template=family.template, detections=kept)
        if len(candidate) >= min_dets:
            filtered.families.append(candidate)
    return filtered
Return a new Party filtered according to conditions .
14,449
def plot(self, plot_grouped=False, dates=None, min_dets=1, rate=False,
         **kwargs):
    """Plot the cumulative detections in time.

    :param plot_grouped: group all families onto one line if True.
    :param dates: optional (start, end) range to filter by first.
    :param min_dets: minimum detections per family when filtering.
    :param rate: plot detection rate instead of cumulative count.
    :returns: the matplotlib figure.
    """
    # Truthiness matters here: an empty dates sequence means no filter.
    source = self.filter(dates=dates, min_dets=min_dets) if dates else self
    all_dets = []
    for family in source.families:
        all_dets.extend(family.detections)
    fig = cumulative_detections(
        detections=all_dets, plot_grouped=plot_grouped, rate=rate,
        **kwargs)
    return fig
Plot the cumulative detections in time .
14,450
def rethreshold(self, new_threshold, new_threshold_type='MAD'):
    """Remove detections from the Party that are below a new threshold.

    Works in place on each family's detection list; surviving
    detections have their threshold attributes rewritten.

    :param new_threshold: new threshold level (input value).
    :param new_threshold_type: one of 'MAD', 'absolute' or
        'av_chan_corr'.
    :returns: self.
    :raises MatchFilterError: for impossible conversions or an
        unrecognised threshold type.
    """
    for family in self.families:
        rethresh_detections = []
        for d in family.detections:
            if new_threshold_type == 'MAD' and d.threshold_type == 'MAD':
                # threshold == input * MAD at detection time, so the
                # stored ratio recovers the MAD for rescaling.
                new_thresh = (d.threshold /
                              d.threshold_input) * new_threshold
            elif new_threshold_type == 'MAD' and d.threshold_type != 'MAD':
                raise MatchFilterError('Cannot recalculate MAD level, '
                                       'use another threshold type')
            elif new_threshold_type == 'absolute':
                new_thresh = new_threshold
            elif new_threshold_type == 'av_chan_corr':
                # Per-channel level scaled by the channel count used.
                new_thresh = new_threshold * d.no_chans
            else:
                raise MatchFilterError(
                    'new_threshold_type %s is not recognised' %
                    str(new_threshold_type))
            if d.detect_val >= new_thresh:
                d.threshold = new_thresh
                d.threshold_input = new_threshold
                d.threshold_type = new_threshold_type
                rethresh_detections.append(d)
        family.detections = rethresh_detections
    return self
Remove detections from the Party that are below a new threshold .
14,451
def decluster(self, trig_int, timing='detect', metric='avg_cor'):
    """De-cluster a Party by enforcing a minimum detection separation.

    Works across ALL families: within any window of ``trig_int``
    seconds only the best detection (by ``metric``) survives; families
    are rebuilt from the survivors.

    :param trig_int: minimum separation between detections in seconds.
    :param timing: 'detect' uses detection times, 'origin' uses origin
        times of the attached events.
    :param metric: 'avg_cor' (detect_val / no_chans) or 'cor_sum'
        (raw detect_val) used to rank competing detections.
    :returns: self.
    :raises MatchFilterError: for unknown timing or metric options.
    """
    all_detections = []
    for fam in self.families:
        all_detections.extend(fam.detections)
    # Build (time, ranking value) pairs per the requested options.
    if timing == 'detect':
        if metric == 'avg_cor':
            detect_info = [(d.detect_time, d.detect_val / d.no_chans)
                           for d in all_detections]
        elif metric == 'cor_sum':
            detect_info = [(d.detect_time, d.detect_val)
                           for d in all_detections]
        else:
            raise MatchFilterError('metric is not cor_sum or avg_cor')
    elif timing == 'origin':
        if metric == 'avg_cor':
            detect_info = [(_get_origin(d.event).time,
                            d.detect_val / d.no_chans)
                           for d in all_detections]
        elif metric == 'cor_sum':
            detect_info = [(_get_origin(d.event).time, d.detect_val)
                           for d in all_detections]
        else:
            raise MatchFilterError('metric is not cor_sum or avg_cor')
    else:
        raise MatchFilterError('timing is not detect or origin')
    # Convert times to integer microseconds after the earliest
    # detection so the numeric decluster routine can be used.
    min_det = sorted([d[0] for d in detect_info])[0]
    detect_vals = np.array([d[1] for d in detect_info])
    detect_times = np.array([
        _total_microsec(d[0].datetime, min_det.datetime)
        for d in detect_info])
    # This resolves to the module-level decluster helper, not this
    # method (method-local names do not shadow globals here).
    peaks_out = decluster(
        peaks=detect_vals, index=detect_times,
        trig_int=trig_int * 10 ** 6)
    # Map surviving (value, time) peaks back to Detection objects.
    declustered_detections = []
    for ind in peaks_out:
        matching_time_indeces = np.where(detect_times == ind[-1])[0]
        matches = matching_time_indeces[
            np.where(detect_vals[matching_time_indeces] == ind[0])[0][0]]
        declustered_detections.append(all_detections[matches])
    # Rebuild one family per template that still has detections.
    template_names = list(set(
        [d.template_name for d in declustered_detections]))
    new_families = []
    for template_name in template_names:
        template = [fam.template for fam in self.families
                    if fam.template.name == template_name][0]
        new_families.append(Family(
            template=template,
            detections=[d for d in declustered_detections
                        if d.template_name == template_name]))
    self.families = new_families
    return self
De - cluster a Party of detections by enforcing a detection separation .
14,452
def read(self, filename=None, read_detection_catalog=True):
    """Read a Party from one or more tar archives.

    :param filename: archive path, glob pattern, or list of either;
        if None a packaged test dataset is read.
    :param read_detection_catalog: if True, read the detection catalog
        and attach its events to the detections; otherwise events are
        regenerated from the templates.
    :returns: self, with families populated.
    """
    tribe = Tribe()
    families = []
    if filename is None:
        # Default test-data archive shipped with the package.
        filename = os.path.join(
            os.path.dirname(__file__), '..', 'tests', 'test_data',
            'test_party.tgz')
    if isinstance(filename, list):
        filenames = []
        for _filename in filename:
            # Expand glob patterns within each list entry.
            filenames.extend(glob.glob(_filename))
    else:
        filenames = glob.glob(filename)
    for _filename in filenames:
        with tarfile.open(_filename, "r:*") as arc:
            temp_dir = tempfile.mkdtemp()
            # Only extract members with safe paths and link targets.
            arc.extractall(path=temp_dir, members=_safemembers(arc))
            party_dir = glob.glob(temp_dir + os.sep + '*')[0]
            # Templates are needed to pair family files to templates.
            tribe._read_from_folder(dirname=party_dir)
            det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
            if len(det_cat_file) != 0 and read_detection_catalog:
                try:
                    all_cat = read_events(det_cat_file[0])
                except TypeError as e:
                    print(e)
                    pass
            else:
                all_cat = Catalog()
            for family_file in glob.glob(
                    join(party_dir, '*_detections.csv')):
                template = [t for t in tribe
                            if _templates_match(t, family_file)]
                family = Family(template=template[0] or Template())
                new_family = True
                # Merge with a family already read for this template.
                if family.template.name in [
                        f.template.name for f in families]:
                    family = [f for f in families
                              if f.template.name ==
                              family.template.name][0]
                    new_family = False
                # NOTE(review): assignment (not +=) means a repeated
                # template's earlier detections are replaced - confirm
                # this is intended when reading multiple archives.
                family.detections = _read_family(
                    fname=family_file, all_cat=all_cat,
                    template=template[0])
                if new_family:
                    families.append(family)
        shutil.rmtree(temp_dir)
    self.families = families
    return self
Read a Party from a file .
14,453
def get_catalog(self):
    """Collect the events of every family into one obspy Catalog.

    :returns: Catalog of all detection events in the party.
    """
    collected = Catalog()
    for family in self.families:
        if len(family.catalog) != 0:
            collected.events.extend(family.catalog.events)
    return collected
Get an obspy catalog object from the party .
14,454
def min_chans(self, min_chans):
    """Remove detections using fewer channels than ``min_chans``.

    Operates in place: only detections with a strictly greater
    ``no_chans`` are kept in each family.

    :param min_chans: minimum number of channels used (exclusive).
    :returns: self.
    """
    declustered = Party()
    for family in self.families:
        kept = Family(family.template)
        for detection in family.detections:
            if detection.no_chans > min_chans:
                kept.detections.append(detection)
        declustered.families.append(kept)
    self.families = declustered.families
    return self
Remove detections with fewer channels used than min_chans
14,455
def _uniq ( self ) : _detections = [ ] [ _detections . append ( d ) for d in self . detections if not _detections . count ( d ) ] self . detections = _detections return self
Get list of unique detections . Works in place .
14,456
def sort(self):
    """Sort this family's detections by detection time, in place.

    :returns: self, for chaining.
    """
    def _when(detection):
        return detection.detect_time
    self.detections = sorted(self.detections, key=_when)
    return self
Sort by detection time .
14,457
def plot(self, plot_grouped=False):
    """Plot the cumulative number of detections in time.

    :param plot_grouped: if True, plot all detections as one group.
    """
    cumulative_detections(detections=self.detections,
                          plot_grouped=plot_grouped)
Plot the cumulative number of detections in time .
14,458
def same_processing(self, other):
    """Check whether two templates share identical processing parameters.

    Attributes that do not affect processing (name, waveform stream,
    prepick, event and template_info) are ignored.

    :param other: template to compare against.
    :returns: True if every processing parameter matches.
    """
    ignored = ('name', 'st', 'prepick', 'event', 'template_info')
    for key, value in self.__dict__.items():
        if key in ignored:
            continue
        if value != other.__dict__[key]:
            return False
    return True
Check if the templates are processed the same .
14,459
def write(self, filename, format='tar'):
    """Write this template to disk.

    :param filename: output path.
    :param format: 'tar' writes full metadata via a single-template
        Tribe; any other value writes only the waveform through obspy.
    :returns: self.
    """
    if format != 'tar':
        self.st.write(filename, format=format)
    else:
        Tribe(templates=[self]).write(filename=filename)
    return self
Write template .
14,460
def read(self, filename):
    """Read a single template from a tar-format file with metadata.

    :param filename: path of the tar archive.
    :returns: self, populated from the file.
    :raises IOError: if the archive holds more than one template.
    """
    container = Tribe()
    container.read(filename=filename)
    if len(container) > 1:
        raise IOError('Multiple templates in file')
    source = container[0]
    for key in self.__dict__.keys():
        self.__dict__[key] = source.__dict__[key]
    return self
Read template from tar format with metadata .
14,461
def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
           pre_processed=False, daylong=False, parallel_process=True,
           xcorr_func=None, concurrency=None, cores=None,
           ignore_length=False, overlap="calculate", debug=0,
           full_peaks=False):
    """Detect using this single template within a continuous stream.

    Thin wrapper around ``_group_detect`` with a one-template group;
    see that function for the meaning of each parameter.

    :returns: the Family of detections made with this template.
    """
    detect_kwargs = dict(
        templates=[self], stream=stream.copy(), threshold=threshold,
        threshold_type=threshold_type, trig_int=trig_int,
        plotvar=plotvar, pre_processed=pre_processed, daylong=daylong,
        parallel_process=parallel_process, xcorr_func=xcorr_func,
        concurrency=concurrency, cores=cores,
        ignore_length=ignore_length, overlap=overlap, debug=debug,
        full_peaks=full_peaks)
    return _group_detect(**detect_kwargs)[0]
Detect using a single template within a continuous stream .
14,462
def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
              prepick, **kwargs):
    """Construct a single template using a given generation method.

    Methods that can return multiple templates are not supported here -
    use ``Tribe.construct`` for those.

    :param method: template_gen method name (single-event methods only).
    :param name: name to give the template.
    :param lowcut: bandpass low corner (Hz).
    :param highcut: bandpass high corner (Hz).
    :param samp_rate: sampling rate to resample to (Hz).
    :param filt_order: filter order.
    :param prepick: seconds before the pick to cut.
    :returns: self, populated.
    :raises NotImplementedError: for multi-template methods.
    """
    if method in ['from_meta_file', 'from_seishub', 'from_client',
                  'multi_template_gen']:
        raise NotImplementedError('Method is not supported, '
                                  'use Tribe.construct instead.')
    streams, events, process_lengths = template_gen.template_gen(
        method=method, lowcut=lowcut, highcut=highcut,
        filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
        return_event=True, **kwargs)
    self.name = name
    # Only the first generated template/event is used.
    st = streams[0]
    event = events[0]
    process_length = process_lengths[0]
    for tr in st:
        # All-zero data at float16 precision indicate missing data
        # that would harm correlations.
        # NOTE(review): removing while iterating the stream can skip
        # the following trace - confirm intended.
        if not np.any(tr.data.astype(np.float16)):
            warnings.warn('Data are zero in float16, missing data,'
                          ' will not use: %s' % tr.id)
            st.remove(tr)
    self.st = st
    self.lowcut = lowcut
    self.highcut = highcut
    self.filt_order = filt_order
    self.samp_rate = samp_rate
    self.process_length = process_length
    self.prepick = prepick
    self.event = event
    return self
Construct a template using a given method .
14,463
def sort(self):
    """Sort the tribe's templates by name, in place.

    :returns: self, for chaining.
    """
    def _template_name(template):
        return template.name
    self.templates = sorted(self.templates, key=_template_name)
    return self
Sort the tribe's templates by template name .
14,464
def select(self, template_name):
    """Return the template with the given name.

    :param template_name: name to look up.
    :returns: the first matching Template.
    :raises IndexError: if no template matches.
    """
    matching = [template for template in self.templates
                if template.name == template_name]
    return matching[0]
Select a particular template from the tribe .
14,465
def remove(self, template):
    """Remove a template from the tribe, in place.

    All templates comparing equal to ``template`` are dropped.

    :param template: the Template to remove.
    :returns: self.
    """
    self.templates = [kept for kept in self.templates
                      if kept != template]
    return self
Remove a template from the tribe .
14,466
def write(self, filename, compress=True, catalog_format="QUAKEML"):
    """Write the tribe to disk, optionally as a gzipped tar archive.

    :param filename: directory name (and archive basename) to write.
    :param compress: if True, tar+gzip the directory then remove it.
    :param catalog_format: obspy event format for the tribe catalog.
    :returns: self.
    :raises TypeError: if ``catalog_format`` is not supported.
    """
    if catalog_format not in CAT_EXT_MAP.keys():
        raise TypeError("{0} is not supported".format(catalog_format))
    if not os.path.isdir(filename):
        os.makedirs(filename)
    self._par_write(filename)
    # Collect per-template events into a single catalog file.
    tribe_cat = Catalog()
    for t in self.templates:
        if t.event is not None:
            tribe_cat.append(t.event)
    if len(tribe_cat) > 0:
        tribe_cat.write(
            os.path.join(filename, 'tribe_cat.{0}'.format(
                CAT_EXT_MAP[catalog_format])), format=catalog_format)
    # One miniseed waveform file per template, keyed by name.
    for template in self.templates:
        template.st.write(filename + '/' + template.name + '.ms',
                          format='MSEED')
    if compress:
        with tarfile.open(filename + '.tgz', "w:gz") as tar:
            tar.add(filename, arcname=os.path.basename(filename))
        # Remove the uncompressed staging directory.
        shutil.rmtree(filename)
    return self
Write the tribe to a file using tar archive formatting .
14,467
def _par_write ( self , dirname ) : filename = dirname + '/' + 'template_parameters.csv' with open ( filename , 'w' ) as parfile : for template in self . templates : for key in template . __dict__ . keys ( ) : if key not in [ 'st' , 'event' ] : parfile . write ( key + ': ' + str ( template . __dict__ [ key ] ) + ', ' ) parfile . write ( '\n' ) return self
Internal write function to write a formatted parameter file .
14,468
def read(self, filename):
    """Read a tribe of templates from a tar-formatted file.

    The archive is unpacked into a temporary directory (only safe
    members are extracted), read, and the directory removed.

    :param filename: path of the tar archive.
    :returns: self.
    """
    with tarfile.open(filename, "r:*") as arc:
        temp_dir = tempfile.mkdtemp()
        arc.extractall(path=temp_dir, members=_safemembers(arc))
        tribe_dir = glob.glob(temp_dir + os.sep + '*')[0]
        self._read_from_folder(dirname=tribe_dir)
    shutil.rmtree(temp_dir)
    return self
Read a tribe of templates from a tar formatted file .
14,469
def _read_from_folder(self, dirname):
    """Internal folder reader for a tribe written by ``write``.

    Reads template parameters, the tribe catalog (if present) and each
    template's miniseed waveform, then extends ``self.templates``.
    Templates already present (by name) are skipped.

    :param dirname: directory holding the unpacked tribe.

    .. note:: The original removed templates from the list while
        iterating over it, which silently skipped the template that
        followed any template missing its waveform (and re-added
        already-present names without waveforms).  Kept templates are
        now collected into a separate list.
    """
    templates = _par_read(dirname=dirname, compressed=False)
    t_files = glob.glob(dirname + os.sep + '*.ms')
    tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
    if len(tribe_cat_file) != 0:
        tribe_cat = read_events(tribe_cat_file[0])
    else:
        tribe_cat = Catalog()
    previous_template_names = [t.name for t in self.templates]
    kept_templates = []
    for template in templates:
        if template.name in previous_template_names:
            # Already in the tribe - do not re-read or duplicate.
            continue
        # Pair the template with its event via the marker comment.
        for event in tribe_cat:
            for comment in event.comments:
                if comment.text == 'eqcorrscan_template_' + template.name:
                    template.event = event
        t_file = [t for t in t_files
                  if t.split(os.sep)[-1] == template.name + '.ms']
        if len(t_file) == 0:
            print('No waveform for template: ' + template.name)
            continue
        elif len(t_file) > 1:
            print('Multiple waveforms found, using: ' + t_file[0])
        template.st = read(t_file[0])
        kept_templates.append(template)
    self.templates.extend(kept_templates)
    return
Internal folder reader .
14,470
def cluster(self, method, **kwargs):
    """Cluster the tribe into groups of related templates.

    :param method: name of a clustering function from
        eqcorrscan.utils.clustering ('space_cluster' or
        'space_time_cluster').
    :returns: list of Tribes, one per cluster group.
    """
    from eqcorrscan.utils import clustering
    tribes = []
    func = getattr(clustering, method)
    if method in ['space_cluster', 'space_time_cluster']:
        cat = Catalog([t.event for t in self.templates])
        groups = func(cat, **kwargs)
        for group in groups:
            members = []
            for event in group:
                members.extend(t for t in self.templates
                               if t.event == event)
            grouped_tribe = Tribe()
            grouped_tribe.templates.extend(members)
            tribes.append(grouped_tribe)
    return tribes
Cluster the tribe .
14,471
def construct(self, method, lowcut, highcut, samp_rate, filt_order,
              prepick, save_progress=False, **kwargs):
    """Generate a Tribe of Templates.

    :param method: template_gen method name.
    :param lowcut: bandpass low corner (Hz).
    :param highcut: bandpass high corner (Hz).
    :param samp_rate: sampling rate to resample to (Hz).
    :param filt_order: filter order.
    :param prepick: seconds before the pick to cut.
    :param save_progress: passed to template_gen for progress saving.
    :returns: self, with the new templates appended.
    """
    templates, catalog, process_lengths = template_gen.template_gen(
        method=method, lowcut=lowcut, highcut=highcut,
        filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
        return_event=True, save_progress=save_progress, **kwargs)
    for template, event, process_len in zip(templates, catalog,
                                            process_lengths):
        t = Template()
        for tr in template:
            # All-zero data at float16 precision indicate missing
            # data that would harm correlations.
            # NOTE(review): removing while iterating the stream can
            # skip the following trace - confirm intended.
            if not np.any(tr.data.astype(np.float16)):
                warnings.warn('Data are zero in float16, missing data,'
                              ' will not use: %s' % tr.id)
                template.remove(tr)
        if len(template) == 0:
            print('Empty Template')
            continue
        t.st = template
        # Name templates by their earliest trace start time.
        t.name = template.sort(
            ['starttime'])[0].stats.starttime.strftime(
            '%Y_%m_%dt%H_%M_%S')
        t.lowcut = lowcut
        t.highcut = highcut
        t.filt_order = filt_order
        t.samp_rate = samp_rate
        t.process_length = process_len
        t.prepick = prepick
        # Tag the event so it can be re-paired with this template when
        # the tribe is read back from disk.
        event.comments.append(Comment(
            text="eqcorrscan_template_" + t.name,
            creation_info=CreationInfo(agency='eqcorrscan',
                                       author=getpass.getuser())))
        t.event = event
        self.templates.append(t)
    return self
Generate a Tribe of Templates .
14,472
def write(self, fname, append=True):
    """Write this detection to a csv formatted file.

    :param fname: output filename.
    :param append: if True and the file exists, append the detection
        without repeating the header; otherwise (re)create the file.

    .. note:: The original wrote the header line on every call, so
        appended files accumulated duplicate headers (which the reader
        then had to skip); the header is now written only when the
        file is created.
    """
    header = '; '.join(
        ['Template name', 'Detection time (UTC)', 'Number of channels',
         'Channel list', 'Detection value', 'Threshold',
         'Threshold type', 'Input threshold', 'Detection type'])
    print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format(
        self.template_name, self.detect_time, self.no_chans, self.chans,
        self.detect_val, self.threshold, self.threshold_type,
        self.threshold_input, self.typeofdet)
    if append and os.path.isfile(fname):
        with open(fname, 'a') as _f:
            _f.write(print_str)
    else:
        with open(fname, 'w') as _f:
            _f.write(header + '\n')
            _f.write(print_str)
Write detection to csv formatted file .
14,473
def _calculate_event(self, template=None, template_st=None):
    """Calculate an obspy Event for this detection from its template.

    Picks are generated by shifting the template's trace start times
    by the detection time; the result is stored on ``self.event``.

    :param template: Template used for the detection (preferred; must
        match ``self.template_name`` or nothing is done).
    :param template_st: template Stream, used when no template given.
    """
    if template is not None and template.name != self.template_name:
        print("Template names do not match: {0}: {1}".format(
            template.name, self.template_name))
        return
    # The detection time forms part of the event's resource id.
    det_time = str(self.detect_time.strftime('%Y%m%dT%H%M%S.%f'))
    ev = Event(resource_id=ResourceIdentifier(
        id=self.template_name + '_' + det_time, prefix='smi:local'))
    ev.creation_info = CreationInfo(author='EQcorrscan',
                                    creation_time=UTCDateTime())
    # Record detection metadata as event comments.
    ev.comments.append(Comment(
        text='threshold={0}'.format(self.threshold)))
    ev.comments.append(Comment(
        text='detect_val={0}'.format(self.detect_val)))
    if self.chans is not None:
        ev.comments.append(Comment(
            text='channels used: {0}'.format(
                ' '.join([str(pair) for pair in self.chans]))))
    if template is not None:
        template_st = template.st
    min_template_tm = min([tr.stats.starttime for tr in template_st])
    for tr in template_st:
        if (tr.stats.station, tr.stats.channel) not in self.chans:
            # Only make picks for channels used in the detection.
            continue
        elif tr.stats.__contains__("not_in_original"):
            continue
        else:
            # Pick time = detection time + trace offset in template.
            pick_time = self.detect_time + (
                tr.stats.starttime - min_template_tm)
            ev.picks.append(Pick(
                time=pick_time, waveform_id=WaveformStreamID(
                    network_code=tr.stats.network,
                    station_code=tr.stats.station,
                    channel_code=tr.stats.channel,
                    location_code=tr.stats.location)))
    self.event = ev
    return
Calculate an event for this detection using a given template .
14,474
def mktemplates(network_code='GEONET',
                publicIDs=['2016p008122', '2016p008353', '2016p008155',
                           '2016p008194'], plot=True):
    """Functional wrapper to make templates from client-hosted events.

    :param network_code: FDSN client id (GEONET is special-cased to a
        direct quakeml download).
    :param publicIDs: event ids to build templates from.
    :param plot: whether to plot the catalog and the templates.

    .. note:: The mutable default for ``publicIDs`` is only safe here
        because the list is never modified.
    """
    client = Client(network_code)
    catalog = Catalog()
    for publicID in publicIDs:
        if network_code == 'GEONET':
            # GEONET's FDSN event service lacks arrivals; fetch the
            # quakeml document directly instead.
            data_stream = client._download(
                'http://quakeml.geonet.org.nz/quakeml/1.2/' + publicID)
            data_stream.seek(0, 0)
            catalog += read_events(data_stream, format="quakeml")
            data_stream.close()
        else:
            catalog += client.get_events(eventid=publicID,
                                         includearrivals=True)
    if plot:
        catalog.plot(projection='local', resolution='h')
    # Keep only picks from the five most-picked stations.
    catalog = filter_picks(catalog, top_n_picks=5)
    for event in catalog:
        for pick in event.picks:
            # NOTE(review): removing picks while iterating the same
            # list can skip the pick after each removal - confirm.
            if pick.phase_hint == 'S':
                event.picks.remove(pick)
    templates = template_gen.template_gen(
        method='from_client', catalog=catalog, client_id=network_code,
        lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4,
        length=3.0, prepick=0.15, swin='all', process_len=3600,
        debug=0, plot=plot)
    for i, template in enumerate(templates):
        template.write('tutorial_template_' + str(i) + '.ms',
                       format='MSEED')
    return
Functional wrapper to make templates
14,475
def _read_tt ( path , stations , phase , phaseout = 'S' , ps_ratio = 1.68 , lags_switch = True ) : gridfiles = [ ] stations_out = [ ] for station in stations : gridfiles += ( glob . glob ( path + '*.' + phase + '.' + station + '.time.csv' ) ) if glob . glob ( path + '*.' + phase + '.' + station + '*.csv' ) : stations_out += [ station ] allnodes = [ ] for gridfile in gridfiles : print ( ' Reading slowness from: ' + gridfile ) f = open ( gridfile , 'r' ) grid = csv . reader ( f , delimiter = str ( ' ' ) ) traveltime = [ ] nodes = [ ] for row in grid : nodes . append ( ( float ( row [ 0 ] ) , float ( row [ 1 ] ) , float ( row [ 2 ] ) ) ) traveltime . append ( float ( row [ 3 ] ) ) traveltime = np . array ( traveltime ) if not phase == phaseout : if phase == 'S' : traveltime = traveltime / ps_ratio else : traveltime = traveltime * ps_ratio if lags_switch : lags = traveltime - min ( traveltime ) else : lags = traveltime if 'alllags' not in locals ( ) : alllags = [ lags ] else : alllags = np . concatenate ( ( alllags , [ lags ] ) , axis = 0 ) allnodes = nodes f . close ( ) alllags = np . array ( alllags ) return stations_out , allnodes , alllags
Read in . csv files of slowness generated from Grid2Time .
14,476
def _resample_grid(stations, nodes, lags, mindepth, maxdepth, corners):
    """Resample the lag-time grid to a given volume.

    Nodes are kept when their depth lies strictly between ``mindepth``
    and ``maxdepth`` and their horizontal position is inside
    ``corners``.

    :param stations: station list (returned unchanged).
    :param nodes: list of (x, y, z) node coordinates.
    :param lags: lag array of shape (n_stations, n_nodes).
    :param mindepth: minimum depth (exclusive).
    :param maxdepth: maximum depth (exclusive).
    :param corners: object with a ``contains_point`` method defining
        the horizontal extent (presumably a matplotlib Path - confirm).
    :returns: (stations, kept nodes, lags restricted to kept nodes).
    """
    resamp_nodes = []
    resamp_lags = []
    for i, node in enumerate(nodes):
        # Depth window AND horizontal polygon test.
        if mindepth < float(node[2]) < maxdepth and \
                corners.contains_point(node[0:2]):
            resamp_nodes.append(node)
            resamp_lags.append([lags[:, i]])
    print(np.shape(resamp_lags))
    # Collapse per-node lag columns back to (n_stations, n_nodes).
    resamp_lags = np.reshape(
        resamp_lags, (len(resamp_lags), len(stations))).T
    print(' '.join(['Grid now has ', str(len(resamp_nodes)), 'nodes']))
    return stations, resamp_nodes, resamp_lags
Resample the lagtime grid to a given volume .
14,477
def _rm_similarlags ( stations , nodes , lags , threshold ) : netdif = abs ( ( lags . T - lags . T [ 0 ] ) . sum ( axis = 1 ) . reshape ( 1 , len ( nodes ) ) ) > threshold for i in range ( len ( nodes ) ) : _netdif = abs ( ( lags . T - lags . T [ i ] ) . sum ( axis = 1 ) . reshape ( 1 , len ( nodes ) ) ) > threshold netdif = np . concatenate ( ( netdif , _netdif ) , axis = 0 ) sys . stdout . write ( "\r" + str ( float ( i ) // len ( nodes ) * 100 ) + "% \r" ) sys . stdout . flush ( ) nodes_out = [ nodes [ 0 ] ] node_indices = [ 0 ] print ( "\n" ) print ( len ( nodes ) ) for i in range ( 1 , len ( nodes ) ) : if np . all ( netdif [ i ] [ node_indices ] ) : node_indices . append ( i ) nodes_out . append ( nodes [ i ] ) lags_out = lags . T [ node_indices ] . T print ( "Removed " + str ( len ( nodes ) - len ( nodes_out ) ) + " duplicate nodes" ) return stations , nodes_out , lags_out
Remove nodes that have a very similar network moveout to another node .
14,478
def _cum_net_resp(node_lis, instance=0):
    """Compute the cumulative network response from saved energy files.

    Reads ``tmp<instance>/node_<i>.npy`` files (deleting each after
    use) and keeps, per sample, the maximum energy over all nodes,
    along with the index of the node that produced it.

    :param node_lis: indices of the nodes to combine.
    :param instance: suffix of the temporary directory to read from.
    :returns: (cumulative network response, winning node indices).

    .. note:: NOTE(review): ``indices`` is rebound wholesale to
        ``updated_indices`` each iteration, and samples where the
        running maximum won keep the value 0 rather than the earlier
        winning node's index - this looks like a bug; confirm against
        upstream before relying on the returned indices.
    """
    cum_net_resp = np.load('tmp' + str(instance) + '/node_' +
                           str(node_lis[0]) + '.npy')[0]
    os.remove('tmp' + str(instance) + '/node_' + str(node_lis[0]) +
              '.npy')
    indices = np.ones(len(cum_net_resp)) * node_lis[0]
    for i in node_lis[1:]:
        node_energy = np.load('tmp' + str(instance) + '/node_' +
                              str(i) + '.npy')[0]
        # 0 where the running maximum wins, 1 where the new node wins.
        updated_indices = np.argmax([cum_net_resp, node_energy], axis=0)
        temp = np.array([cum_net_resp, node_energy])
        cum_net_resp = np.array([temp[updated_indices[j]][j]
                                 for j in range(len(updated_indices))])
        del temp, node_energy
        updated_indices[updated_indices == 1] = i
        indices = updated_indices
        os.remove('tmp' + str(instance) + '/node_' + str(i) + '.npy')
    return cum_net_resp, indices
Compute the cumulative network response by reading saved energy . npy files .
14,479
def _find_detections(cum_net_resp, nodes, threshold, thresh_type, samp_rate, realstations, length):
    """Find detections within the cumulative network response.

    :param cum_net_resp: 1D cumulative network response.
    :param nodes: list of (lat, lon, depth) nodes, indexed by sample.
    :param threshold: threshold multiplier (or absolute value for 'abs').
    :param thresh_type: one of 'MAD', 'abs' or 'RMS'.
    :param samp_rate: sampling rate of the network response in Hz.
    :param realstations: station names used, attached to each detection.
    :param length: minimum separation between detections in seconds.

    :returns: list of Detection objects of type 'brightness'.
    """
    cum_net_resp = np.nan_to_num(cum_net_resp)
    if np.isnan(cum_net_resp).any():
        raise ValueError("Nans present")
    print('Mean of data is: ' + str(np.median(cum_net_resp)))
    print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp)))))
    print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp))))
    # Translate the requested threshold type into an absolute threshold.
    if thresh_type == 'MAD':
        thresh = np.median(np.abs(cum_net_resp)) * threshold
    elif thresh_type == 'abs':
        thresh = threshold
    elif thresh_type == 'RMS':
        thresh = _rms(cum_net_resp) * threshold
    print('Threshold is set to: ' + str(thresh))
    print('Max of data is: ' + str(max(cum_net_resp)))
    peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh, length * samp_rate, debug=0)
    detections = []
    for peak in peaks or []:
        node = nodes[peak[1]]
        node_id = str(node[0]) + '_' + str(node[1]) + '_' + str(node[2])
        detections.append(Detection(
            template_name=node_id, detect_time=peak[1] / samp_rate,
            no_chans=len(realstations), detect_val=peak[0],
            threshold=thresh, typeofdet='brightness', chans=realstations,
            id=node_id + str(peak[1] / samp_rate),
            threshold_type=thresh_type, threshold_input=threshold))
    print('I have found ' + str(len(peaks)) + ' possible detections')
    return detections
Find detections within the cumulative network response .
14,480
def coherence(stream_in, stations=['all'], clip=False):
    """Determine the average network coherence of a given template or detection.

    :param stream_in: obspy Stream of traces; a copy is worked on.
    :param stations: list of station names to use, or ['all'] (default).
    :param clip: False, or a (start, end) pair of offsets in seconds used to
        trim each trace relative to its start time.

    :returns: tuple of (average pairwise zero-lag coherence, number of
        traces used).
    """
    stream = stream_in.copy()
    # Note: maxlen is measured before any station filtering.
    maxlen = np.max([len(tr.data) for tr in stream])
    if maxlen == 0:
        warnings.warn('template without data')
        return 0.0, len(stream)
    if not stations[0] == 'all':
        # BUG FIX: iterate over a snapshot -- removing traces from the
        # stream while iterating it directly skips the element that follows
        # each removal.
        for tr in list(stream):
            if tr.stats.station not in stations:
                stream.remove(tr)
    for tr in stream:
        if not len(tr.data) == maxlen and not len(tr.data) == 0:
            warnings.warn(tr.stats.station + '.' + tr.stats.channel +
                          ' is not the same length, padding \n' +
                          'Length is ' + str(len(tr.data)) + ' samples')
            pad = np.zeros(maxlen - len(tr.data))
            # Heuristic: a trace starting exactly on the hour is assumed to
            # be missing data at the start -- TODO confirm this assumption.
            if tr.stats.starttime.hour == 0:
                tr.data = np.concatenate((pad, tr.data), axis=0)
            else:
                tr.data = np.concatenate((tr.data, pad), axis=0)
        elif len(tr.data) == 0:
            tr.data = np.zeros(maxlen)
    if clip:
        for tr in stream:
            tr.trim(tr.stats.starttime + clip[0], tr.stats.starttime + clip[1])
    # Sum the zero-lag normalised cross-correlation over all trace pairs.
    _coherence = 0.0
    for i in range(len(stream)):
        for j in range(i + 1, len(stream)):
            _coherence += np.abs(normxcorr2(stream[i].data, stream[j].data))[0][0]
    # Average over the n*(n-1)/2 unique pairs.
    _coherence = 2 * _coherence / (len(stream) * (len(stream) - 1))
    return _coherence, len(stream)
Determine the average network coherence of a given template or detection .
14,481
def _do_ffts ( detector , stream , Nc ) : min_fftlen = int ( stream [ 0 ] [ 0 ] . data . shape [ 0 ] + detector . data [ 0 ] . shape [ 0 ] - Nc ) fftlen = scipy . fftpack . next_fast_len ( min_fftlen ) mplen = stream [ 0 ] [ 0 ] . data . shape [ 0 ] ulen = detector . data [ 0 ] . shape [ 0 ] num_st_fd = [ np . fft . rfft ( tr . data , n = fftlen ) for tr in stream [ 0 ] ] denom_st_fd = [ np . fft . rfft ( np . square ( tr . data ) , n = fftlen ) for tr in stream [ 0 ] ] w = np . fft . rfft ( np . ones ( detector . data [ 0 ] . shape [ 0 ] ) , n = fftlen ) detector_fd = [ ] for dat_mat in detector . data : detector_fd . append ( np . array ( [ np . fft . rfft ( col [ : : - 1 ] , n = fftlen ) for col in dat_mat . T ] ) ) return detector_fd , denom_st_fd , num_st_fd , w , ulen , mplen
Perform FFTs on the data, the detector, and the denominator boxcar.
14,482
def _det_stat_freq ( det_freq , data_freq_sq , data_freq , w , Nc , ulen , mplen ) : num_cor = np . multiply ( det_freq , data_freq ) den_cor = np . multiply ( w , data_freq_sq ) num_ifft = np . real ( np . fft . irfft ( num_cor ) ) [ : , ulen - 1 : mplen : Nc ] denominator = np . real ( np . fft . irfft ( den_cor ) ) [ ulen - 1 : mplen : Nc ] result = np . sum ( np . square ( num_ifft ) , axis = 0 ) / denominator return result
Compute detection statistic in the frequency domain
14,483
def multi(stream):
    """Internal multiplexer for multiplex_detect.

    Interleaves the samples of all traces in the stream into a single 1D
    array: tr0[0], tr1[0], ..., tr0[1], tr1[1], ...

    :param stream: iterable of traces, each with a 1D .data array of equal
        length.

    :returns: 1D numpy array of interleaved samples.
    """
    interleaved = stream[0].data
    for trace in stream[1:]:
        # dstack places the new trace along a third axis, pairing samples.
        interleaved = np.dstack(np.array([interleaved, trace.data]))
    return interleaved.reshape(interleaved.size, )
Internal multiplexer for multiplex_detect .
14,484
def subspace_detect(detectors, stream, threshold, trig_int, moveout=0, min_trig=1, parallel=True, num_cores=None):
    """Conduct subspace detection with chosen detectors.

    Detectors sharing identical processing parameters are grouped so the
    stream is only processed once per unique parameter set.

    :param detectors: list of subspace Detector objects.
    :param stream: obspy Stream to scan (never modified; copies are
        processed per parameter set).
    :param threshold: detection threshold passed to _detect.
    :param trig_int: minimum trigger interval in seconds.
    :param moveout: allowed moveout in seconds (default 0).
    :param min_trig: minimum number of channels to trigger on (default 1).
    :param parallel: run detectors of each group in parallel (default True).
    :param num_cores: number of processes to use; defaults to cpu_count().

    :returns: list of Detection objects from all detectors.
    """
    from multiprocessing import Pool, cpu_count
    # Collect the unique processing-parameter sets across all detectors.
    parameters = []
    detections = []
    for detector in detectors:
        parameter = (detector.lowcut, detector.highcut, detector.filt_order,
                     detector.sampling_rate, detector.multiplex,
                     detector.stachans)
        if parameter not in parameters:
            parameters.append(parameter)
    for parameter_set in parameters:
        parameter_detectors = []
        for detector in detectors:
            det_par = (detector.lowcut, detector.highcut,
                       detector.filt_order, detector.sampling_rate,
                       detector.multiplex, detector.stachans)
            if det_par == parameter_set:
                parameter_detectors.append(detector)
        # BUG FIX: the processed stream was previously assigned back to
        # `stream`, so every parameter set after the first operated on
        # already-processed data (and on a list rather than a Stream).
        # Keep the raw input intact and process a fresh copy each time.
        processed_stream, stachans = _subspace_process(
            streams=[stream.copy()], lowcut=parameter_set[0],
            highcut=parameter_set[1], filt_order=parameter_set[2],
            sampling_rate=parameter_set[3], multiplex=parameter_set[4],
            stachans=parameter_set[5], parallel=True, align=False,
            shift_len=None, reject=False)
        if not parallel:
            for detector in parameter_detectors:
                detections += _detect(
                    detector=detector, st=processed_stream[0],
                    threshold=threshold, trig_int=trig_int, moveout=moveout,
                    min_trig=min_trig, process=False,
                    extract_detections=False, debug=0)
        else:
            ncores = num_cores if num_cores else cpu_count()
            pool = Pool(processes=ncores)
            results = [pool.apply_async(
                _detect, args=(detector, processed_stream[0], threshold,
                               trig_int, moveout, min_trig, False, False, 0))
                for detector in parameter_detectors]
            pool.close()
            try:
                _detections = [p.get() for p in results]
            except KeyboardInterrupt as e:
                # Make sure workers die before propagating the interrupt.
                pool.terminate()
                raise e
            pool.join()
            # _detect may return a single Detection or a list of them.
            for d in _detections:
                if isinstance(d, list):
                    detections += d
                else:
                    detections.append(d)
    return detections
Conduct subspace detection with chosen detectors .
14,485
def construct(self, streams, lowcut, highcut, filt_order, sampling_rate, multiplex, name, align, shift_len=0, reject=0.3, no_missed=True, plot=False):
    """Construct a full-rank subspace detector from a list of streams.

    Records the processing parameters on the detector, processes deep
    copies of the input streams, runs a singular value decomposition and
    stores the full set of left singular vectors as the detector data.

    :returns: self, for chaining.
    """
    # Record the processing parameters so detection can reproduce them.
    self.lowcut = lowcut
    self.highcut = highcut
    self.filt_order = filt_order
    self.sampling_rate = sampling_rate
    self.name = name
    self.multiplex = multiplex
    # Deep-copy so the caller's streams are left untouched.
    processed, used_stachans = _subspace_process(
        streams=copy.deepcopy(streams), lowcut=lowcut, highcut=highcut,
        filt_order=filt_order, sampling_rate=sampling_rate,
        multiplex=multiplex, align=align, shift_len=shift_len,
        reject=reject, plot=plot, no_missed=no_missed)
    left, singular_values, right, svd_stachans = svd(
        stream_list=processed, full=True)
    self.stachans = used_stachans
    self.u = left
    self.v = right
    self.sigma = singular_values
    # Keep a separate copy of u to partition without losing the full basis.
    self.data = copy.deepcopy(left)
    self.dimension = np.inf
    return self
Construct a subspace detector from a list of streams full rank .
14,486
def partition(self, dimension):
    """Partition the subspace into the desired dimension.

    Truncates each channel's basis to the first `dimension` columns,
    storing the result in self.data.

    :param dimension: number of basis vectors to retain per channel.

    :returns: self, for chaining.

    :raises IndexError: if any channel has fewer than `dimension` vectors.
    """
    for idx, basis in enumerate(self.u):
        limit = self.v[idx].shape[1]
        if limit < dimension:
            raise IndexError('Channel is max dimension %s' % limit)
        self.data[idx] = basis[:, 0:dimension]
    self.dimension = dimension
    return self
Partition subspace into desired dimension .
14,487
def energy_capture(self, stachans='all', size=(10, 7), show=False):
    """Calculate the average percentage energy capture for this subspace.

    :param stachans: channels to consider when plotting, or 'all'.
    :param size: figure size, only used when show is True.
    :param show: if True, delegate to subspace_fc_plot instead.

    :returns: average percentage of energy captured by the current
        dimension, or 100 for a full-rank (infinite-dimension) detector.
    """
    if show:
        return subspace_fc_plot(detector=self, stachans=stachans, size=size, show=show)
    # A full-rank detector captures all of the energy by definition.
    if np.isinf(self.dimension):
        return 100
    captured = 0
    for singular_values in self.sigma:
        # Fraction of this channel's energy in the retained dimensions.
        captured += np.sum(singular_values[0:self.dimension]) / np.sum(singular_values)
    return 100 * (captured / len(self.sigma))
Calculate the average percentage energy capture for this subspace .
14,488
def write(self, filename):
    """Write detector to a file - uses HDF5 file format.

    Saves the partitioned detector data, the processing parameters and the
    full SVD (u, sigma, v) so the detector can be re-partitioned after
    reading back.

    :param filename: path of the HDF5 file to create (overwritten if it
        already exists).

    :returns: self, for chaining.
    """
    f = h5py.File(filename, "w")
    # Partitioned detector basis vectors, one dataset per channel.
    data_group = f.create_group(name="data")
    for i, data in enumerate(self.data):
        dset = data_group.create_dataset(name="data_" + str(i), shape=data.shape, dtype=data.dtype)
        dset[...] = data
    # Scalar metadata stored as attributes on the data group.
    data_group.attrs['length'] = len(self.data)
    data_group.attrs['name'] = self.name.encode("ascii", "ignore")
    data_group.attrs['sampling_rate'] = self.sampling_rate
    data_group.attrs['multiplex'] = self.multiplex
    data_group.attrs['lowcut'] = self.lowcut
    data_group.attrs['highcut'] = self.highcut
    data_group.attrs['filt_order'] = self.filt_order
    data_group.attrs['dimension'] = self.dimension
    # Provenance: who wrote the file and with which code version.
    data_group.attrs['user'] = getpass.getuser()
    data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
    # Station.channel identifiers, ascii-encoded for HDF5 storage.
    ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore") for stachan in self.stachans]
    stachans = f.create_dataset(name="stachans", shape=(len(ascii_stachans),), dtype='S10')
    stachans[...] = ascii_stachans
    # Full (unpartitioned) left singular vectors, per channel.
    u_group = f.create_group("u")
    for i, u in enumerate(self.u):
        uset = u_group.create_dataset(name="u_" + str(i), shape=u.shape, dtype=u.dtype)
        uset[...] = u
    u_group.attrs['length'] = len(self.u)
    # Singular values, per channel.
    sigma_group = f.create_group("sigma")
    for i, sigma in enumerate(self.sigma):
        sigmaset = sigma_group.create_dataset(name="sigma_" + str(i), shape=sigma.shape, dtype=sigma.dtype)
        sigmaset[...] = sigma
    sigma_group.attrs['length'] = len(self.sigma)
    # Right singular vectors, per channel.
    v_group = f.create_group("v")
    for i, v in enumerate(self.v):
        vset = v_group.create_dataset(name="v_" + str(i), shape=v.shape, dtype=v.dtype)
        vset[...] = v
    v_group.attrs['length'] = len(self.v)
    f.flush()
    f.close()
    return self
Write detector to a file - uses HDF5 file format .
14,489
def read(self, filename):
    """Read detector from a file, must be HDF5 format (as written by write).

    :param filename: path of the HDF5 file to read.

    :returns: self, for chaining.
    """
    f = h5py.File(filename, "r")
    # Datasets are read with [()] rather than the deprecated .value
    # property (removed in h5py 3.0); both return the full array.
    self.data = []
    for i in range(f['data'].attrs['length']):
        self.data.append(f['data']['data_' + str(i)][()])
    self.u = []
    for i in range(f['u'].attrs['length']):
        self.u.append(f['u']['u_' + str(i)][()])
    self.sigma = []
    for i in range(f['sigma'].attrs['length']):
        self.sigma.append(f['sigma']['sigma_' + str(i)][()])
    self.v = []
    for i in range(f['v'].attrs['length']):
        self.v.append(f['v']['v_' + str(i)][()])
    # stachans were stored as ascii 'STA.CHAN' byte strings.
    self.stachans = [tuple(stachan.decode('ascii').split('.'))
                     for stachan in f['stachans'][()]]
    self.dimension = f['data'].attrs['dimension']
    self.filt_order = f['data'].attrs['filt_order']
    self.highcut = f['data'].attrs['highcut']
    self.lowcut = f['data'].attrs['lowcut']
    self.multiplex = bool(f['data'].attrs['multiplex'])
    self.sampling_rate = f['data'].attrs['sampling_rate']
    # The name attribute may come back as str or bytes depending on how it
    # was written; normalise to str.
    if isinstance(f['data'].attrs['name'], str):
        self.name = f['data'].attrs['name']
    else:
        self.name = f['data'].attrs['name'].decode('ascii')
    # BUG FIX: the file handle was previously never closed.
    f.close()
    return self
Read detector from a file must be HDF5 format .
14,490
def plot(self, stachans='all', size=(10, 7), show=True):
    """Plot the output basis vectors for the detector at the given dimension.

    :param stachans: list of (station, channel) tuples to plot, or 'all'.
    :param size: figure size as a (width, height) tuple.
    :param show: whether to display the figure (passed through).

    :returns: the return value of subspace_detector_plot.
    """
    return subspace_detector_plot(detector=self, stachans=stachans, size=size, show=show)
Plot the output basis vectors for the detector at the given dimension .
14,491
def export_symbols(*path):
    """Required for windows systems - functions defined in libutils.def.

    Reads the symbol names from the .def file at os.path.join(*path),
    skipping the two header lines and blank lines.

    :param path: path components joined to locate the .def file.

    :returns: list of stripped, non-empty symbol names.
    """
    # BUG FIX: the original opened the file without ever closing it;
    # a context manager guarantees the handle is released.
    with open(os.path.join(*path), 'r') as f:
        lines = f.readlines()[2:]
    return [s.strip() for s in lines if s.strip() != '']
Required for Windows systems - reads the function names defined in libutils.def.
14,492
def dist_calc(loc1, loc2):
    """Calculate the distance in km between two points.

    Uses a flat-Earth approximation for the horizontal separation and adds
    the depth difference in quadrature.

    :param loc1: (latitude, longitude, depth-in-km) of the first point.
    :param loc2: (latitude, longitude, depth-in-km) of the second point.

    :returns: distance between the points in km.
    """
    earth_radius = 6371.009  # mean Earth radius in km
    dlat = np.radians(abs(loc1[0] - loc2[0]))
    dlon = np.radians(abs(loc1[1] - loc2[1]))
    ddepth = abs(loc1[2] - loc2[2])
    # Scale the longitude difference by the cosine of the mean latitude.
    mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
    horizontal = earth_radius * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlon) ** 2)
    return np.sqrt(horizontal ** 2 + ddepth ** 2)
Function to calculate the distance in km between two points .
14,493
def calc_max_curv ( magnitudes , plotvar = False ) : counts = Counter ( magnitudes ) df = np . zeros ( len ( counts ) ) mag_steps = np . zeros ( len ( counts ) ) grad = np . zeros ( len ( counts ) - 1 ) grad_points = grad . copy ( ) for i , magnitude in enumerate ( sorted ( counts . keys ( ) , reverse = True ) ) : mag_steps [ i ] = magnitude if i > 0 : df [ i ] = counts [ magnitude ] + df [ i - 1 ] else : df [ i ] = counts [ magnitude ] for i , val in enumerate ( df ) : if i > 0 : grad [ i - 1 ] = ( val - df [ i - 1 ] ) / ( mag_steps [ i ] - mag_steps [ i - 1 ] ) grad_points [ i - 1 ] = mag_steps [ i ] - ( ( mag_steps [ i ] - mag_steps [ i - 1 ] ) / 2.0 ) curvature = np . zeros ( len ( grad ) - 1 ) curvature_points = curvature . copy ( ) for i , _grad in enumerate ( grad ) : if i > 0 : curvature [ i - 1 ] = ( _grad - grad [ i - 1 ] ) / ( grad_points [ i ] - grad_points [ i - 1 ] ) curvature_points [ i - 1 ] = grad_points [ i ] - ( ( grad_points [ i ] - grad_points [ i - 1 ] ) / 2.0 ) if plotvar : plt . scatter ( mag_steps , df , c = 'k' , label = 'Magnitude function' ) plt . plot ( mag_steps , df , c = 'k' ) plt . scatter ( grad_points , grad , c = 'r' , label = 'Gradient' ) plt . plot ( grad_points , grad , c = 'r' ) plt . scatter ( curvature_points , curvature , c = 'g' , label = 'Curvature' ) plt . plot ( curvature_points , curvature , c = 'g' ) plt . legend ( ) plt . show ( ) return curvature_points [ np . argmax ( abs ( curvature ) ) ]
Calculate the magnitude of completeness using the maximum curvature method .
14,494
def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False):
    """Remove the instrument response from a trace and simulate a Wood-Anderson.

    The trace is detrended and its data replaced in place.

    :param trace: trace to process; modified in place and returned.
    :param PAZ: poles-and-zeros dict to remove, or falsy to skip.
    :param seedresp: SEED RESP description dict, used when no PAZ is given.
    :param water_level: water level for deconvolution stabilisation.
    :param velocity: if True simulate a Wood-Anderson velocity response
        (extra zero), otherwise displacement.

    :returns: the trace with the Wood-Anderson response simulated.
    """
    # Standard Wood-Anderson response parameters.
    PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
              'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
    if velocity:
        PAZ_WA['zeros'] = [0 + 0j, 0 + 0j]
    trace.detrend('simple')
    if PAZ:
        trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
                              paz_remove=PAZ, paz_simulate=PAZ_WA,
                              water_level=water_level,
                              remove_sensitivity=True)
    elif seedresp:
        trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
                              paz_remove=None, paz_simulate=PAZ_WA,
                              water_level=water_level, seedresp=seedresp)
    else:
        # BUG FIX: the original constructed a UserWarning instance without
        # raising or issuing it, so the message was silently discarded.
        warnings.warn('No response given to remove, will just simulate WA',
                      UserWarning)
        trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
                              paz_remove=None, paz_simulate=PAZ_WA,
                              water_level=water_level)
    return trace
Remove the instrument response from a trace and simulate a Wood - Anderson .
14,495
def _GSE2_PAZ_read ( gsefile ) : with open ( gsefile , 'r' ) as f : header = f . readline ( ) if not header [ 0 : 4 ] == 'CAL2' : raise IOError ( 'Unknown format for GSE file, only coded for CAL2' ) station = header . split ( ) [ 1 ] channel = header . split ( ) [ 2 ] sensor = header . split ( ) [ 3 ] date = dt . datetime . strptime ( header . split ( ) [ 7 ] , '%Y/%m/%d' ) header = f . readline ( ) if not header [ 0 : 4 ] == 'PAZ2' : raise IOError ( 'Unknown format for GSE file, only coded for PAZ2' ) gain = float ( header . split ( ) [ 3 ] ) kpoles = int ( header . split ( ) [ 4 ] ) kzeros = int ( header . split ( ) [ 5 ] ) poles = [ ] for i in range ( kpoles ) : pole = f . readline ( ) poles . append ( complex ( float ( pole . split ( ) [ 0 ] ) , float ( pole . split ( ) [ 1 ] ) ) ) zeros = [ ] for i in range ( kzeros ) : zero = f . readline ( ) zeros . append ( complex ( float ( zero . split ( ) [ 0 ] ) , float ( zero . split ( ) [ 1 ] ) ) ) for line in f : if line [ 0 : 4 ] == 'DIG2' : sensitivity = float ( line . split ( ) [ 2 ] ) PAZ = { 'poles' : poles , 'zeros' : zeros , 'gain' : gain , 'sensitivity' : sensitivity } return PAZ , date , station , channel , sensor
Read the instrument response information from a GSE Poles and Zeros file .
14,496
def _find_resp(station, channel, network, time, delta, directory):
    """Helper function to find the response information for a trace.

    Searches `directory` for SEED RESP files and GSE poles-and-zeros files
    matching the given station/channel/network, returning the first usable
    response found.

    :param station: station code.
    :param channel: channel code.
    :param network: network code.
    :param time: time for which the response must be valid.
    :param delta: sample interval in seconds, used to test-evaluate RESP files.
    :param directory: directory to search for response files.

    :returns: a PAZ dict (from a GSE file) or a seedresp dict (from a RESP
        file); implicitly returns None when nothing usable is found.
    """
    # Candidate RESP files under several naming conventions.
    possible_respfiles = glob.glob(directory + os.path.sep + 'RESP.' + network + '.' + station + '.*.' + channel)
    possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' + network + '.' + channel + '.' + station)
    possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' + station + '.' + network)
    station = str(station)
    channel = str(channel)
    # GSE files named like STA__CH_Z.*_GSE with '_'-padded fields.
    possible_respfiles += glob.glob(directory + os.path.sep + station.ljust(5, str('_')) + channel[0:len(channel) - 1].ljust(3, str('_')) + channel[-1] + '.*_GSE')
    PAZ = []
    seedresp = []
    for respfile in possible_respfiles:
        print('Reading response from: ' + respfile)
        if respfile.split(os.path.sep)[-1][0:4] == 'RESP':
            seedresp = {'filename': respfile, 'date': UTCDateTime(time), 'units': 'DIS', 'network': network, 'station': station, 'channel': channel, 'location': '*'}
            try:
                # Test-evaluate the RESP file so broken files are skipped.
                freq_resp, freqs = evalresp(delta, 100, seedresp['filename'], seedresp['date'], units=seedresp['units'], freq=True, network=seedresp['network'], station=seedresp['station'], channel=seedresp['channel'])
            # NOTE(review): bare except silently swallows every error type
            # (including KeyboardInterrupt) -- consider narrowing.
            except:
                print('Issues with RESP file')
                seedresp = []
                continue
        elif respfile[-3:] == 'GSE':
            PAZ, pazdate, pazstation, pazchannel, pazsensor = _GSE2_PAZ_read(respfile)
            # NOTE(review): rejects the file only when the date AND channel
            # AND station all mismatch -- presumably any single mismatch
            # should reject; confirm the intended logic.
            if pazdate >= time and pazchannel != channel and pazstation != station:
                print('Issue with GSE file')
                print('date: ' + str(pazdate) + ' channel: ' + pazchannel + ' station: ' + pazstation)
                PAZ = []
        else:
            # Not a recognised response file type.
            continue
        # Stop at the first usable response.
        if PAZ or seedresp:
            break
    if PAZ:
        return PAZ
    elif seedresp:
        return seedresp
Helper function to find the response information .
14,497
def _pairwise ( iterable ) : a , b = itertools . tee ( iterable ) next ( b , None ) if sys . version_info . major == 2 : return itertools . izip ( a , b ) else : return zip ( a , b )
Wrapper on itertools for SVD_magnitude .
14,498
def filter_picks(catalog, stations=None, channels=None, networks=None, locations=None, top_n_picks=None, evaluation_mode='all'):
    """Filter events in the catalog based on a number of parameters.

    Works on a copy of the catalog; events left with no picks after
    filtering are dropped from the returned catalog.

    :param catalog: catalog of events to filter.
    :param stations: iterable of station codes to keep, or None for all.
    :param channels: iterable of channel codes to keep, or None for all.
    :param networks: iterable of network codes to keep, or None for all.
    :param locations: iterable of location codes to keep, or None for all.
    :param top_n_picks: if set, keep only picks on the n most frequently
        picked (station, channel) pairs across the whole catalog.
    :param evaluation_mode: 'all' (default), 'manual' or 'automatic'.

    :returns: new catalog containing only events that retain picks.
    """
    filtered_catalog = catalog.copy()
    if stations:
        for event in filtered_catalog:
            if len(event.picks) == 0:
                continue
            event.picks = [pick for pick in event.picks if pick.waveform_id.station_code in stations]
    if channels:
        for event in filtered_catalog:
            if len(event.picks) == 0:
                continue
            event.picks = [pick for pick in event.picks if pick.waveform_id.channel_code in channels]
    if networks:
        for event in filtered_catalog:
            if len(event.picks) == 0:
                continue
            event.picks = [pick for pick in event.picks if pick.waveform_id.network_code in networks]
    if locations:
        for event in filtered_catalog:
            if len(event.picks) == 0:
                continue
            event.picks = [pick for pick in event.picks if pick.waveform_id.location_code in locations]
    if evaluation_mode == 'manual':
        for event in filtered_catalog:
            event.picks = [pick for pick in event.picks if pick.evaluation_mode == 'manual']
    elif evaluation_mode == 'automatic':
        for event in filtered_catalog:
            event.picks = [pick for pick in event.picks if pick.evaluation_mode == 'automatic']
    elif evaluation_mode != 'all':
        warnings.warn('Unrecognised evaluation_mode: %s, using all picks' % evaluation_mode)
    if top_n_picks:
        # Count how often each (station, channel) pair is picked.
        all_picks = []
        for event in filtered_catalog:
            all_picks += [(pick.waveform_id.station_code, pick.waveform_id.channel_code) for pick in event.picks]
        counted = Counter(all_picks).most_common()
        all_picks = []
        # Walk down from the highest count, adding stachans in order of
        # frequency; ties are broken alphabetically by station code.
        for i in range(counted[0][1]):
            highest = [item[0] for item in counted if item[1] >= counted[0][1] - i]
            # Sort stachans alphabetically so ties break deterministically.
            highest = sorted(highest, key=lambda tup: tup[0])
            for stachan in highest:
                if stachan not in all_picks:
                    all_picks.append(stachan)
            if len(all_picks) > top_n_picks:
                all_picks = all_picks[0:top_n_picks]
                break
        for event in filtered_catalog:
            if len(event.picks) == 0:
                continue
            event.picks = [pick for pick in event.picks if (pick.waveform_id.station_code, pick.waveform_id.channel_code) in all_picks]
    # Drop events that have no picks remaining.
    tmp_catalog = Catalog()
    for event in filtered_catalog:
        if len(event.picks) > 0:
            tmp_catalog.append(event)
    return tmp_catalog
Filter events in the catalog based on a number of parameters .
14,499
def spatial_clip(catalog, corners, mindepth=None, maxdepth=None):
    """Clip the catalog to a spatial box, which can be irregular.

    Events without a resolvable origin are retained.

    :param catalog: catalog of events to clip; a copy is worked on.
    :param corners: object with a contains_point method, called with
        (latitude, longitude).
    :param mindepth: minimum depth in km, or None for no lower bound.
    :param maxdepth: maximum depth in km, or None for no upper bound.

    :returns: clipped copy of the catalog.
    """
    cat_out = catalog.copy()
    # BUG FIX: the original removed events from the catalog while iterating
    # it directly, which skips the event that follows each removal.
    # Iterating over a snapshot (list(...)) makes removal safe.
    if mindepth is not None:
        for event in list(cat_out.events):
            try:
                origin = _get_origin(event)
            except IOError:
                # No origin to test against - keep the event.
                continue
            if origin.depth < mindepth * 1000:  # origin depths are in metres
                cat_out.events.remove(event)
    if maxdepth is not None:
        for event in list(cat_out.events):
            try:
                origin = _get_origin(event)
            except IOError:
                continue
            if origin.depth > maxdepth * 1000:
                cat_out.events.remove(event)
    for event in list(cat_out.events):
        try:
            origin = _get_origin(event)
        except IOError:
            continue
        if not corners.contains_point((origin.latitude, origin.longitude)):
            cat_out.events.remove(event)
    return cat_out
Clip the catalog to a spatial box; the box can be irregular.