FrequencySeries.lal
def lal(self):
    """Produces a LAL frequency series object equivalent to self.

    Returns
    -------
    lal_data : {lal.*FrequencySeries}
        LAL frequency series object containing the same data as self.
        The actual type depends on the sample's dtype. If the epoch of
        self was 'None', the epoch of the returned LAL object will be
        LIGOTimeGPS(0,0); otherwise, the same as that of self.

    Raises
    ------
    TypeError
        If frequency series is stored in GPU memory.
    """
    lal_data = None
    if self._epoch is None:
        ep = _lal.LIGOTimeGPS(0, 0)
    else:
        ep = self._epoch

    # Pick the LAL constructor that matches this series' dtype
    if self._data.dtype == _numpy.float32:
        lal_data = _lal.CreateREAL4FrequencySeries("", ep, 0, self.delta_f,
                                                   _lal.SecondUnit, len(self))
    elif self._data.dtype == _numpy.float64:
        lal_data = _lal.CreateREAL8FrequencySeries("", ep, 0, self.delta_f,
                                                   _lal.SecondUnit, len(self))
    elif self._data.dtype == _numpy.complex64:
        lal_data = _lal.CreateCOMPLEX8FrequencySeries("", ep, 0, self.delta_f,
                                                      _lal.SecondUnit, len(self))
    elif self._data.dtype == _numpy.complex128:
        lal_data = _lal.CreateCOMPLEX16FrequencySeries("", ep, 0, self.delta_f,
                                                       _lal.SecondUnit, len(self))

    lal_data.data.data[:] = self.numpy()

    return lal_data
FrequencySeries.save
def save(self, path, group=None, ifo='P1'):
    """Save frequency series to a Numpy .npy, hdf, or text file. The
    first column contains the sample frequencies, the second contains the
    values. In the case of a complex frequency series saved as text, the
    imaginary part is written as a third column. When using hdf format,
    the data is stored as a single vector, along with relevant attributes.

    Parameters
    ----------
    path: string
        Destination file path. Must end with either .hdf, .npy, .txt,
        .xml, or .xml.gz.
    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in a supported extension.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        output = _numpy.vstack((self.sample_frequencies.numpy(),
                                self.numpy())).T
        _numpy.save(path, output)
    elif ext == '.txt':
        if self.kind == 'real':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy())).T
        elif self.kind == 'complex':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy().real,
                                    self.numpy().imag)).T
        _numpy.savetxt(path, output)
    elif ext == '.xml' or path.endswith('.xml.gz'):
        from pycbc.io.live import make_psd_xmldoc
        from glue.ligolw import utils
        if self.kind != 'real':
            raise ValueError('XML only supports real frequency series')
        output = self.lal()
        # When writing in this format we must *not* have 0 values at
        # frequencies less than flow. To resolve this, set everything
        # below the first non-zero value to that value.
        data_lal = output.data.data
        first_idx = _numpy.argmax(data_lal > 0)
        if not first_idx == 0:
            data_lal[:first_idx] = data_lal[first_idx]
        psddict = {ifo: output}
        utils.write_filename(make_psd_xmldoc(psddict), path,
                             gz=path.endswith(".gz"))
    elif ext == '.hdf':
        key = 'data' if group is None else group
        # 'a' matches the historical h5py default open mode
        f = h5py.File(path, 'a')
        ds = f.create_dataset(key, data=self.numpy(),
                              compression='gzip', compression_opts=9,
                              shuffle=True)
        ds.attrs['epoch'] = float(self.epoch)
        ds.attrs['delta_f'] = float(self.delta_f)
    else:
        raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz '
                         'or .hdf')
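A minimal round-trip sketch of the text format (hedged: the file name and sample values are illustrative):

import numpy
from pycbc.types import FrequencySeries

fs = FrequencySeries(numpy.arange(5, dtype=numpy.float64), delta_f=0.25)
fs.save('example_psd.txt')                     # columns: frequency, value
freqs, vals = numpy.loadtxt('example_psd.txt').T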
FrequencySeries.to_timeseries
def to_timeseries(self, delta_t=None):
    """Return the inverse Fourier transform of this frequency series.

    Note that this assumes an even-length time series!

    Parameters
    ----------
    delta_t : {None, float}, optional
        The time resolution of the returned series. By default the
        resolution is determined by the length and delta_f of this
        frequency series.

    Returns
    -------
    TimeSeries:
        The inverse Fourier transform of this frequency series.
    """
    from pycbc.fft import ifft
    from pycbc.types import TimeSeries, real_same_precision_as

    nat_delta_t = 1.0 / ((len(self) - 1) * 2) / self.delta_f
    if not delta_t:
        delta_t = nat_delta_t

    # add 0.5 to round integer
    tlen = int(1.0 / self.delta_f / delta_t + 0.5)
    flen = int(tlen / 2 + 1)

    if flen < len(self):
        raise ValueError("The value of delta_t (%s) would be "
                         "undersampled. Maximum delta_t "
                         "is %s." % (delta_t, nat_delta_t))

    # zero-pad a copy up to the target frequency-domain length
    tmp = FrequencySeries(zeros(flen, dtype=self.dtype),
                          delta_f=self.delta_f, epoch=self.epoch)
    tmp[:len(self)] = self[:]

    f = TimeSeries(zeros(tlen, dtype=real_same_precision_as(self)),
                   delta_t=delta_t)
    ifft(tmp, f)
    return f
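A quick round-trip sketch (assumes PyCBC is installed; the noise values are illustrative). An even-length real series survives the FD/TD round trip up to floating-point error:

import numpy
from pycbc.types import TimeSeries

ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0/1024)
fs = ts.to_frequencyseries()    # forward FFT
ts2 = fs.to_timeseries()        # inverse FFT at the natural delta_t
print(abs(ts2.numpy() - ts.numpy()).max())  # tiny residual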
FrequencySeries.cyclic_time_shift
def cyclic_time_shift(self, dt):
    """Shift the data and timestamps by a given number of seconds.

    Shift the data and timestamps in the time domain by a given number of
    seconds. To just change the time stamps, do ts.start_time += dt.
    The time shift may be smaller than the intrinsic sample rate of the
    data. Note that the data will be cyclically rotated, so if you shift
    by 2 seconds, the final 2 seconds of your data will now be at the
    beginning of the data set.

    Parameters
    ----------
    dt : float
        Amount of time to shift the vector.

    Returns
    -------
    data : pycbc.types.FrequencySeries
        The time shifted frequency series.
    """
    from pycbc.waveform import apply_fseries_time_shift
    data = apply_fseries_time_shift(self, dt)
    data.start_time = self.start_time - dt
    return data
stack_xi_direction_brute
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num,
                             req_match, massRangeParams, metricParams,
                             fUpper, scaleFactor=0.8, numIterations=3000):
    """
    This function is used to assess the depth of the xi_space in a
    specified dimension at a specified point in the higher dimensions. It
    does this by iteratively throwing points at the space to find maxima
    and minima.

    Parameters
    -----------
    xis : list or array
        Position in the xi space at which to assess the depth. This can
        be only a subset of the higher dimensions than that being sampled.
    bestMasses : list
        Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
        mapped to xi coordinates in bestXis that is close to the xis
        point. This is aimed to give the code a starting point.
    bestXis : list
        Contains the position of bestMasses in the xi coordinate system.
    direction_num : int
        The dimension whose depth you want to assess (0-indexed, so 0 is
        the first dimension, 1 the second, ...).
    req_match : float
        When considering points to assess the depth with, only consider
        points with a mismatch that is smaller than this with xis.
    massRangeParams : massRangeParameters instance
        Instance holding all the details of mass ranges and spin ranges.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix needed to
        manipulate the space.
    fUpper : float
        The value of fUpper that was used when obtaining the xi_i
        coordinates. This lets us know how to rotate potential physical
        points into the correct xi_i space. This must be a key in
        metricParams.evals, metricParams.evecs and metricParams.evecsCV
        (ie. we must know how to do the transformation for the given
        value of fUpper)
    scaleFactor : float, optional (default = 0.8)
        The value of the scale factor that is used when calling
        pycbc.tmpltbank.get_mass_distribution.
    numIterations : int, optional (default = 3000)
        The number of times to make calls to get_mass_distribution when
        assessing the maximum/minimum of this parameter space. Making
        this smaller makes the code faster, but at the cost of accuracy.

    Returns
    --------
    xi_min : float
        The minimal value of the specified dimension at the specified
        point in parameter space.
    xi_max : float
        The maximal value of the specified dimension at the specified
        point in parameter space.
    """
    # Find minimum
    ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num,
                                  req_match, massRangeParams, metricParams,
                                  fUpper, find_minimum=True,
                                  scaleFactor=scaleFactor,
                                  numIterations=numIterations)

    # Find maximum
    ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num,
                                  req_match, massRangeParams, metricParams,
                                  fUpper, find_minimum=False,
                                  scaleFactor=scaleFactor,
                                  numIterations=numIterations)

    return ximin, ximax
find_xi_extrema_brute
def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num,
                          req_match, massRangeParams, metricParams, fUpper,
                          find_minimum=False, scaleFactor=0.8,
                          numIterations=3000):
    """
    This function is used to find the largest or smallest value of the xi
    space in a specified dimension at a specified point in the higher
    dimensions. It does this by iteratively throwing points at the space
    to find extrema.

    Parameters
    -----------
    xis : list or array
        Position in the xi space at which to assess the depth. This can
        be only a subset of the higher dimensions than that being sampled.
    bestMasses : list
        Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
        mapped to xi coordinates in bestXis that is close to the xis
        point. This is aimed to give the code a starting point.
    bestXis : list
        Contains the position of bestMasses in the xi coordinate system.
    direction_num : int
        The dimension whose extremum you want to find (0-indexed, so 0 is
        the first dimension, 1 the second, ...).
    req_match : float
        When considering points to assess the depth with, only consider
        points with a mismatch that is smaller than this with xis.
    massRangeParams : massRangeParameters instance
        Instance holding all the details of mass ranges and spin ranges.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix needed to
        manipulate the space.
    fUpper : float
        The value of fUpper that was used when obtaining the xi_i
        coordinates. This lets us know how to rotate potential physical
        points into the correct xi_i space. This must be a key in
        metricParams.evals, metricParams.evecs and metricParams.evecsCV
        (ie. we must know how to do the transformation for the given
        value of fUpper)
    find_minimum : boolean, optional (default = False)
        If True, find the minimum value of the xi direction. If False
        find the maximum value.
    scaleFactor : float, optional (default = 0.8)
        The value of the scale factor that is used when calling
        pycbc.tmpltbank.get_mass_distribution.
    numIterations : int, optional (default = 3000)
        The number of times to make calls to get_mass_distribution when
        assessing the maximum/minimum of this parameter space. Making
        this smaller makes the code faster, but at the cost of accuracy.

    Returns
    --------
    xi_extent : float
        The extremal value of the specified dimension at the specified
        point in parameter space.
    """
    # Setup
    xi_size = len(xis)
    bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
    if find_minimum:
        xiextrema = 10000000000
    else:
        xiextrema = -100000000000

    for _ in range(numIterations):
        # Evaluate extrema of the xi direction specified
        totmass, eta, spin1z, spin2z, _, _, new_xis = get_mass_distribution(
            [bestChirpmass, bestMasses[1], bestMasses[2], bestMasses[3]],
            scaleFactor, massRangeParams, metricParams, fUpper)
        cDist = (new_xis[0] - xis[0])**2
        for j in range(1, xi_size):
            cDist += (new_xis[j] - xis[j])**2
        redCDist = cDist[cDist < req_match]
        if len(redCDist):
            # Mask out points that are too far away, then take the extremum
            if not find_minimum:
                new_xis[direction_num][cDist > req_match] = -10000000
                currXiExtrema = (new_xis[direction_num]).max()
                idx = (new_xis[direction_num]).argmax()
            else:
                new_xis[direction_num][cDist > req_match] = 10000000
                currXiExtrema = (new_xis[direction_num]).min()
                idx = (new_xis[direction_num]).argmin()
            if (((not find_minimum) and (currXiExtrema > xiextrema)) or
                    (find_minimum and (currXiExtrema < xiextrema))):
                # New extremum found; recenter the search on that point
                xiextrema = currXiExtrema
                bestMasses[0] = totmass[idx]
                bestMasses[1] = eta[idx]
                bestMasses[2] = spin1z[idx]
                bestMasses[3] = spin2z[idx]
                bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
    return xiextrema
is_main_process
def is_main_process():
    """ Check if this is the main control process and may handle one time
    tasks """
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        return rank == 0
    except (ImportError, ValueError, RuntimeError):
        # No (working) MPI available: treat this process as the main one
        return True
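A typical guard for one-time tasks under MPI (a sketch; the setup work shown is a placeholder):

from pycbc.pool import is_main_process

if is_main_process():
    # e.g. create an output directory or write a run header exactly once
    print('main control process: doing one-time setup')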
_lockstep_fcn
def _lockstep_fcn(values):
    """ Wrapper to ensure that all processes execute together """
    numrequired, fcn, args = values
    with _process_lock:
        _numdone.value += 1
    # yep this is an ugly busy loop, do something better please
    # when we care about the performance of this call and not just the
    # guarantee it provides (ok... maybe never)
    while 1:
        if _numdone.value == numrequired:
            return fcn(args)
BroadcastPool.broadcast
def broadcast(self, fcn, args):
    """ Do a function call on every worker.

    Parameters
    ----------
    fcn: function
        Function to call.
    args: tuple
        The arguments for Pool.map
    """
    results = self.map(_lockstep_fcn,
                       [(len(self), fcn, args)] * len(self))
    _numdone.value = 0
    return results
BroadcastPool.allmap
def allmap(self, fcn, args):
    """ Do a function call on every worker with different arguments.

    Parameters
    ----------
    fcn: function
        Function to call.
    args: tuple
        The arguments for Pool.map
    """
    results = self.map(_lockstep_fcn,
                       [(len(self), fcn, arg) for arg in args])
    _numdone.value = 0
    return results
BroadcastPool.map
def map(self, func, items, chunksize=None):
    """ Catch keyboard interrupts to allow the pool to exit cleanly.

    Parameters
    ----------
    func: function
        Function to call
    items: list of tuples
        Arguments to pass
    chunksize: int, Optional
        Number of calls for each process to handle at once
    """
    results = self.map_async(func, items, chunksize)
    while True:
        try:
            return results.get(1800)
        except TimeoutError:
            pass
        except KeyboardInterrupt:
            self.terminate()
            self.join()
            raise KeyboardInterrupt
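A small sketch of how these BroadcastPool methods fit together (hedged: the worker function is a toy placeholder):

from pycbc.pool import BroadcastPool

def initialize(seed):
    # placeholder one-time setup work done in each worker process
    return seed

if __name__ == '__main__':
    pool = BroadcastPool(2)
    pool.broadcast(initialize, 42)          # every worker runs initialize(42)
    print(pool.allmap(initialize, [1, 2]))  # one argument per worker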
lfilter
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        # Perform the equivalent FIR filtering as an FFT-based correlation
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return out.numpy() / len(out)
resample_to_delta_t
def resample_to_delta_t(timeseries, delta_t, method='butterworth'):
    """Resample the time series to delta_t

    Resamples the TimeSeries instance timeseries to the given time step,
    delta_t. Only powers of two and real valued time series are supported
    at this time. Additional restrictions may apply to particular filter
    methods.

    Parameters
    ----------
    timeseries: TimeSeries
        The time series to be resampled
    delta_t: float
        The desired time step

    Returns
    -------
    Time Series: TimeSeries
        A TimeSeries that has been resampled to delta_t.

    Raises
    ------
    TypeError:
        timeseries is not an instance of TimeSeries.
    TypeError:
        timeseries is not real valued

    Examples
    --------
    >>> h_plus_sampled = resample_to_delta_t(h_plus, 1.0/2048)
    """
    if not isinstance(timeseries, TimeSeries):
        raise TypeError("Can only resample time series")

    if timeseries.kind != 'real':
        raise TypeError("Time series must be real")

    if timeseries.delta_t == delta_t:
        return timeseries * 1

    if method == 'butterworth':
        lal_data = timeseries.lal()
        _resample_func[timeseries.dtype](lal_data, delta_t)
        data = lal_data.data.data
    elif method == 'ldas':
        factor = int(delta_t / timeseries.delta_t)
        numtaps = factor * 20 + 1

        # The kaiser window has been tested against the LDAS
        # implementation and is in the same configuration as used in the
        # original lalinspiral
        filter_coefficients = scipy.signal.firwin(numtaps, 1.0 / factor,
                                                  window=('kaiser', 5))

        # apply the filter and decimate
        data = fir_zero_filter(filter_coefficients, timeseries)[::factor]
    else:
        raise ValueError('Invalid resampling method: %s' % method)

    ts = TimeSeries(data, delta_t=delta_t,
                    dtype=timeseries.dtype,
                    epoch=timeseries._epoch)

    # From the construction of the LDAS FIR filter there will be 10
    # corrupted samples; explanation here:
    # http://software.ligo.org/docs/lalsuite/lal/group___resample_time_series__c.html
    ts.corrupted_samples = 10
    return ts
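A usage sketch with synthetic noise (illustrative values; the 'ldas' method decimates by the integer factor between the two time steps):

import numpy
from pycbc.types import TimeSeries
from pycbc.filter import resample_to_delta_t

ts = TimeSeries(numpy.random.normal(size=8192).astype(numpy.float64),
                delta_t=1.0/8192)
ts_low = resample_to_delta_t(ts, 1.0/2048, method='ldas')  # 4x decimation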
highpass
def highpass(timeseries, frequency, filter_order=8, attenuation=0.1):
    """Return a new timeseries that is highpassed.

    Return a new time series that is highpassed above the `frequency`.

    Parameters
    ----------
    timeseries: TimeSeries
        The time series to be high-passed.
    frequency: float
        The frequency below which is suppressed.
    filter_order: {8, int}, optional
        The order of the filter to use when high-passing the time series.
    attenuation: {0.1, float}, optional
        The attenuation of the filter.

    Returns
    -------
    Time Series: TimeSeries
        A new TimeSeries that has been high-passed.

    Raises
    ------
    TypeError:
        timeseries is not an instance of TimeSeries.
    TypeError:
        timeseries is not real valued
    """
    if not isinstance(timeseries, TimeSeries):
        raise TypeError("Can only high-pass time series")

    if timeseries.kind != 'real':
        raise TypeError("Time series must be real")

    lal_data = timeseries.lal()
    _highpass_func[timeseries.dtype](lal_data, frequency,
                                     1 - attenuation, filter_order)

    return TimeSeries(lal_data.data.data, delta_t=lal_data.deltaT,
                      dtype=timeseries.dtype, epoch=timeseries._epoch)
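A usage sketch (synthetic data; a 15 Hz cutoff is a typical choice for ground-based detector data):

import numpy
from pycbc.types import TimeSeries
from pycbc.filter import highpass

ts = TimeSeries(numpy.random.normal(size=4096).astype(numpy.float64),
                delta_t=1.0/1024)
conditioned = highpass(ts, 15.0, filter_order=8, attenuation=0.1)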
interpolate_complex_frequency
def interpolate_complex_frequency(series, delta_f, zeros_offset=0,
                                  side='right'):
    """Interpolate complex frequency series to desired delta_f.

    Return a new complex frequency series that has been interpolated to
    the desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output
    zeros_offset : optional, {0, int}
        Number of sample to delay the start of the zero padding
    side : optional, {'right', str}
        The side of the vector to zero pad

    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    new_n = int((len(series) - 1) * series.delta_f / delta_f + 1)
    old_N = int((len(series) - 1) * 2)
    new_N = int((new_n - 1) * 2)
    time_series = TimeSeries(zeros(old_N),
                             delta_t=1.0 / (series.delta_f * old_N),
                             dtype=real_same_precision_as(series))

    ifft(series, time_series)
    time_series.roll(-zeros_offset)
    time_series.resize(new_N)

    if side == 'left':
        time_series.roll(zeros_offset + new_N - old_N)
    elif side == 'right':
        time_series.roll(zeros_offset)

    out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
                                 delta_f=delta_f, dtype=series.dtype)
    fft(time_series, out_series)

    return out_series
multiifo_noise_coinc_rate
def multiifo_noise_coinc_rate(rates, slop):
    """
    Calculate the expected rate of noise coincidences for multiple
    detectors

    Parameters
    ----------
    rates: dict
        Dictionary keyed on ifo string
        Value is a sequence of single-detector trigger rates, units
        assumed to be Hz
    slop: float
        time added to maximum time-of-flight between detectors to account
        for timing error

    Returns
    -------
    expected_coinc_rates: dict
        Dictionary keyed on the ifo combination string
        Value is expected coincidence rate in the combination, units Hz
    """
    ifos = numpy.array(sorted(rates.keys()))
    rates_raw = list(rates[ifo] for ifo in ifos)
    expected_coinc_rates = {}

    # Calculate coincidence for all-ifo combination
    # multiply product of trigger rates by the overlap time
    allowed_area = multiifo_noise_coincident_area(ifos, slop)
    rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)]
    ifostring = ' '.join(ifos)
    expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod)

    # if more than one possible coincidence type exists,
    # calculate coincidence for subsets through recursion
    if len(ifos) > 2:
        # Calculate rate for each 'miss-one-out' detector combination
        subsets = itertools.combinations(ifos, len(ifos) - 1)
        for subset in subsets:
            rates_subset = {}
            for ifo in subset:
                rates_subset[ifo] = rates[ifo]
            sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop)
            # add these sub-coincidences to the overall dictionary
            for sub_coinc in sub_coinc_rates:
                expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc]

    return expected_coinc_rates
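A toy call (hedged: the import path is assumed to be pycbc.events.coinc_rate, and the rates are illustrative):

import numpy
from pycbc.events.coinc_rate import multiifo_noise_coinc_rate

rates = {'H1': numpy.array([1e-2]),
         'L1': numpy.array([1e-2]),
         'V1': numpy.array([5e-3])}   # single-detector rates in Hz
coincs = multiifo_noise_coinc_rate(rates, slop=0.005)
# keys: 'H1 L1 V1' plus each two-detector subset, e.g. 'H1 L1'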
multiifo_noise_coincident_area
def multiifo_noise_coincident_area(ifos, slop):
    """
    Calculate the total extent of time offset between 2 detectors, or the
    area of the 2d space of time offsets for 3 detectors, for which a
    coincidence can be generated

    Parameters
    ----------
    ifos: list of strings
        list of interferometers
    slop: float
        extra time to add to maximum time-of-flight for timing error

    Returns
    -------
    allowed_area: float
        area in units of seconds^(n_ifos-1) that coincident values can
        fall in
    """
    # set up detector objects
    dets = {}
    for ifo in ifos:
        dets[ifo] = pycbc.detector.Detector(ifo)

    n_ifos = len(ifos)

    if n_ifos == 2:
        allowed_area = 2. * \
            (dets[ifos[0]].light_travel_time_to_detector(dets[ifos[1]]) +
             slop)
    elif n_ifos == 3:
        tofs = numpy.zeros(n_ifos)
        ifo2_num = []

        # calculate travel time between detectors (plus extra for timing
        # error)
        # TO DO: allow for different timing errors between different
        # detectors
        for i, ifo in enumerate(ifos):
            ifo2_num.append(int(numpy.mod(i + 1, n_ifos)))
            det0 = dets[ifo]
            det1 = dets[ifos[ifo2_num[i]]]
            tofs[i] = det0.light_travel_time_to_detector(det1) + slop

        # combine these to calculate allowed area
        allowed_area = 0
        for i, _ in enumerate(ifos):
            allowed_area += 2 * tofs[i] * tofs[ifo2_num[i]] - tofs[i]**2
    else:
        raise NotImplementedError("Not able to deal with more than 3 ifos")

    return allowed_area
multiifo_signal_coincident_area
def multiifo_signal_coincident_area(ifos):
    """
    Calculate the area in which signal time differences are physically
    allowed

    Parameters
    ----------
    ifos: list of strings
        list of interferometers

    Returns
    -------
    allowed_area: float
        area in units of seconds^(n_ifos-1) that coincident signals
        will occupy
    """
    n_ifos = len(ifos)
    if n_ifos == 2:
        det0 = pycbc.detector.Detector(ifos[0])
        det1 = pycbc.detector.Detector(ifos[1])
        allowed_area = 2 * det0.light_travel_time_to_detector(det1)
    elif n_ifos == 3:
        dets = {}
        tofs = numpy.zeros(n_ifos)
        ifo2_num = []

        # set up detector objects
        for ifo in ifos:
            dets[ifo] = pycbc.detector.Detector(ifo)

        # calculate travel time between detectors
        for i, ifo in enumerate(ifos):
            ifo2_num.append(int(numpy.mod(i + 1, n_ifos)))
            det0 = dets[ifo]
            det1 = dets[ifos[ifo2_num[i]]]
            tofs[i] = det0.light_travel_time_to_detector(det1)

        # calculate allowed area
        phi_12 = numpy.arccos((tofs[0]**2 + tofs[1]**2 - tofs[2]**2) /
                              (2 * tofs[0] * tofs[1]))
        allowed_area = numpy.pi * tofs[0] * tofs[1] * numpy.sin(phi_12)
    else:
        raise NotImplementedError("Not able to deal with more than 3 ifos")

    return allowed_area
lm_amps_phases
def lm_amps_phases(**kwargs):
    """ Take input_params and return dictionaries with amplitudes and
    phases of each overtone of a specific lm mode, checking that all of
    them are given.
    """
    l, m = kwargs['l'], kwargs['m']
    amps, phis = {}, {}

    # amp220 is always required, because the amplitudes of subdominant
    # modes are given as fractions of amp220.
    try:
        amps['220'] = kwargs['amp220']
    except KeyError:
        raise ValueError('amp220 is always required')

    # Get amplitudes of subdominant modes and all phases
    for n in range(kwargs['nmodes']):
        # If it is the 22 mode, skip 220
        if (l, m, n) != (2, 2, 0):
            try:
                amps['%d%d%d' % (l, m, n)] = \
                    kwargs['amp%d%d%d' % (l, m, n)] * amps['220']
            except KeyError:
                raise ValueError('amp%d%d%d is required' % (l, m, n))
        try:
            phis['%d%d%d' % (l, m, n)] = kwargs['phi%d%d%d' % (l, m, n)]
        except KeyError:
            raise ValueError('phi%d%d%d is required' % (l, m, n))

    return amps, phis
lm_freqs_taus
def lm_freqs_taus(**kwargs):
    """ Take input_params and return dictionaries with frequencies and
    damping times of each overtone of a specific lm mode, checking that
    all of them are given.
    """
    lmns = kwargs['lmns']
    freqs, taus = {}, {}

    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        for n in range(nmodes):
            try:
                freqs['%d%d%d' % (l, m, n)] = kwargs['f_%d%d%d' % (l, m, n)]
            except KeyError:
                raise ValueError('f_%d%d%d is required' % (l, m, n))
            try:
                taus['%d%d%d' % (l, m, n)] = \
                    kwargs['tau_%d%d%d' % (l, m, n)]
            except KeyError:
                raise ValueError('tau_%d%d%d is required' % (l, m, n))

    return freqs, taus
qnm_freq_decay
def qnm_freq_decay(f_0, tau, decay):
    """Return the frequency at which the amplitude of the ringdown falls
    to a given fraction (decay) of the peak amplitude.

    Parameters
    ----------
    f_0 : float
        The ringdown-frequency, which gives the peak amplitude.
    tau : float
        The damping time of the sinusoid.
    decay : float
        The fraction of the peak amplitude.

    Returns
    -------
    f_decay : float
        The frequency at which the amplitude of the frequency-domain
        ringdown falls to the fraction decay of the peak amplitude.
    """
    q_0 = pi * f_0 * tau
    alpha = 1. / decay
    alpha_sq = 1. / decay / decay

    # Expression obtained analytically under the assumption
    # that 1./alpha_sq, q_0^2 >> 1
    q_sq = (alpha_sq + 4*q_0*q_0 +
            alpha*numpy.sqrt(alpha_sq + 16*q_0*q_0)) / 4.

    return numpy.sqrt(q_sq) / pi / tau
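A worked numeric check of the closed-form expression (values are illustrative):

from math import pi, sqrt

f_0, tau, decay = 250.0, 0.004, 1e-3
q_0 = pi * f_0 * tau
alpha = 1.0 / decay
alpha_sq = alpha * alpha
q_sq = (alpha_sq + 4 * q_0**2 + alpha * sqrt(alpha_sq + 16 * q_0**2)) / 4.0
f_decay = sqrt(q_sq) / (pi * tau)
# f_decay lands far above f_0, since the Lorentzian tail decays slowly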
spher_harms
def spher_harms(l, m, inclination):
    """Return spherical harmonic polarizations
    """
    # FIXME: we are using spin -2 weighted spherical harmonics for now,
    # when possible switch to spheroidal harmonics.
    Y_lm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2, l, m).real
    Y_lminusm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2,
                                                  l, -m).real
    Y_plus = Y_lm + (-1)**l * Y_lminusm
    Y_cross = Y_lm - (-1)**l * Y_lminusm

    return Y_plus, Y_cross
apply_taper
def apply_taper(delta_t, taper, f_0, tau, amp, phi, l, m, inclination):
    """Return the tapered (ringup) segments of the plus and cross
    polarizations preceding t=0.
    """
    # Times of tapering do not include t=0
    taper_times = -numpy.arange(1, int(taper*tau/delta_t))[::-1] * delta_t
    Y_plus, Y_cross = spher_harms(l, m, inclination)

    taper_hp = amp * Y_plus * numpy.exp(10*taper_times/tau) * \
        numpy.cos(two_pi*f_0*taper_times + phi)
    taper_hc = amp * Y_cross * numpy.exp(10*taper_times/tau) * \
        numpy.sin(two_pi*f_0*taper_times + phi)

    return taper_hp, taper_hc
get_td_qnm
def get_td_qnm(template=None, taper=None, **kwargs):
    """Return a time domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration
        taper * tau. This option is recommended with timescales
        taper=1./2 or 1. for time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the
        waveform when doing the Fourier transform to the frequency
        domain. Setting taper will add a rapid ringup with timescale
        tau/10.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    amp : float
        The amplitude of the ringdown (constant for now).
    phi : float
        The initial phase of the ringdown. Should also include the
        information from the azimuthal angle (phi_0 + m*Phi)
    inclination : {None, float}, optional
        Inclination of the system in radians for the spherical harmonics.
    l : {2, int}, optional
        l mode for the spherical harmonics. Default is l=2.
    m : {2, int}, optional
        m mode for the spherical harmonics. Default is m=2.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which
        the amplitude is 1/1000 of the peak amplitude.
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude is
        1/1000 of the peak amplitude.

    Returns
    -------
    hplus: TimeSeries
        The plus phase of the ringdown in time domain.
    hcross: TimeSeries
        The cross phase of the ringdown in time domain.
    """
    input_params = props(template, qnm_required_args, **kwargs)

    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following may not be in input_params
    inc = input_params.pop('inclination', None)
    l = input_params.pop('l', 2)
    m = input_params.pop('m', 2)
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if not delta_t:
        delta_t = 1. / qnm_freq_decay(f_0, tau, 1./1000)
        if delta_t < min_dt:
            delta_t = min_dt
    if not t_final:
        t_final = qnm_time_decay(tau, 1./1000)
    kmax = int(t_final / delta_t) + 1
    times = numpy.arange(kmax) * delta_t

    if inc is not None:
        Y_plus, Y_cross = spher_harms(l, m, inc)
    else:
        Y_plus, Y_cross = 1, 1

    hplus = amp * Y_plus * numpy.exp(-times/tau) * \
        numpy.cos(two_pi*f_0*times + phi)
    hcross = amp * Y_cross * numpy.exp(-times/tau) * \
        numpy.sin(two_pi*f_0*times + phi)

    if taper and delta_t < taper*tau:
        taper_window = int(taper*tau/delta_t)
        kmax += taper_window

    outplus = TimeSeries(zeros(kmax), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax), delta_t=delta_t)

    # If size of tapering window is less than delta_t, do not apply taper.
    if not taper or delta_t > taper*tau:
        outplus.data[:kmax] = hplus
        outcross.data[:kmax] = hcross

        return outplus, outcross
    else:
        taper_hp, taper_hc = apply_taper(delta_t, taper, f_0, tau, amp,
                                         phi, l, m, inc)
        start = -taper * tau
        outplus.data[:taper_window] = taper_hp
        outplus.data[taper_window:] = hplus
        outcross.data[:taper_window] = taper_hc
        outcross.data[taper_window:] = hcross
        outplus._epoch, outcross._epoch = start, start

        return outplus, outcross
get_fd_qnm
def get_fd_qnm(template=None, **kwargs):
    """Return a frequency domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    amp : float
        The amplitude of the ringdown (constant for now).
    phi : float
        The initial phase of the ringdown. Should also include the
        information from the azimuthal angle (phi_0 + m*Phi).
    inclination : {None, float}, optional
        Inclination of the system in radians for the spherical harmonics.
    l : {2, int}, optional
        l mode for the spherical harmonics. Default is l=2.
    m : {2, int}, optional
        m mode for the spherical harmonics. Default is m=2.
    t_0 : {0, float}, optional
        The starting time of the ringdown.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude.
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude.

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of the ringdown in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of the ringdown in frequency domain.
    """
    input_params = props(template, qnm_required_args, **kwargs)

    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following have defaults, and so will be populated
    t_0 = input_params.pop('t_0')
    # the following may not be in input_params
    inc = input_params.pop('inclination', None)
    l = input_params.pop('l', 2)
    m = input_params.pop('m', 2)
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if not delta_f:
        delta_f = 1. / qnm_time_decay(tau, 1./1000)
    if not f_lower:
        f_lower = delta_f
        kmin = 0
    else:
        kmin = int(f_lower / delta_f)
    if not f_final:
        f_final = qnm_freq_decay(f_0, tau, 1./1000)
    if f_final > max_freq:
        f_final = max_freq
    kmax = int(f_final / delta_f) + 1

    freqs = numpy.arange(kmin, kmax) * delta_f
    if inc is not None:
        Y_plus, Y_cross = spher_harms(l, m, inc)
    else:
        Y_plus, Y_cross = 1, 1

    denominator = 1 + (4j * pi * freqs * tau) - \
        (4 * pi_sq * (freqs*freqs - f_0*f_0) * tau*tau)
    norm = amp * tau / denominator
    if t_0 != 0:
        time_shift = numpy.exp(-1j * two_pi * freqs * t_0)
        norm *= time_shift

    # Analytical expression for the Fourier transform of the ringdown
    # (damped sinusoid)
    hp_tilde = norm * Y_plus * ((1 + 2j * pi * freqs * tau) *
                                numpy.cos(phi) -
                                two_pi * f_0 * tau * numpy.sin(phi))
    hc_tilde = norm * Y_cross * ((1 + 2j * pi * freqs * tau) *
                                 numpy.sin(phi) +
                                 two_pi * f_0 * tau * numpy.cos(phi))

    outplus = FrequencySeries(zeros(kmax, dtype=complex128),
                              delta_f=delta_f)
    outcross = FrequencySeries(zeros(kmax, dtype=complex128),
                               delta_f=delta_f)
    outplus.data[kmin:kmax] = hp_tilde
    outcross.data[kmin:kmax] = hc_tilde

    return outplus, outcross
get_td_lm
def get_td_lm(template=None, taper=None, **kwargs):
    """Return time domain lm mode with the given number of overtones.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration
        taper * tau. This option is recommended with timescales
        taper=1./2 or 1. for time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the
        waveform when doing the Fourier transform to the frequency
        domain. Setting taper will add a rapid ringup with timescale
        tau/10. Each overtone will have a different taper depending on
        its tau, the final taper being the superposition of all the
        tapers.
    freqs : dict {lmn:f_lmn}
        Dictionary of the central frequencies for each overtone,
        as many as number of modes.
    taus : dict {lmn:tau_lmn}
        Dictionary of the damping times for each overtone,
        as many as number of modes.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amp220 : float
        Amplitude of the fundamental 220 mode, needed for any lm.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should
        also include the information from the azimuthal angle
        (phi + m*Phi).
    inclination : {None, float}, optional
        Inclination of the system in radians for the spherical harmonics.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which
        the amplitude is 1/1000 of the peak amplitude (the minimum of all
        modes).
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude is
        1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplus: TimeSeries
        The plus phase of a lm mode with overtones (n) in time domain.
    hcross: TimeSeries
        The cross phase of a lm mode with overtones (n) in time domain.
    """
    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    f_0 = input_params.pop('freqs')
    tau = input_params.pop('taus')
    inc = input_params.pop('inclination', None)
    l, m = input_params.pop('l'), input_params.pop('m')
    nmodes = input_params.pop('nmodes')
    if int(nmodes) == 0:
        raise ValueError('Number of overtones (nmodes) must be greater '
                         'than zero.')
    # The following may not be in input_params
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if not delta_t:
        delta_t = lm_deltat(f_0, tau, ['%d%d%d' % (l, m, nmodes)])
    if not t_final:
        t_final = lm_tfinal(tau, ['%d%d%d' % (l, m, nmodes)])

    kmax = int(t_final / delta_t) + 1
    # Different overtones will have different tapering window-size
    # Find maximum window size to create long enough output vector
    if taper:
        taper_window = int(taper * max(tau.values()) / delta_t)
        kmax += taper_window

    outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    if taper:
        start = -taper * max(tau.values())
        outplus._epoch, outcross._epoch = start, start

    for n in range(nmodes):
        hplus, hcross = get_td_qnm(template=None, taper=taper,
                                   f_0=f_0['%d%d%d' % (l, m, n)],
                                   tau=tau['%d%d%d' % (l, m, n)],
                                   phi=phis['%d%d%d' % (l, m, n)],
                                   amp=amps['%d%d%d' % (l, m, n)],
                                   inclination=inc, l=l, m=m,
                                   delta_t=delta_t, t_final=t_final)
        if not taper:
            outplus.data += hplus.data
            outcross.data += hcross.data
        else:
            outplus = taper_shift(hplus, outplus)
            outcross = taper_shift(hcross, outcross)

    return outplus, outcross
get_fd_lm
def get_fd_lm(template=None, **kwargs):
    """Return frequency domain lm mode with a given number of overtones.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    freqs : dict {lmn:f_lmn}
        Dictionary of the central frequencies for each overtone,
        as many as number of modes.
    taus : dict {lmn:tau_lmn}
        Dictionary of the damping times for each overtone,
        as many as number of modes.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amplmn : float
        Amplitude of the lmn overtone, as many as the number of nmodes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should
        also include the information from the azimuthal angle
        (phi + m*Phi).
    inclination : {None, float}, optional
        Inclination of the system in radians for the spherical harmonics.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all
        modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a lm mode with n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a lm mode with n overtones in frequency domain.
    """
    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    f_0 = input_params.pop('freqs')
    tau = input_params.pop('taus')
    l, m = input_params.pop('l'), input_params.pop('m')
    inc = input_params.pop('inclination', None)
    nmodes = input_params.pop('nmodes')
    if int(nmodes) == 0:
        raise ValueError('Number of overtones (nmodes) must be greater '
                         'than zero.')
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if not delta_f:
        delta_f = lm_deltaf(tau, ['%d%d%d' % (l, m, nmodes)])
    if not f_final:
        f_final = lm_ffinal(f_0, tau, ['%d%d%d' % (l, m, nmodes)])
    kmax = int(f_final / delta_f) + 1

    outplus = FrequencySeries(zeros(kmax, dtype=complex128),
                              delta_f=delta_f)
    outcross = FrequencySeries(zeros(kmax, dtype=complex128),
                               delta_f=delta_f)

    for n in range(nmodes):
        hplus, hcross = get_fd_qnm(template=None,
                                   f_0=f_0['%d%d%d' % (l, m, n)],
                                   tau=tau['%d%d%d' % (l, m, n)],
                                   amp=amps['%d%d%d' % (l, m, n)],
                                   phi=phis['%d%d%d' % (l, m, n)],
                                   inclination=inc, l=l, m=m,
                                   delta_f=delta_f, f_lower=f_lower,
                                   f_final=f_final)
        outplus.data += hplus.data
        outcross.data += hcross.data

    return outplus, outcross
normalize_pdf
def normalize_pdf(mu, pofmu):
    """
    Takes a function pofmu defined at rate sample values mu and
    normalizes it to be a suitable pdf. Both mu and pofmu must be arrays
    or lists of the same length.
    """
    if min(pofmu) < 0:
        raise ValueError("Probabilities cannot be negative, don't ask me "
                         "to normalize a function with negative values!")
    if min(mu) < 0:
        raise ValueError("Rates cannot be negative, don't ask me to "
                         "normalize a function over a negative domain!")

    dp = integral_element(mu, pofmu)
    return mu, pofmu / sum(dp)
compute_upper_limit
def compute_upper_limit(mu_in, post, alpha=0.9):
    """
    Returns the upper limit mu_high of confidence level alpha for a
    posterior distribution post on the given parameter mu.
    The posterior need not be normalized.
    """
    if 0 < alpha < 1:
        dp = integral_element(mu_in, post)
        high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha)
        # if alpha is in (0,1] and post is non-negative, bisect_left
        # will always return an index in the range of mu since
        # post.cumsum()/post.sum() will always begin at 0 and end at 1
        mu_high = mu_in[high_idx]
    elif alpha == 1:
        mu_high = numpy.max(mu_in[post > 0])
    else:
        raise ValueError("Confidence level must be in (0,1].")

    return mu_high
compute_lower_limit
def compute_lower_limit(mu_in, post, alpha=0.9):
    """
    Returns the lower limit mu_low of confidence level alpha for a
    posterior distribution post on the given parameter mu.
    The posterior need not be normalized.
    """
    if 0 < alpha < 1:
        dp = integral_element(mu_in, post)
        low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), 1 - alpha)
        # if alpha is in [0,1) and post is non-negative, bisect_right
        # will always return an index in the range of mu since
        # post.cumsum()/post.sum() will always begin at 0 and end at 1
        mu_low = mu_in[low_idx]
    elif alpha == 1:
        mu_low = numpy.min(mu_in[post > 0])
    else:
        raise ValueError("Confidence level must be in (0,1].")

    return mu_low
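A toy credible-bound sketch using the two functions above on an unnormalized Gaussian posterior (values are illustrative, and the integral_element helper is assumed available in the same module):

import numpy

mu = numpy.linspace(0.0, 10.0, 1001)
post = numpy.exp(-0.5 * ((mu - 4.0) / 1.0)**2)
hi = compute_upper_limit(mu, post, alpha=0.9)  # one-sided 90% upper bound
lo = compute_lower_limit(mu, post, alpha=0.9)  # one-sided 90% lower bound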
hpd_coverage
def hpd_coverage(mu, pdf, thresh):
    '''
    Integrates a pdf over mu taking only bins where the mean over the bin
    is above a given threshold. This gives the coverage of the HPD
    interval for the given threshold.
    '''
    dp = integral_element(mu, pdf)
    bin_mean = (pdf[1:] + pdf[:-1]) / 2.

    return dp[bin_mean > thresh].sum()
hpd_threshold
def hpd_threshold(mu_in, post, alpha, tol):
    '''
    For a PDF post over samples mu_in, find a density threshold such that
    the region having higher density has coverage of at least alpha, and
    less than alpha plus a given tolerance.
    '''
    # normalize_pdf returns (mu, pdf); only the normalized pdf is needed
    _, norm_post = normalize_pdf(mu_in, post)
    # initialize bisection search
    p_minus = 0.0
    p_plus = max(norm_post)
    while abs(hpd_coverage(mu_in, norm_post, p_minus) -
              hpd_coverage(mu_in, norm_post, p_plus)) >= tol:
        p_test = (p_minus + p_plus) / 2.
        if hpd_coverage(mu_in, norm_post, p_test) >= alpha:
            # test value was too low or just right
            p_minus = p_test
        else:
            # test value was too high
            p_plus = p_test
    # p_minus never goes above the required threshold and p_plus never
    # goes below; thus on exiting, p_minus is at or below the required
    # threshold and the difference in coverage is within tolerance
    return p_minus
compute_volume_vs_mass
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None):
    """
    Compute the average luminosity an experiment was sensitive to.

    Assumes that luminosity is uniformly distributed in space. Input is
    the sets of found and missed injections.
    """
    # mean and std estimate for luminosity
    volArray = bin_utils.BinnedArray(mass_bins)
    vol2Array = bin_utils.BinnedArray(mass_bins)

    # found/missed stats
    foundArray = bin_utils.BinnedArray(mass_bins)
    missedArray = bin_utils.BinnedArray(mass_bins)

    # compute the mean luminosity in each mass bin
    effvmass = []
    errvmass = []

    # 2D case first
    if bin_type == "Mass1_Mass2":
        for j, mc1 in enumerate(mass_bins.centres()[0]):
            for k, mc2 in enumerate(mass_bins.centres()[1]):
                newfound = filter_injections_by_mass(
                    found, mass_bins, j, bin_type, k)
                newmissed = filter_injections_by_mass(
                    missed, mass_bins, j, bin_type, k)

                foundArray[(mc1, mc2)] = len(newfound)
                missedArray[(mc1, mc2)] = len(newmissed)

                # compute the volume using this injection set
                meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
                    newfound, newmissed, dbins)
                effvmass.append(meaneff)
                errvmass.append(efferr)
                volArray[(mc1, mc2)] = meanvol
                vol2Array[(mc1, mc2)] = volerr

        return (volArray, vol2Array, foundArray, missedArray, effvmass,
                errvmass)

    # 1D case
    for j, mc in enumerate(mass_bins.centres()[0]):
        # filter out injections not in this mass bin
        newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)
        newmissed = filter_injections_by_mass(missed, mass_bins, j,
                                              bin_type)

        foundArray[(mc,)] = len(newfound)
        missedArray[(mc,)] = len(newmissed)

        # compute the volume using this injection set
        meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
            newfound, newmissed, dbins)
        effvmass.append(meaneff)
        errvmass.append(efferr)
        volArray[(mc,)] = meanvol
        vol2Array[(mc,)] = volerr

    return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
calculate_acf
def calculate_acf(data, delta_t=1.0, unbiased=False):
    r"""Calculates the one-sided autocorrelation function.

    Calculates the autocorrelation function (ACF) and returns the
    one-sided ACF. The ACF is defined as the autocovariance divided by
    the variance. The ACF can be estimated using

    .. math::

        \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k}
            \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right)

    Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series
    at time t, :math:`\mu` is the mean of :math:`X_{t}`, and
    :math:`\sigma^{2}` is the variance of :math:`X_{t}`.

    Parameters
    ----------
    data : TimeSeries or numpy.array
        A TimeSeries or numpy.array of data.
    delta_t : float
        The time step of the data series if it is not a TimeSeries
        instance.
    unbiased : bool
        If True the normalization of the autocovariance function is n-k
        instead of n. This is called the unbiased estimation of the
        autocovariance. Note that this does not mean the ACF is unbiased.

    Returns
    -------
    acf : numpy.array
        If data is a TimeSeries then acf will be a TimeSeries of the
        one-sided ACF. Else acf is a numpy.array.
    """
    # if given a TimeSeries instance then get numpy.array
    if isinstance(data, TimeSeries):
        y = data.numpy()
        delta_t = data.delta_t
    else:
        y = data

    # Zero mean
    y = y - y.mean()
    ny_orig = len(y)

    # zero-pad to at least twice the length to avoid circular wrap-around
    npad = 1
    while npad < 2 * ny_orig:
        npad = npad << 1
    ypad = numpy.zeros(npad)
    ypad[:ny_orig] = y

    # FFT data minus the mean
    fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries()

    # correlate
    # do not need to give the conjugate since the correlate function
    # does it
    cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype),
                            delta_f=fdata.delta_f, copy=False)
    correlate(fdata, fdata, cdata)

    # IFFT correlated data to get unnormalized autocovariance time series
    acf = cdata.to_timeseries()
    acf = acf[:ny_orig]

    # normalize the autocovariance
    # note that dividing by acf[0] is the same as ( y.var() * len(acf) )
    if unbiased:
        acf /= (y.var() * numpy.arange(len(acf), 0, -1))
    else:
        acf /= acf[0]

    # return input datatype
    if isinstance(data, TimeSeries):
        return TimeSeries(acf, delta_t=delta_t)
    else:
        return acf
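A quick sanity check using the function above (sketch; the data is synthetic): white noise decorrelates after zero lag.

import numpy

noise = numpy.random.normal(size=4096)
acf = calculate_acf(noise)
print(float(acf[0]))   # exactly 1.0 by construction (biased normalization)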
insert_fft_options
def insert_fft_options(optgroup):
    """
    Inserts the options that affect the behavior of this backend

    Parameters
    ----------
    optgroup: fft_option
        OptionParser argument group whose options are extended
    """
    optgroup.add_argument("--fftw-measure-level",
                          help="Determines the measure level used in "
                               "planning FFTW FFTs; allowed values are: " +
                               str([0, 1, 2, 3]),
                          type=int, default=_default_measurelvl)
    optgroup.add_argument("--fftw-threads-backend",
                          help="Give 'openmp', 'pthreads' or 'unthreaded' "
                               "to specify which threaded FFTW to use",
                          default=None)
    optgroup.add_argument("--fftw-input-float-wisdom-file",
                          help="Filename from which to read "
                               "single-precision wisdom",
                          default=None)
    optgroup.add_argument("--fftw-input-double-wisdom-file",
                          help="Filename from which to read "
                               "double-precision wisdom",
                          default=None)
    optgroup.add_argument("--fftw-output-float-wisdom-file",
                          help="Filename to which to write "
                               "single-precision wisdom",
                          default=None)
    optgroup.add_argument("--fftw-output-double-wisdom-file",
                          help="Filename to which to write "
                               "double-precision wisdom",
                          default=None)
    optgroup.add_argument("--fftw-import-system-wisdom",
                          help="If given, call "
                               "fftw[f]_import_system_wisdom()",
                          action="store_true")
kl
def kl(samples1, samples2, pdf1=False, pdf2=False, bins=30, hist_min=None,
       hist_max=None):
    """ Computes the Kullback-Leibler divergence for a single parameter
    from two distributions.

    Parameters
    ----------
    samples1 : numpy.array
        Samples or probability density function (must also set
        `pdf1=True`).
    samples2 : numpy.array
        Samples or probability density function (must also set
        `pdf2=True`).
    pdf1 : bool
        Set to `True` if `samples1` is a probability density function
        already.
    pdf2 : bool
        Set to `True` if `samples2` is a probability density function
        already.
    bins : int
        Number of bins to use when calculating probability density
        function from a set of samples of the distribution.
    hist_min : numpy.float64
        Minimum of the distributions' values to use.
    hist_max : numpy.float64
        Maximum of the distributions' values to use.

    Returns
    -------
    numpy.float64
        The Kullback-Leibler divergence value.
    """
    hist_range = (hist_min, hist_max)
    if not pdf1:
        # density=True replaces the removed numpy normed=True keyword
        samples1, _ = numpy.histogram(samples1, bins=bins,
                                      range=hist_range, density=True)
    if not pdf2:
        samples2, _ = numpy.histogram(samples2, bins=bins,
                                      range=hist_range, density=True)

    return stats.entropy(samples1, qk=samples2)
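A sketch using analytic densities with the function above, where KL(N(0,1) || N(1,1)) is 0.5 nats; the grid and densities are illustrative:

import numpy

x = numpy.linspace(-5.0, 5.0, 200)
p = numpy.exp(-0.5 * x**2)           # unnormalized N(0, 1)
q = numpy.exp(-0.5 * (x - 1.0)**2)   # unnormalized N(1, 1)
print(kl(p, q, pdf1=True, pdf2=True))  # ~0.5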
rst_dict_table
def rst_dict_table(dict_, key_format=str, val_format=str, header=None,
                   sort=True):
    """Returns an RST-formatted table of keys and values from a `dict`

    Parameters
    ----------
    dict_ : dict
        data to display in table
    key_format : callable
        callable function with which to format keys
    val_format : callable
        callable function with which to format values
    header : None, tuple of str
        a 2-tuple of header for the two columns, or `None` to exclude
        a header line (default)
    sort : bool, optional
        Sort the dictionary keys alphabetically when writing the table.

    Examples
    --------
    >>> a = {'key1': 'value1', 'key2': 'value2'}
    >>> print(rst_dict_table(a))
    ==== ======
    key1 value1
    key2 value2
    ==== ======
    >>> print(rst_dict_table(a, key_format='``{}``'.format,
    ...                      val_format=':class:`{}`'.format,
    ...                      header=('Key', 'Value')))
    ======== ===============
    Key      Value
    ======== ===============
    ``key1`` :class:`value1`
    ``key2`` :class:`value2`
    ======== ===============
    """
    keys, values = zip(*dict_.items())
    # apply formatting (as lists so they can be iterated more than once)
    keys = list(map(key_format, keys))
    values = list(map(val_format, values))
    # work out longest elements in each column
    nckey = max(map(len, keys))
    ncval = max(map(len, values))
    if header:
        khead, vhead = header
        nckey = max(nckey, len(khead))
        ncval = max(ncval, len(vhead))
    # build table header line
    divider = "{} {}".format('=' * nckey, '=' * ncval)

    def row(key, val):
        # left-justify the key column to the divider width
        fmt = '{{0:{0}s}} {{1}}'.format(nckey)
        return fmt.format(key, val)

    # build table of lines
    lines = [divider]
    if header:
        lines.extend((row(*header), divider))
    params = zip(keys, values)
    if sort:
        params = sorted(params)
    for key, val in params:
        lines.append(row(key, val))
    lines.append(divider)
    return '\n'.join(lines)
read_model_from_config
def read_model_from_config(cp, ifo, section="calibration"):
    """Returns an instance of the calibration model specified in the
    given configuration file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        An open config file to read.
    ifo : string
        The detector (H1, L1) whose model will be loaded.
    section : {"calibration", string}
        Section name from which to retrieve the model.

    Returns
    -------
    instance
        An instance of the calibration model class.
    """
    model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None)
    recalibrator = models[model].from_config(cp, ifo.lower(), section)

    return recalibrator
_gates_from_cli
def _gates_from_cli(opts, gate_opt):
    """Parses the given `gate_opt` into something understandable by
    `strain.gate_data`.
    """
    gates = {}
    if getattr(opts, gate_opt) is None:
        return gates
    for gate in getattr(opts, gate_opt):
        try:
            ifo, central_time, half_dur, taper_dur = gate.split(':')
            central_time = float(central_time)
            half_dur = float(half_dur)
            taper_dur = float(taper_dur)
        except ValueError:
            raise ValueError("--gate {} not formatted correctly; ".format(
                gate) + "see help")
        try:
            gates[ifo].append((central_time, half_dur, taper_dur))
        except KeyError:
            gates[ifo] = [(central_time, half_dur, taper_dur)]
    return gates
apply_gates_to_td
def apply_gates_to_td(strain_dict, gates):
    """Applies the given dictionary of gates to the given dictionary of
    strain.

    Parameters
    ----------
    strain_dict : dict
        Dictionary of time-domain strain, keyed by the ifos.
    gates : dict
        Dictionary of gates. Keys should be the ifo to apply the data to,
        values are a tuple giving the central time of the gate, the half
        duration, and the taper duration.

    Returns
    -------
    dict
        Dictionary of time-domain strain with the gates applied.
    """
    # copy data to new dictionary
    outdict = dict(strain_dict.items())
    for ifo in gates:
        outdict[ifo] = strain.gate_data(outdict[ifo], gates[ifo])
    return outdict
apply_gates_to_fd
def apply_gates_to_fd(stilde_dict, gates):
    """Applies the given dictionary of gates to the given dictionary of
    strain in the frequency domain.

    Gates are applied by IFFT-ing the strain data to the time domain,
    applying the gate, then FFT-ing back to the frequency domain.

    Parameters
    ----------
    stilde_dict : dict
        Dictionary of frequency-domain strain, keyed by the ifos.
    gates : dict
        Dictionary of gates. Keys should be the ifo to apply the data to,
        values are a tuple giving the central time of the gate, the half
        duration, and the taper duration.

    Returns
    -------
    dict
        Dictionary of frequency-domain strain with the gates applied.
    """
    # copy data to new dictionary
    outdict = dict(stilde_dict.items())
    # create a time-domain strain dictionary to apply the gates to
    strain_dict = dict([[ifo, outdict[ifo].to_timeseries()]
                        for ifo in gates])
    # apply gates and fft back to the frequency domain
    for ifo, d in apply_gates_to_td(strain_dict, gates).items():
        outdict[ifo] = d.to_frequencyseries()
    return outdict
add_gate_option_group
def add_gate_option_group(parser):
    """Adds the options needed to apply gates to data.

    Parameters
    ----------
    parser : object
        ArgumentParser instance.
    """
    gate_group = parser.add_argument_group("Options for gating data.")

    gate_group.add_argument("--gate", nargs="+", type=str,
                            metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
                            help="Apply one or more gates to the data "
                                 "before filtering.")
    gate_group.add_argument("--gate-overwhitened", action="store_true",
                            help="Overwhiten data first, then apply the "
                                 "gates specified in --gate. Overwhitening "
                                 "allows for sharper tapers to be used, "
                                 "since lines are not blurred.")
    gate_group.add_argument("--psd-gate", nargs="+", type=str,
                            metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
                            help="Apply one or more gates to the data used "
                                 "for computing the PSD. Gates are applied "
                                 "prior to FFT-ing the data for PSD "
                                 "estimation.")
    return gate_group
power_chisq_at_points_from_precomputed
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins,
                                           indices):
    """Calculate the chisq timeseries from precomputed values for only
    selected points.

    This function calculates the chisq at each point by explicitly time
    shifting and summing each bin. No FFT is involved.

    Parameters
    ----------
    corr: FrequencySeries
        The product of the template and data in the frequency domain.
    snr: numpy.ndarray
        The unnormalized array of snr values at only the selected points
        in `indices`.
    snr_norm: float
        The normalization of the snr (see the FindChirp paper, Allen et
        al. 2012, for conventions).
    bins: List of integers
        The edges of the equal power bins
    indices: Array
        The indices where we will calculate the chisq. These must be
        relative to the given `corr` series.

    Returns
    -------
    chisq: Array
        An array containing only the chisq at the selected points.
    """
    num_bins = len(bins) - 1
    chisq = shift_sum(corr, indices, bins)  # pylint:disable=assignment-from-no-return
    return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
python
{ "resource": "" }
q31743
power_chisq_from_precomputed
train
def power_chisq_from_precomputed(corr, snr, snr_norm, bins, indices=None,
                                 return_bins=False):
    """Calculate the chisq timeseries from precomputed values.

    This function calculates the chisq at all times by performing an
    inverse FFT of each bin.

    Parameters
    ----------
    corr: FrequencySeries
        The product of the template and data in the frequency domain.
    snr: TimeSeries
        The unnormalized snr time series.
    snr_norm: float
        The snr normalization factor; the normalized snr is given by
        snr * snr_norm.
    bins: List of integers
        The edges of the chisq bins.
    indices: {Array, None}, optional
        Index values into snr that indicate where to calculate
        chisq values. If none, calculate chisq for all possible indices.
    return_bins: {boolean, False}, optional
        Return a list of the SNRs for each chisq bin.

    Returns
    -------
    chisq: TimeSeries
    """
    # Get workspace memory
    global _q_l, _qtilde_l, _chisq_l

    bin_snrs = []

    if _q_l is None or len(_q_l) != len(snr):
        q = zeros(len(snr), dtype=complex_same_precision_as(snr))
        qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr))
        _q_l = q
        _qtilde_l = qtilde
    else:
        q = _q_l
        qtilde = _qtilde_l

    if indices is not None:
        snr = snr.take(indices)

    if _chisq_l is None or len(_chisq_l) < len(snr):
        chisq = zeros(len(snr), dtype=real_same_precision_as(snr))
        _chisq_l = chisq
    else:
        chisq = _chisq_l[0:len(snr)]
        chisq.clear()

    num_bins = len(bins) - 1

    for j in range(num_bins):
        k_min = int(bins[j])
        k_max = int(bins[j+1])

        qtilde[k_min:k_max] = corr[k_min:k_max]
        pycbc.fft.ifft(qtilde, q)
        qtilde[k_min:k_max].clear()

        if return_bins:
            bin_snrs.append(TimeSeries(q * snr_norm * num_bins ** 0.5,
                                       delta_t=snr.delta_t,
                                       epoch=snr.start_time))

        if indices is not None:
            chisq_accum_bin(chisq, q.take(indices))
        else:
            chisq_accum_bin(chisq, q)

    chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm ** 2.0)

    if indices is None:
        chisq = TimeSeries(chisq, delta_t=snr.delta_t,
                           epoch=snr.start_time, copy=False)

    if return_bins:
        return chisq, bin_snrs
    else:
        return chisq
python
{ "resource": "" }
q31744
power_chisq
train
def power_chisq(template, data, num_bins, psd,
                low_frequency_cutoff=None,
                high_frequency_cutoff=None,
                return_bins=False):
    """Calculate the chisq timeseries

    Parameters
    ----------
    template: FrequencySeries or TimeSeries
        A time or frequency series that contains the filter template.
    data: FrequencySeries or TimeSeries
        A time or frequency series that contains the data to filter. The
        length must be commensurate with the template, i.e. once both are
        converted to frequency series they must have the same length.
    num_bins: int
        The number of bins in the chisq. Note that the dof goes as
        2*num_bins-2.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: {None, float}, optional
        The low frequency cutoff for the filter
    high_frequency_cutoff: {None, float}, optional
        The high frequency cutoff for the filter
    return_bins: {boolean, False}, optional
        Return a list of the individual chisq bins

    Returns
    -------
    chisq: TimeSeries
        TimeSeries containing the chisq values for all times.
    """
    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
                            high_frequency_cutoff)
    corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype)
    total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
                                                 low_frequency_cutoff,
                                                 high_frequency_cutoff,
                                                 corr_out=corra)

    return power_chisq_from_precomputed(corr, total_snr, tnorm, bins,
                                        return_bins=return_bins)
python
{ "resource": "" }
q31745
SingleDetSkyMaxPowerChisq.calculate_chisq_bins
train
def calculate_chisq_bins(self, template, psd): """ Obtain the chisq bins for this template and PSD. """ num_bins = int(self.parse_option(template, self.num_bins)) if hasattr(psd, 'sigmasq_vec') and \ template.approximant in psd.sigmasq_vec: kmin = int(template.f_lower / psd.delta_f) kmax = template.end_idx bins = power_chisq_bins_from_sigmasq_series( psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax) else: bins = power_chisq_bins(template, num_bins, psd, template.f_lower) return bins
python
{ "resource": "" }
q31746
build_includes
train
def build_includes(): """Creates rst files in the _include directory using the python scripts there. This will ignore any files in the _include directory that start with ``_``. """ print("Running scripts in _include:") cwd = os.getcwd() os.chdir('_include') pyfiles = glob.glob('*.py') for fn in pyfiles: if not fn.startswith('_'): print(' {}'.format(fn)) subprocess.check_output(['python', fn]) os.chdir(cwd)
python
{ "resource": "" }
q31747
apply_cyclic
train
def apply_cyclic(value, bounds):
    """Given a value, applies cyclic boundary conditions between the
    minimum and maximum bounds.

    Parameters
    ----------
    value : float
        The value to apply the cyclic conditions to.
    bounds : Bounds instance
        Boundaries to use for applying cyclic conditions.

    Returns
    -------
    float
        The value after the cyclic bounds are applied.
    """
    return (value - bounds._min) % (bounds._max - bounds._min) + bounds._min
python
{ "resource": "" }
q31748
reflect_well
train
def reflect_well(value, bounds): """Given some boundaries, reflects the value until it falls within both boundaries. This is done iteratively, reflecting left off of the `boundaries.max`, then right off of the `boundaries.min`, etc. Parameters ---------- value : float The value to apply the reflected boundaries to. bounds : Bounds instance Boundaries to reflect between. Both `bounds.min` and `bounds.max` must be instances of `ReflectedBound`, otherwise an AttributeError is raised. Returns ------- float The value after being reflected between the two bounds. """ while value not in bounds: value = bounds._max.reflect_left(value) value = bounds._min.reflect_right(value) return value
python
{ "resource": "" }
q31749
ReflectedBound.reflect_left
train
def reflect_left(self, value):
    """Only reflects the value if it is > self."""
    if value > self:
        value = self.reflect(value)
    return value
python
{ "resource": "" }
q31750
ReflectedBound.reflect_right
train
def reflect_right(self, value):
    """Only reflects the value if it is < self."""
    if value < self:
        value = self.reflect(value)
    return value
python
{ "resource": "" }
q31751
Bounds.apply_conditions
train
def apply_conditions(self, value):
    """Applies any boundary conditions to the given value.

    The value is manipulated based on the following conditions:

        * If `self.cyclic` is True then `value` is wrapped around to the
          minimum (maximum) bound if `value` is `>= self.max` (`< self.min`)
          bound. For example, if the minimum and maximum bounds are
          `0, 2*pi` and `value = 5*pi`, then the returned value will be
          `pi`.

        * If `self.min` is a reflected boundary then `value` will be
          reflected to the right if it is `< self.min`. For example, if
          `self.min = 10` and `value = 3`, then the returned value will
          be 17.

        * If `self.max` is a reflected boundary then `value` will be
          reflected to the left if it is `> self.max`. For example, if
          `self.max = 20` and `value = 27`, then the returned value will
          be 13.

        * If `self.min` and `self.max` are both reflected boundaries, then
          `value` will be reflected between the two boundaries until it
          falls within the bounds. The first reflection occurs off of the
          maximum boundary. For example, if `self.min = 10`,
          `self.max = 20`, and `value = 42`, the returned value will be 18
          (the first reflection yields -2, the second 22, and the last 18).

        * If neither bounds are reflected and cyclic is False, then the
          value is just returned as-is.

    Parameters
    ----------
    value : float
        The value to apply the conditions to.

    Returns
    -------
    float
        The value after the conditions are applied; see above for details.
    """
    retval = value
    if self._cyclic:
        retval = apply_cyclic(value, self)
    retval = self._reflect(retval)
    if isinstance(retval, numpy.ndarray) and retval.size == 1:
        try:
            retval = retval[0]
        except IndexError:
            retval = float(retval)
    return retval
python
{ "resource": "" }
q31752
parse_veto_definer
train
def parse_veto_definer(veto_def_filename):
    """ Parse a veto definer file from the filename and return a dictionary
    indexed by ifo and veto definer category level.

    Parameters
    ----------
    veto_def_filename: str
        The path to the veto definer file

    Returns
    -------
    parsed_definition: dict
        Returns a dictionary first indexed by ifo, then category level, and
        finally a list of veto definitions.
    """
    from glue.ligolw import table, lsctables, utils as ligolw_utils
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(veto_def_filename, False,
                                       contenthandler=h)
    veto_table = table.get_table(indoc, 'veto_definer')

    ifo = veto_table.getColumnByName('ifo')
    name = veto_table.getColumnByName('name')
    version = numpy.array(veto_table.getColumnByName('version'))
    category = numpy.array(veto_table.getColumnByName('category'))
    start = numpy.array(veto_table.getColumnByName('start_time'))
    end = numpy.array(veto_table.getColumnByName('end_time'))
    start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
    end_pad = numpy.array(veto_table.getColumnByName('end_pad'))

    data = {}
    for i in range(len(veto_table)):
        if ifo[i] not in data:
            data[ifo[i]] = {}

        # The veto-definer categories are weird! Hardware injections are
        # stored in "3" and numbers above that are bumped up by one
        # (although not often used any more). So we remap 3 to H and
        # anything above 3 to N-1. 2 and 1 correspond to 2 and 1 (YAY!)
        if category[i] > 3:
            curr_cat = "CAT_{}".format(category[i]-1)
        elif category[i] == 3:
            curr_cat = "CAT_H"
        else:
            curr_cat = "CAT_{}".format(category[i])

        if curr_cat not in data[ifo[i]]:
            data[ifo[i]][curr_cat] = []

        veto_info = {'name': name[i],
                     'version': version[i],
                     'start': start[i],
                     'end': end[i],
                     'start_pad': start_pad[i],
                     'end_pad': end_pad[i],
                     }
        data[ifo[i]][curr_cat].append(veto_info)
    return data
python
{ "resource": "" }
q31753
query_cumulative_flags
train
def query_cumulative_flags(ifo, segment_names, start_time, end_time,
                           source='any', server="segments.ligo.org",
                           veto_definer=None,
                           bounds=None,
                           padding=None,
                           override_ifos=None,
                           cache=False):
    """Return the times where any flag is active

    Parameters
    ----------
    ifo: string or dict
        The interferometer to query (H1, L1). If a dict, an element for
        each flag name must be provided.
    segment_names: list of strings
        The status flags to query.
    start_time: int
        The starting gps time to begin querying from
    end_time: int
        The end gps time of the query
    source: str, Optional
        Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option
        may also be given. The default is to try GWOSC first then try
        dqsegdb.
    server: str, Optional
        The server path. Only used with dqsegdb atm.
    veto_definer: str, Optional
        The path to a veto definer to define groups of flags which
        themselves define a set of segments.
    bounds: dict, Optional
        Dict containing (start, end) tuples keyed by the flag name which
        indicate the times to which that flag's active segments should be
        restricted.
    padding: dict, Optional
        Dict keyed by the flag name. Each element is a tuple
        (start_pad, end_pad) which indicates how to change the segment
        boundaries.
    override_ifos: dict, Optional
        A dict keyed by flag_name to override the ifo option on a per flag
        basis.

    Returns
    -------
    segments: glue.segments.segmentlist
        List of segments
    """
    total_segs = segmentlist([])
    for flag_name in segment_names:
        ifo_name = ifo
        if override_ifos is not None and flag_name in override_ifos:
            ifo_name = override_ifos[flag_name]

        segs = query_flag(ifo_name, flag_name, start_time, end_time,
                          source=source, server=server,
                          veto_definer=veto_definer,
                          cache=cache)

        if padding and flag_name in padding:
            s, e = padding[flag_name]
            segs2 = segmentlist([])
            for seg in segs:
                segs2.append(segment(seg[0] + s, seg[1] + e))
            segs = segs2

        if bounds is not None and flag_name in bounds:
            s, e = bounds[flag_name]
            valid = segmentlist([segment([s, e])])
            segs = (segs & valid).coalesce()

        total_segs = (total_segs + segs).coalesce()
    return total_segs
python
{ "resource": "" }
q31754
parse_flag_str
train
def parse_flag_str(flag_str):
    """ Parse a dq flag query string

    Parameters
    ----------
    flag_str: str
        String needing to be parsed

    Returns
    -------
    flags: list of strings
        List of reduced name strings which can be passed to lower level
        query commands
    signs: dict
        Dict of bools indicating whether the flag should add positively to
        the segmentlist
    ifos: dict
        The ifo specified for the given flag
    bounds: dict
        The boundary of a given flag
    padding: dict
        Any padding that should be applied to the segments of a given
        flag.
    """
    flags = flag_str.replace(' ', '').strip().split(',')

    signs = {}
    ifos = {}
    bounds = {}
    padding = {}
    bflags = []

    for flag in flags:
        # Check if the flag should add or subtract time
        sign = flag[0] == '+'
        flag = flag[1:]

        ifo = pad = bound = None

        # Check for non-default IFO
        if len(flag.split(':')[0]) == 2:
            ifo = flag.split(':')[0]
            flag = flag[3:]

        # Check for padding options
        if '<' in flag:
            popt = flag.split('<')[1].split('>')[0]
            spad, epad = popt.split(':')
            pad = (float(spad), float(epad))
            flag = flag.replace(popt, '').replace('<>', '')

        # Check if there are bounds on the flag
        if '[' in flag:
            bopt = flag.split('[')[1].split(']')[0]
            start, end = bopt.split(':')
            bound = (int(start), int(end))
            flag = flag.replace(bopt, '').replace('[]', '')

        if ifo:
            ifos[flag] = ifo
        if pad:
            padding[flag] = pad
        if bound:
            bounds[flag] = bound
        bflags.append(flag)
        signs[flag] = sign

    return bflags, signs, ifos, bounds, padding
python
{ "resource": "" }
q31755
query_str
train
def query_str(ifo, flag_str, start_time, end_time,
              server="segments.ligo.org", veto_definer=None):
    """ Query for flags based on a special str syntax

    Parameters
    ----------
    ifo: str
        The ifo to be mainly querying for. (may be overridden in syntax)
    flag_str: str
        Specification of how to do the query. Ex.
        +H1:DATA:1<-8:8>[0:100000000] would return H1 time for the DATA
        available flag with version 1. It would then pad each segment by
        8 seconds on either side and only return times within the chosen
        range 0,100000000.
    start_time: int
        The start gps time. May be overridden for individual flags with
        the flag str bounds syntax
    end_time: int
        The end gps time. May be overridden for individual flags with the
        flag str bounds syntax

    Returns
    -------
    segs: segmentlist
        A list of segments corresponding to the flag query string
    """
    flags, sign, ifos, bounds, padding = parse_flag_str(flag_str)

    up = [f for f in flags if sign[f]]
    down = [f for f in flags if not sign[f]]
    if len(up) + len(down) != len(flags):
        raise ValueError('Not all flags could be parsed, check +/- prefix')

    segs = query_cumulative_flags(ifo, up, start_time, end_time,
                                  server=server,
                                  veto_definer=veto_definer,
                                  bounds=bounds,
                                  padding=padding,
                                  override_ifos=ifos)

    mseg = query_cumulative_flags(ifo, down, start_time, end_time,
                                  server=server,
                                  veto_definer=veto_definer,
                                  bounds=bounds,
                                  padding=padding,
                                  override_ifos=ifos)

    segs = (segs - mseg).coalesce()
    return segs
python
{ "resource": "" }
q31756
BaseDataModel.data
train
def data(self, data): """Store a copy of the data.""" self._data = {det: d.copy() for (det, d) in data.items()}
python
{ "resource": "" }
q31757
BaseDataModel.logplr
train
def logplr(self):
    """Returns the log of the prior-weighted likelihood ratio at the
    current parameter values.

    The logprior is calculated first. If the logprior returns ``-inf``
    (possibly indicating a non-physical point), then ``loglr`` is not
    called.
    """
    logp = self.logprior
    if logp == -numpy.inf:
        return logp
    else:
        return logp + self.loglr
python
{ "resource": "" }
q31758
BaseDataModel.write_metadata
train
def write_metadata(self, fp): """Adds data to the metadata that's written. Parameters ---------- fp : pycbc.inference.io.BaseInferenceFile instance The inference file to write to. """ super(BaseDataModel, self).write_metadata(fp) fp.write_stilde(self.data)
python
{ "resource": "" }
q31759
apply_fseries_time_shift
train
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True): """Shifts a frequency domain waveform in time. The waveform is assumed to be sampled at equal frequency intervals. """ if htilde.precision != 'single': raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision") if copy: out = htilde.copy() else: out = htilde kmin = numpy.int32(kmin) kmax = numpy.int32(len(htilde)) nb = int(numpy.ceil(kmax / nt_float)) if nb > 1024: raise ValueError("More than 1024 blocks not supported yet") phi = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f) fseries_ts_fn.prepared_call((nb, 1), (nt, 1, 1), out.data.gpudata, phi, kmin, kmax) if copy: htilde = FrequencySeries(out, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False) return htilde
python
{ "resource": "" }
q31760
UniformF0Tau.from_config
train
def from_config(cls, cp, section, variable_args): """Initialize this class from a config file. Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should be specified by providing ``min-{param}`` and ``max-{param}``. If the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and ``damping_time`` should be provided; these must match ``variable_args``. If ``rdfreq`` and ``damping_time`` are not provided, ``variable_args`` are expected to be ``f0`` and ``tau``. Only ``min/max-f0`` and ``min/max-tau`` need to be provided. Example: .. code-block:: ini [{section}-f0+tau] name = uniform_f0_tau min-f0 = 10 max-f0 = 2048 min-tau = 0.0001 max-tau = 0.010 min-final_mass = 10 Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser WorkflowConfigParser instance to read. section : str The name of the section to read. variable_args : str The name of the variable args. These should be separated by ``pycbc.VARARGS_DELIM``. Returns ------- UniformF0Tau : This class initialized with the parameters provided in the config file. """ tag = variable_args variable_args = set(variable_args.split(pycbc.VARARGS_DELIM)) # get f0 and tau f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0') tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau') # see if f0 and tau should be renamed if cp.has_option_tag(section, 'rdfreq', tag): rdfreq = cp.get_opt_tag(section, 'rdfreq', tag) else: rdfreq = 'f0' if cp.has_option_tag(section, 'damping_time', tag): damping_time = cp.get_opt_tag(section, 'damping_time', tag) else: damping_time = 'tau' # check that they match whats in the variable args if not variable_args == set([rdfreq, damping_time]): raise ValueError("variable args do not match rdfreq and " "damping_time names") # get the final mass and spin values, if provided final_mass = bounded.get_param_bounds_from_config( cp, section, tag, 'final_mass') final_spin = bounded.get_param_bounds_from_config( cp, section, tag, 'final_spin') extra_opts = {} if cp.has_option_tag(section, 'norm_tolerance', tag): extra_opts['norm_tolerance'] = float( cp.get_opt_tag(section, 'norm_tolerance', tag)) if cp.has_option_tag(section, 'norm_seed', tag): extra_opts['norm_seed'] = int( cp.get_opt_tag(section, 'norm_seed', tag)) return cls(f0=f0, tau=tau, final_mass=final_mass, final_spin=final_spin, rdfreq=rdfreq, damping_time=damping_time, **extra_opts)
python
{ "resource": "" }
q31761
UniformSolidAngle.apply_boundary_conditions
train
def apply_boundary_conditions(self, **kwargs): """Maps the given values to be within the domain of the azimuthal and polar angles, before applying any other boundary conditions. Parameters ---------- \**kwargs : The keyword args must include values for both the azimuthal and polar angle, using the names they were initilialized with. For example, if `polar_angle='theta'` and `azimuthal_angle=`phi`, then the keyword args must be `theta={val1}, phi={val2}`. Returns ------- dict A dictionary of the parameter names and the conditioned values. """ polarval = kwargs[self._polar_angle] azval = kwargs[self._azimuthal_angle] # constrain each angle to its domain polarval = self._polardist._domain.apply_conditions(polarval) azval = self._azimuthaldist._domain.apply_conditions(azval) # apply any other boundary conditions polarval = self._bounds[self._polar_angle].apply_conditions(polarval) azval = self._bounds[self._azimuthal_angle].apply_conditions(azval) return {self._polar_angle: polarval, self._azimuthal_angle: azval}
python
{ "resource": "" }
q31762
UniformSolidAngle._pdf
train
def _pdf(self, **kwargs): """ Returns the pdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values. """ return self._polardist._pdf(**kwargs) * \ self._azimuthaldist._pdf(**kwargs)
python
{ "resource": "" }
q31763
UniformSolidAngle._logpdf
train
def _logpdf(self, **kwargs): """ Returns the logpdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values. """ return self._polardist._logpdf(**kwargs) +\ self._azimuthaldist._logpdf(**kwargs)
python
{ "resource": "" }
q31764
create_axes_grid
train
def create_axes_grid(parameters, labels=None, height_ratios=None, width_ratios=None, no_diagonals=False): """Given a list of parameters, creates a figure with an axis for every possible combination of the parameters. Parameters ---------- parameters : list Names of the variables to be plotted. labels : {None, dict}, optional A dictionary of parameters -> parameter labels. height_ratios : {None, list}, optional Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. width_ratios : {None, list}, optional Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. no_diagonals : {False, bool}, optional Do not produce axes for the same parameter on both axes. Returns ------- fig : pyplot.figure The figure that was created. axis_dict : dict A dictionary mapping the parameter combinations to the axis and their location in the subplots grid; i.e., the key, values are: `{('param1', 'param2'): (pyplot.axes, row index, column index)}` """ if labels is None: labels = {p: p for p in parameters} elif any(p not in labels for p in parameters): raise ValueError("labels must be provided for all parameters") # Create figure with adequate size for number of parameters. ndim = len(parameters) if no_diagonals: ndim -= 1 if ndim < 3: fsize = (8, 7) else: fsize = (ndim*3 - 1, ndim*3 - 2) fig = pyplot.figure(figsize=fsize) # create the axis grid gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios, height_ratios=height_ratios, wspace=0.05, hspace=0.05) # create grid of axis numbers to easily create axes in the right locations axes = numpy.arange(ndim**2).reshape((ndim, ndim)) # Select possible combinations of plots and establish rows and columns. combos = list(itertools.combinations(parameters, 2)) # add the diagonals if not no_diagonals: combos += [(p, p) for p in parameters] # create the mapping between parameter combos and axes axis_dict = {} # cycle over all the axes, setting thing as needed for nrow in range(ndim): for ncolumn in range(ndim): ax = pyplot.subplot(gs[axes[nrow, ncolumn]]) # map to a parameter index px = parameters[ncolumn] if no_diagonals: py = parameters[nrow+1] else: py = parameters[nrow] if (px, py) in combos: axis_dict[px, py] = (ax, nrow, ncolumn) # x labels only on bottom if nrow + 1 == ndim: ax.set_xlabel('{}'.format(labels[px]), fontsize=18) else: pyplot.setp(ax.get_xticklabels(), visible=False) # y labels only on left if ncolumn == 0: ax.set_ylabel('{}'.format(labels[py]), fontsize=18) else: pyplot.setp(ax.get_yticklabels(), visible=False) else: # make non-used axes invisible ax.axis('off') return fig, axis_dict
python
{ "resource": "" }
q31765
construct_kde
train
def construct_kde(samples_array, use_kombine=False):
    """Constructs a KDE from the given samples.
    """
    if use_kombine:
        try:
            import kombine
        except ImportError:
            raise ImportError("kombine is not installed.")
        # construct the kde
        kde = kombine.clustered_kde.KDE(samples_array)
    else:
        kde = scipy.stats.gaussian_kde(samples_array.T)
    return kde
python
{ "resource": "" }
q31766
create_marginalized_hist
train
def create_marginalized_hist(ax, values, label, percentiles=None,
                             color='k', fillcolor='gray', linecolor='navy',
                             linestyle='-',
                             title=True, expected_value=None,
                             expected_color='red', rotated=False,
                             plot_min=None, plot_max=None):
    """Plots a 1D marginalized histogram of the given param from the given
    samples.

    Parameters
    ----------
    ax : pyplot.Axes
        The axes on which to draw the plot.
    values : array
        The parameter values to plot.
    label : str
        A label to use for the title.
    percentiles : {None, float or array}
        What percentiles to draw lines at. If None, will draw lines at
        `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and
        the median).
    color : {'k', string}
        What color to make the histogram; default is black.
    fillcolor : {'gray', string, or None}
        What color to fill the histogram with. Set to None to not fill the
        histogram. Default is 'gray'.
    linestyle : str, optional
        What line style to use for the histogram. Default is '-'.
    linecolor : {'navy', string}
        What color to use for the percentile lines. Default is 'navy'.
    title : bool, optional
        Add a title with an estimated value +/- uncertainty. The estimated
        value is the percentile halfway between the max/min of
        ``percentiles``, while the uncertainty is given by the max/min of
        the ``percentiles``. If no percentiles are specified, defaults to
        quoting the median +/- 95/5 percentiles.
    rotated : {False, bool}
        Plot the histogram on the y-axis instead of the x. Default is
        False.
    plot_min : {None, float}
        The minimum value to plot. If None, will default to whatever
        `pyplot` creates.
    plot_max : {None, float}
        The maximum value to plot. If None, will default to whatever
        `pyplot` creates.
    """
    if fillcolor is None:
        htype = 'step'
    else:
        htype = 'stepfilled'
    if rotated:
        orientation = 'horizontal'
    else:
        orientation = 'vertical'
    ax.hist(values, bins=50, histtype=htype, orientation=orientation,
            facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
            density=True)
    if percentiles is None:
        percentiles = [5., 50., 95.]
    if len(percentiles) > 0:
        plotp = numpy.percentile(values, percentiles)
    else:
        plotp = []
    for val in plotp:
        if rotated:
            ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
        else:
            ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
    # plot expected
    if expected_value is not None:
        if rotated:
            ax.axhline(expected_value, color=expected_color, lw=1.5,
                       zorder=2)
        else:
            ax.axvline(expected_value, color=expected_color, lw=1.5,
                       zorder=2)
    if title:
        if len(percentiles) > 0:
            minp = min(percentiles)
            maxp = max(percentiles)
            medp = (maxp + minp) / 2.
        else:
            minp = 5
            medp = 50
            maxp = 95
        values_min = numpy.percentile(values, minp)
        values_med = numpy.percentile(values, medp)
        values_max = numpy.percentile(values, maxp)
        negerror = values_med - values_min
        poserror = values_max - values_med
        fmt = '${0}$'.format(str_utils.format_value(
            values_med, negerror, plus_error=poserror))
        if rotated:
            ax.yaxis.set_label_position("right")
            # sets colored title for marginal histogram
            set_marginal_histogram_title(ax, fmt, color,
                                         label=label, rotated=rotated)
            # Remove x-ticks
            ax.set_xticks([])
            # turn off x-labels
            ax.set_xlabel('')
            # set limits
            ymin, ymax = ax.get_ylim()
            if plot_min is not None:
                ymin = plot_min
            if plot_max is not None:
                ymax = plot_max
            ax.set_ylim(ymin, ymax)
        else:
            # sets colored title for marginal histogram
            set_marginal_histogram_title(ax, fmt, color, label=label)
            # Remove y-ticks
            ax.set_yticks([])
            # turn off y-label
            ax.set_ylabel('')
            # set limits
            xmin, xmax = ax.get_xlim()
            if plot_min is not None:
                xmin = plot_min
            if plot_max is not None:
                xmax = plot_max
            ax.set_xlim(xmin, xmax)
python
{ "resource": "" }
q31767
set_marginal_histogram_title
train
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): """ Sets the title of the marginal histograms. Parameters ---------- ax : Axes The `Axes` instance for the plot. fmt : str The string to add to the title. color : str The color of the text to add to the title. label : str If title does not exist, then include label at beginning of the string. rotated : bool If `True` then rotate the text 270 degrees for sideways title. """ # get rotation angle of the title rotation = 270 if rotated else 0 # get how much to displace title on axes xscale = 1.05 if rotated else 0.0 if rotated: yscale = 1.0 elif len(ax.get_figure().axes) > 1: yscale = 1.15 else: yscale = 1.05 # get class that packs text boxes vertical or horizonitally packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker # if no title exists if not hasattr(ax, "title_boxes"): # create a text box title = "{} = {}".format(label, fmt) tbox1 = offsetbox.TextArea( title, textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) # save a list of text boxes as attribute for later ax.title_boxes = [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # else append existing title else: # delete old title ax.title_anchor.remove() # add new text box to list tbox1 = offsetbox.TextArea( " {}".format(fmt), textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) ax.title_boxes = ax.title_boxes + [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # add new title and keep reference to instance as an attribute anchored_ybox = offsetbox.AnchoredOffsetbox( loc=2, child=ybox, pad=0., frameon=False, bbox_to_anchor=(xscale, yscale), bbox_transform=ax.transAxes, borderpad=0.) ax.title_anchor = ax.add_artist(anchored_ybox)
python
{ "resource": "" }
q31768
remove_common_offset
train
def remove_common_offset(arr):
    """Given an array of data, removes a common offset > 1000, returning
    the removed value.
    """
    offset = 0
    isneg = (arr <= 0).all()
    # make sure all values have the same sign
    if isneg or (arr >= 0).all():
        # only remove offset if the minimum and maximum values are the same
        # order of magnitude and > O(1000)
        minpwr = numpy.log10(abs(arr).min())
        maxpwr = numpy.log10(abs(arr).max())
        if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3:
            offset = numpy.floor(10**minpwr)
            if isneg:
                offset *= -1
            arr = arr - offset
    return arr, int(offset)
python
{ "resource": "" }
q31769
reduce_ticks
train
def reduce_ticks(ax, which, maxticks=3):
    """Given a pyplot axis, resamples its `which`-axis ticks such that
    there are at most `maxticks` left.

    Parameters
    ----------
    ax : axis
        The axis to adjust.
    which : {'x' | 'y'}
        Which axis to adjust.
    maxticks : {3, int}
        Maximum number of ticks to use.

    Returns
    -------
    array
        An array of the selected ticks.
    """
    ticks = getattr(ax, 'get_{}ticks'.format(which))()
    if len(ticks) > maxticks:
        # make sure the left/right value is not at the edge
        minax, maxax = getattr(ax, 'get_{}lim'.format(which))()
        dw = abs(maxax-minax)/10.
        start_idx, end_idx = 0, len(ticks)
        if ticks[0] < minax + dw:
            start_idx += 1
        if ticks[-1] > maxax - dw:
            end_idx -= 1
        # get reduction factor
        fac = int(len(ticks) / maxticks)
        ticks = ticks[start_idx:end_idx:fac]
    return ticks
python
{ "resource": "" }
q31770
BinnedRatios.logregularize
train
def logregularize(self, epsilon=2**-1074): """ Find bins in the denominator that are 0, and set them to 1, while setting the corresponding bin in the numerator to float epsilon. This has the effect of allowing the logarithm of the ratio array to be evaluated without error. """ self.numerator.array[self.denominator.array == 0] = epsilon self.denominator.array[self.denominator.array == 0] = 1 return self
python
{ "resource": "" }
q31771
median_bias
train
def median_bias(n): """Calculate the bias of the median average PSD computed from `n` segments. Parameters ---------- n : int Number of segments used in PSD estimation. Returns ------- ans : float Calculated bias. Raises ------ ValueError For non-integer or non-positive `n`. Notes ----- See arXiv:gr-qc/0509116 appendix B for details. """ if type(n) is not int or n <= 0: raise ValueError('n must be a positive integer') if n >= 1000: return numpy.log(2) ans = 1 for i in range(1, int((n - 1) / 2 + 1)): ans += 1.0 / (2*i + 1) - 1.0 / (2*i) return ans
python
{ "resource": "" }
q31772
inverse_spectrum_truncation
train
def inverse_spectrum_truncation(psd, max_filter_len,
                                low_frequency_cutoff=None,
                                trunc_method=None):
    """Modify a PSD such that the impulse response associated with its
    inverse square root is no longer than `max_filter_len` time samples. In
    practice this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, float}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and
        `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError('low_frequency_cutoff must be within the '
                         'bandwidth of the PSD')

    N = (len(psd)-1)*2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f,
                              dtype=complex_same_precision_as(psd))

    inv_asd[0] = 0
    inv_asd[N//2] = 0
    q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f),
                   dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2
    if trunc_end < trunc_start:
        raise ValueError('Invalid value in inverse_spectrum_truncation')

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len//2]

    if trunc_start < trunc_end:
        q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f,
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
python
{ "resource": "" }
q31773
process_full_data
train
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp):
    """Read the zero-lag and time-lag triggers identified by templates in
    a specified range of chirp mass.

    Parameters
    ----------
    fname: str
        Path to the file that stores all the triggers
    rhomin: float
        Minimum value of SNR threshold (will need including ifar)
    mass1: array
        First mass of the waveform in the template bank
    mass2: array
        Second mass of the waveform in the template bank
    lo_mchirp: float
        Minimum chirp mass for the template
    hi_mchirp: float
        Maximum chirp mass for the template

    Returns
    -------
    dict
        Dictionary containing foreground triggers and background
        information
    """
    with h5py.File(fname, 'r') as bulk:
        id_bkg = bulk['background_exc/template_id'][:]
        id_fg = bulk['foreground/template_id'][:]

        mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg])
        bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg))
        idx_bkg = np.where(bound == 1)
        mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg])
        bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg))
        idx_fg = np.where(bound == 1)

        zerolagstat = bulk['foreground/stat'][:][idx_fg]
        cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg]
        dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg]

    return {'zerolagstat': zerolagstat[zerolagstat > rhomin],
            'dec_factors': dec_factors[cstat_back_exc > rhomin],
            'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
python
{ "resource": "" }
q31774
save_bkg_falloff
train
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin,
                     lo_mchirp, hi_mchirp):
    ''' Read the STATMAP files to derive the snr falloff for the background
    events, returning the binned background counts together with the
    foreground triggers. Bank file is also provided to restrict triggers
    to BBH templates.

    Parameters
    ----------
    fname_statmap: string
        STATMAP file containing trigger information
    fname_bank: string
        File name of the template bank
    path: string
        Destination where the output is saved
    rhomin: float
        Minimum value of SNR threshold (will need including ifar)
    lo_mchirp: float
        Minimum chirp mass for the template
    hi_mchirp: float
        Maximum chirp mass for the template
    '''
    with h5py.File(fname_bank, 'r') as bulk:
        mass1_bank = bulk['mass1'][:]
        mass2_bank = bulk['mass2'][:]

    full_data = process_full_data(fname_statmap, rhomin, mass1_bank,
                                  mass2_bank, lo_mchirp, hi_mchirp)

    max_bg_stat = np.max(full_data['cstat_back_exc'])
    bg_bins = np.linspace(rhomin, max_bg_stat, 76)
    bg_counts = np.histogram(full_data['cstat_back_exc'],
                             weights=full_data['dec_factors'],
                             bins=bg_bins)[0]

    zerolagstat = full_data['zerolagstat']
    coincs = zerolagstat[zerolagstat >= rhomin]

    bkg = (bg_bins[:-1], bg_bins[1:], bg_counts)

    return bkg, coincs
python
{ "resource": "" }
q31775
log_rho_bg
train
def log_rho_bg(trigs, bins, counts):
    ''' Calculate the log of background fall-off

    Parameters
    ----------
    trigs: array
        SNR values of all the triggers
    bins: array
        Bin edges of the histogrammed background triggers
    counts: array
        Counts in each histogram bin

    Returns
    -------
    array
    '''
    trigs = np.atleast_1d(trigs)

    N = sum(counts)

    assert np.all(trigs >= np.min(bins)), \
        'Trigger SNR values cannot all be below the lowest bin limit!'

    # If there are any triggers that are louder than the max bin, put one
    # fictitious count in a bin that extends from the limits of the slide
    # triggers out to the loudest trigger.
    # If there is no counts for a foreground trigger put a fictitious count
    # in the background bin

    if np.any(trigs >= np.max(bins)):
        N = N + 1
        #log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT

    log_rhos = []
    for t in trigs:
        if t >= np.max(bins):
            log_rhos.append(-log(N) - log(np.max(trigs) - bins[-1]))
        else:
            i = bisect.bisect(bins, t) - 1
            if counts[i] == 0:
                counts[i] = 1
            log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i])
                            - log(N))
    return np.array(log_rhos)
python
{ "resource": "" }
q31776
fgmc
train
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): ''' Function to fit the likelihood Fixme ''' Lb = np.random.uniform(0., maxfg, len(Rf)) pquit = 0 while pquit < 0.1: # quit when the posterior on Lf is very close to its prior nsamp = len(Lb) Rf_sel = np.random.choice(Rf, nsamp) vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) Lf = Rf_sel * vt log_Lf, log_Lb = log(Lf), log(Lb) plR = 0 for lfr in log_fg_ratios: plR += np.logaddexp(lfr + log_Lf, log_Lb) plR -= (Lf + Lb) plRn = plR - max(plR) idx = np.exp(plRn) > np.random.random(len(plRn)) pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] Lb = Lb[idx] return Rf_sel[idx], Lf[idx], Lb
python
{ "resource": "" }
q31777
_optm
train
def _optm(x, alpha, mu, sigma): '''Return probability density of skew-lognormal See scipy.optimize.curve_fit ''' return ss.skewnorm.pdf(x, alpha, mu, sigma)
python
{ "resource": "" }
q31778
draw_imf_samples
train
def draw_imf_samples(**kwargs): ''' Draw samples for power-law model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass ''' alpha_salpeter = kwargs.get('alpha', -2.35) nsamples = kwargs.get('nsamples', 1) min_mass = kwargs.get('min_mass', 5.) max_mass = kwargs.get('max_mass', 95.) max_mtotal = min_mass + max_mass a = (max_mass/min_mass)**(alpha_salpeter + 1.0) - 1.0 beta = 1.0 / (alpha_salpeter + 1.0) k = nsamples * int(1.5 + log(1 + 100./nsamples)) aa = min_mass * (1.0 + a * np.random.random(k))**beta bb = np.random.uniform(min_mass, aa, k) idx = np.where(aa + bb < max_mtotal) m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] return np.resize(m1, nsamples), np.resize(m2, nsamples)
python
{ "resource": "" }
q31779
draw_lnm_samples
train
def draw_lnm_samples(**kwargs): ''' Draw samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass ''' #PDF doesnt match with sampler nsamples = kwargs.get('nsamples', 1) min_mass = kwargs.get('min_mass', 5.) max_mass = kwargs.get('max_mass', 95.) max_mtotal = min_mass + max_mass lnmmin = log(min_mass) lnmmax = log(max_mass) k = nsamples * int(1.5 + log(1 + 100./nsamples)) aa = np.exp(np.random.uniform(lnmmin, lnmmax, k)) bb = np.exp(np.random.uniform(lnmmin, lnmmax, k)) idx = np.where(aa + bb < max_mtotal) m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] return np.resize(m1, nsamples), np.resize(m2, nsamples)
python
{ "resource": "" }
q31780
draw_flat_samples
train
def draw_flat_samples(**kwargs): ''' Draw samples for uniform in mass Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass ''' #PDF doesnt match with sampler nsamples = kwargs.get('nsamples', 1) min_mass = kwargs.get('min_mass', 1.) max_mass = kwargs.get('max_mass', 2.) m1 = np.random.uniform(min_mass, max_mass, nsamples) m2 = np.random.uniform(min_mass, max_mass, nsamples) return np.maximum(m1, m2), np.minimum(m1, m2)
python
{ "resource": "" }
q31781
mchirp_sampler_lnm
train
def mchirp_sampler_lnm(**kwargs): ''' Draw chirp mass samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population ''' m1, m2 = draw_lnm_samples(**kwargs) mchirp_astro = mchirp_from_mass1_mass2(m1, m2) return mchirp_astro
python
{ "resource": "" }
q31782
mchirp_sampler_imf
train
def mchirp_sampler_imf(**kwargs): ''' Draw chirp mass samples for power-law model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population ''' m1, m2 = draw_imf_samples(**kwargs) mchirp_astro = mchirp_from_mass1_mass2(m1, m2) return mchirp_astro
python
{ "resource": "" }
q31783
mchirp_sampler_flat
train
def mchirp_sampler_flat(**kwargs): ''' Draw chirp mass samples for flat in mass model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population ''' m1, m2 = draw_flat_samples(**kwargs) mchirp_astro = mchirp_from_mass1_mass2(m1, m2) return mchirp_astro
python
{ "resource": "" }
q31784
load_array
train
def load_array(path, group=None):
    """ Load an Array from a .hdf, .txt or .npy file. The
    default data types will be double precision floating point.

    Parameters
    ----------
    path : string
        Source file path. Must end with either .npy, .txt, or .hdf.

    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy, .txt, or .hdf.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        data = _numpy.load(path)
    elif ext == '.txt':
        data = _numpy.loadtxt(path)
    elif ext == '.hdf':
        key = 'data' if group is None else group
        return Array(h5py.File(path)[key])
    else:
        raise ValueError('Path must end with .npy, .hdf, or .txt')

    if data.ndim == 1:
        return Array(data)
    elif data.ndim == 2:
        return Array(data[:,0] + 1j*data[:,1])
    else:
        raise ValueError('File has %s dimensions, cannot convert to Array, '
                         'must be 1 (real) or 2 (complex)' % data.ndim)
python
{ "resource": "" }
q31785
Array._return
train
def _return(self, ary): """Wrap the ary to return an Array type """ if isinstance(ary, Array): return ary return Array(ary, copy=False)
python
{ "resource": "" }
q31786
Array._icheckother
train
def _icheckother(fn, self, other): """ Checks the input to in-place operations """ self._typecheck(other) if type(other) in _ALLOWED_SCALARS: if self.kind == 'real' and type(other) == complex: raise TypeError('dtypes are incompatible') other = force_precision_to_match(other, self.precision) elif isinstance(other, type(self)) or type(other) is Array: if len(other) != len(self): raise ValueError('lengths do not match') if self.kind == 'real' and other.kind == 'complex': raise TypeError('dtypes are incompatible') if other.precision == self.precision: _convert_to_scheme(other) other = other._data else: raise TypeError('precisions do not match') else: return NotImplemented return fn(self, other)
python
{ "resource": "" }
q31787
Array.almost_equal_elem
train
def almost_equal_elem(self, other, tol, relative=True):
    """
    Compare whether two array types are almost equal, element
    by element.

    If the 'relative' parameter is 'True' (the default) then the
    'tol' parameter (which must be positive) is interpreted as a
    relative tolerance, and the comparison returns 'True' only if
    abs(self[i]-other[i]) <= tol*abs(self[i])
    for all elements of the array.

    If 'relative' is 'False', then 'tol' is an absolute tolerance,
    and the comparison is true only if
    abs(self[i]-other[i]) <= tol
    for all elements of the array.

    Other meta-data (type, dtype, and length) must be exactly equal.
    If either object's memory lives on the GPU it will be copied to
    the CPU for the comparison, which may be slow. But the original
    object itself will not have its memory relocated nor scheme
    changed.

    Parameters
    ----------
    other
        Another Python object, that should be tested for
        almost-equality with 'self', element-by-element.
    tol
        A non-negative number, the tolerance, which is interpreted
        as either a relative tolerance (the default) or an absolute
        tolerance.
    relative
        A boolean, indicating whether 'tol' should be interpreted as
        a relative tolerance (if True, the default if this argument
        is omitted) or as an absolute tolerance (if relative is
        False).

    Returns
    -------
    boolean
        'True' if the data agree within the tolerance, as
        interpreted by the 'relative' keyword, and if the types,
        lengths, and dtypes are exactly the same.
    """
    # Check that the tolerance is non-negative and raise an
    # exception otherwise.
    if tol < 0:
        raise ValueError("Tolerance cannot be negative")
    # Check that the meta-data agree; the type check is written in
    # this way so that this method may be safely called from
    # subclasses as well.
    if type(other) != type(self):
        return False
    if self.dtype != other.dtype:
        return False
    if len(self) != len(other):
        return False

    # The numpy() method will move any GPU memory onto the CPU.
    # Slow, but the user was warned.
    diff = abs(self.numpy()-other.numpy())
    if relative:
        cmpary = tol*abs(self.numpy())
    else:
        cmpary = tol*ones(len(self), dtype=self.dtype)

    return (diff <= cmpary).all()
python
{ "resource": "" }
q31788
Array.almost_equal_norm
train
def almost_equal_norm(self, other, tol, relative=True):
    """
    Compare whether two array types are almost equal, normwise.

    If the 'relative' parameter is 'True' (the default) then the
    'tol' parameter (which must be positive) is interpreted as a
    relative tolerance, and the comparison returns 'True' only if
    abs(norm(self-other)) <= tol*abs(norm(self)).

    If 'relative' is 'False', then 'tol' is an absolute tolerance,
    and the comparison is true only if
    abs(norm(self-other)) <= tol

    Other meta-data (type, dtype, and length) must be exactly equal.
    If either object's memory lives on the GPU it will be copied to
    the CPU for the comparison, which may be slow. But the original
    object itself will not have its memory relocated nor scheme
    changed.

    Parameters
    ----------
    other
        another Python object, that should be tested for
        almost-equality with 'self', based on their norms.
    tol
        a non-negative number, the tolerance, which is interpreted
        as either a relative tolerance (the default) or an absolute
        tolerance.
    relative
        A boolean, indicating whether 'tol' should be interpreted as
        a relative tolerance (if True, the default if this argument
        is omitted) or as an absolute tolerance (if relative is
        False).

    Returns
    -------
    boolean
        'True' if the data agree within the tolerance, as
        interpreted by the 'relative' keyword, and if the types,
        lengths, and dtypes are exactly the same.
    """
    # Check that the tolerance is non-negative and raise an
    # exception otherwise.
    if tol < 0:
        raise ValueError("Tolerance cannot be negative")
    # Check that the meta-data agree; the type check is written in
    # this way so that this method may be safely called from
    # subclasses as well.
    if type(other) != type(self):
        return False
    if self.dtype != other.dtype:
        return False
    if len(self) != len(other):
        return False

    # The numpy() method will move any GPU memory onto the CPU.
    # Slow, but the user was warned.
    diff = self.numpy()-other.numpy()
    dnorm = norm(diff)
    if relative:
        return (dnorm <= tol*norm(self))
    else:
        return (dnorm <= tol)
python
{ "resource": "" }
q31789
Array.resize
train
def resize(self, new_size): """Resize self to new_size """ if new_size == len(self): return else: self._saved = LimitedSizeDict(size_limit=2**5) new_arr = zeros(new_size, dtype=self.dtype) if len(self) <= new_size: new_arr[0:len(self)] = self else: new_arr[:] = self[0:new_size] self._data = new_arr._data
python
{ "resource": "" }
q31790
Array.lal
train
def lal(self): """ Returns a LAL Object that contains this data """ lal_data = None if self._data.dtype == float32: lal_data = _lal.CreateREAL4Vector(len(self)) elif self._data.dtype == float64: lal_data = _lal.CreateREAL8Vector(len(self)) elif self._data.dtype == complex64: lal_data = _lal.CreateCOMPLEX8Vector(len(self)) elif self._data.dtype == complex128: lal_data = _lal.CreateCOMPLEX16Vector(len(self)) lal_data.data[:] = self.numpy() return lal_data
python
{ "resource": "" }
q31791
Array.save
train
def save(self, path, group=None):
    """
    Save array to a Numpy .npy, hdf, or text file. When saving a
    complex array as text, the real and imaginary parts are saved as the
    first and second column respectively. When using hdf format, the data
    is stored as a single vector, along with relevant attributes.

    Parameters
    ----------
    path: string
        Destination file path. Must end with either .hdf, .npy or .txt.

    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy, .txt, or .hdf.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        _numpy.save(path, self.numpy())
    elif ext == '.txt':
        if self.kind == 'real':
            _numpy.savetxt(path, self.numpy())
        elif self.kind == 'complex':
            output = _numpy.vstack((self.numpy().real,
                                    self.numpy().imag)).T
            _numpy.savetxt(path, output)
    elif ext == '.hdf':
        key = 'data' if group is None else group
        f = h5py.File(path)
        f.create_dataset(key, data=self.numpy(), compression='gzip',
                         compression_opts=9, shuffle=True)
    else:
        raise ValueError('Path must end with .npy, .txt, or .hdf')
python
{ "resource": "" }
q31792
Array.trim_zeros
train
def trim_zeros(self): """Remove the leading and trailing zeros. """ tmp = self.numpy() f = len(self)-len(_numpy.trim_zeros(tmp, trim='f')) b = len(self)-len(_numpy.trim_zeros(tmp, trim='b')) return self[f:len(self)-b]
python
{ "resource": "" }
q31793
two_column_layout
train
def two_column_layout(path, cols, **kwargs): """ Make a well layout in a two column format Parameters ---------- path: str Location to make the well html file cols: list of tuples The format of the items on the well result section. Each tuple contains the two files that are shown in the left and right hand side of a row in the well.html page. """ path = os.path.join(os.getcwd(), path, 'well.html') from pycbc.results.render import render_workflow_html_template render_workflow_html_template(path, 'two_column.html', cols, **kwargs)
python
{ "resource": "" }
q31794
single_layout
train
def single_layout(path, files, **kwargs):
    """ Make a well layout in single column format

    Parameters
    ----------
    path: str
        Location to make the well html file
    files: list of pycbc.workflow.core.Files
        This list of images to show in order within the well layout html
        file.
    """
    two_column_layout(path, [(f,) for f in files], **kwargs)
python
{ "resource": "" }
q31795
group_layout
train
def group_layout(path, files, **kwargs): """ Make a well layout in chunks of two from a list of files path: str Location to make the well html file files: list of pycbc.workflow.core.Files This list of images to show in order within the well layout html file. Every two are placed on the same row. """ if len(files) > 0: two_column_layout(path, list(grouper(files, 2)), **kwargs)
python
{ "resource": "" }
q31796
make_seg_table
train
def make_seg_table(workflow, seg_files, seg_names, out_dir, tags=None, title_text=None, description=None): """ Creates a node in the workflow for writing the segment summary table. Returns a File instances for the output file. """ seg_files = list(seg_files) seg_names = list(seg_names) if tags is None: tags = [] makedir(out_dir) node = PlotExecutable(workflow.cp, 'page_segtable', ifos=workflow.ifos, out_dir=out_dir, tags=tags).create_node() node.add_input_list_opt('--segment-files', seg_files) quoted_seg_names = [] for s in seg_names: quoted_seg_names.append("'" + s + "'") node.add_opt('--segment-names', ' '.join(quoted_seg_names)) if description: node.add_opt('--description', "'" + description + "'") if title_text: node.add_opt('--title-text', "'" + title_text + "'") node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') workflow += node return node.output_files[0]
python
{ "resource": "" }
q31797
make_veto_table
train
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None): """ Creates a node in the workflow for writing the veto_definer table. Returns a File instances for the output file. """ if vetodef_file is None: vetodef_file = workflow.cp.get_opt_tags("workflow-segments", "segments-veto-definer-file", []) file_url = urlparse.urljoin('file:', urllib.pathname2url(vetodef_file)) vdf_file = File(workflow.ifos, 'VETO_DEFINER', workflow.analysis_time, file_url=file_url) vdf_file.PFN(file_url, site='local') else: vdf_file = vetodef_file if tags is None: tags = [] makedir(out_dir) node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos, out_dir=out_dir, tags=tags).create_node() node.add_input_opt('--veto-definer-file', vdf_file) node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') workflow += node return node.output_files[0]
python
{ "resource": "" }
q31798
make_ifar_plot
train
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None, hierarchical_level=None): """ Creates a node in the workflow for plotting cumulative histogram of IFAR values. """ if hierarchical_level is not None and tags: tags = [("HIERARCHICAL_LEVEL_{:02d}".format( hierarchical_level))] + tags elif hierarchical_level is not None and not tags: tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] elif hierarchical_level is None and not tags: tags = [] makedir(out_dir) node = PlotExecutable(workflow.cp, 'page_ifar', ifos=workflow.ifos, out_dir=out_dir, tags=tags).create_node() node.add_input_opt('--trigger-file', trigger_file) if hierarchical_level is not None: node.add_opt('--use-hierarchical-level', hierarchical_level) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files[0]
python
{ "resource": "" }
q31799
MultiTemperedMetadataIO.write_sampler_metadata
train
def write_sampler_metadata(self, sampler): """Adds writing ntemps to file. """ super(MultiTemperedMetadataIO, self).write_sampler_metadata(sampler) self[self.sampler_group].attrs["ntemps"] = sampler.ntemps
python
{ "resource": "" }