code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
from gwpy.timeseries import TimeSeries

# prepare input
if isinstance(data, TimeSeries):
    duration = abs(data.span)
    sampling = data.sample_rate.to('Hz').value
    kwargs.update({'epoch': data.t0.value})
    data = data.fft().value
# return a raw Q-transform and its significance
qgram, N = QTiling(duration, sampling, mismatch=mismatch,
                   qrange=qrange, frange=frange).transform(data, **kwargs)
far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
return (qgram, far)
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE, frange=DEFAULT_FRANGE, duration=None, sampling=None, **kwargs)
Transform data by scanning over a `QTiling`

This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.

Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
    the time- or frequency-domain input data
mismatch : `float`, optional
    maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
    `(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
    `(low, high)` range of frequencies to scan
duration : `float`, optional
    duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
    sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
    other keyword arguments to be passed to :meth:`QTiling.transform`,
    including ``'epoch'`` and ``'search'``

Returns
-------
qgram : `QGram`
    the raw output of :meth:`QTiling.transform`
far : `float`
    expected false alarm rate (Hertz) of white Gaussian noise with the
    same peak energy and total duration as `qgram`
8.049634
5.680469
1.417072
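A minimal usage sketch of the function above; the import path and the default keyword values are assumptions from context, not confirmed by this record:

import numpy
from gwpy.timeseries import TimeSeries
from gwpy.signal.qtransform import q_scan  # assumed module path

# scan one second of white noise and inspect the false alarm rate
data = TimeSeries(numpy.random.normal(size=1024), sample_rate=1024)
qgram, far = q_scan(data)
print(far)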
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes  # pylint: disable=invalid-name
for i in range(nplanes):
    yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5))
def _iter_qs(self)
Iterate over the Q values
6.226251
5.835938
1.066881
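The Qs above are spaced evenly in log(Q); a standalone sketch of the same placement, with qrange=(4, 64) and deltam=0.2 assumed for illustration:

from math import ceil, exp, log

qmin, qmax, deltam = 4, 64, 0.2
cumum = log(qmax / qmin) / 2 ** 0.5
nplanes = max(int(ceil(cumum / deltam)), 1)
dq = cumum / nplanes
print([round(qmin * exp(2 ** 0.5 * dq * (i + .5)), 2)
       for i in range(nplanes)])  # 10 Qs from ~4.59 to ~55.7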
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
    nplanes += 1
    nind += sum([1 + row.ntiles * row.deltam for row in plane])
    result = plane.transform(fseries, **kwargs)
    if result.peak['energy'] > peak:
        out = result
        peak = out.peak['energy']
return (out, nind * weight / nplanes)
def transform(self, fseries, **kwargs)
Compute the time-frequency plane at fixed Q with the most significant tile

Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
    the complex FFT of a time-series data set
**kwargs
    other keyword arguments to pass to `QPlane.transform`

Returns
-------
out : `QGram`
    signal energies over the time-frequency plane containing the most
    significant tile
N : `int`
    estimated number of statistically independent tiles

See Also
--------
QPlane.transform
    compute the Q-transform over a single time-frequency plane
7.809713
7.946497
0.982787
# work out how many frequencies we need
minf, maxf = self.frange
fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
fstep = fcum_mismatch / nfreq
fstepmin = 1 / self.duration
# for each frequency, yield a QTile
for i in range(nfreq):
    yield (minf * exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep) //
           fstepmin * fstepmin)
def _iter_frequencies(self)
Iterate over the frequencies of this `QPlane`
7.509758
6.851018
1.096152
bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
return self.frequencies - bandwidths / 2.
def farray(self)
Array of frequencies for the lower edge of each frequency bin

:type: `numpy.ndarray`
12.14434
13.053366
0.930361
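The bandwidth model above is b(f) = 2*sqrt(pi)*f/Q, so each lower bin edge sits at f - sqrt(pi)*f/Q; a quick numeric check with assumed values:

import numpy

q = 12.
frequencies = numpy.array([100., 200.])
bandwidths = 2 * numpy.pi ** 0.5 * frequencies / q
print(frequencies - bandwidths / 2.)  # approximately [85.23, 170.45]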
out = []
for qtile in self:
    # get energy from transform
    out.append(qtile.transform(fseries, norm=norm, epoch=epoch))
return QGram(self, out, search)
def transform(self, fseries, norm=True, epoch=None, search=None)
Calculate the energy `TimeSeries` for the given `fseries`

Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
    the complex FFT of a time-series data set
norm : `bool`, `str`, optional
    normalize the energy of the output by the median (if `True` or
    ``'median'``) or the ``'mean'``; if `False` the output is the
    complex `~numpy.fft.ifft` output of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
    the epoch of these data, only used for metadata in the output
    `TimeSeries`, and not required if the input `fseries` has the
    epoch populated
search : `~gwpy.segments.Segment`, optional
    search window of interest to determine the loudest Q-plane

Returns
-------
results : `QGram`
    the complex energies of the Q-transform of the input `fseries`
    at each frequency

See Also
--------
QTile.transform
    for details on the transform over a row of `(Q, frequency)` tiles
QGram
    an object with energies populated over time-frequency tiles
9.627724
5.7957
1.661184
tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
return next_power_of_two(tcum_mismatch / self.deltam)
def ntiles(self)
The number of tiles in this row

:type: `int`
20.652765
27.337934
0.755462
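A standalone sketch of the same tile count, assuming next_power_of_two rounds up to the nearest power of two and taking illustrative values:

from math import ceil, log2, pi

duration, frequency, q, deltam = 32., 100., 12., 0.2
tcum_mismatch = duration * 2 * pi * frequency / q
print(2 ** ceil(log2(tcum_mismatch / deltam)))  # 16384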
# real frequencies
wfrequencies = self._get_indices() / self.duration
# dimensionless frequencies
xfrequencies = wfrequencies * self.qprime / self.frequency
# normalize and generate bi-square window
norm = self.ntiles / (self.duration * self.sampling) * (
    315 * self.qprime / (128 * self.frequency)) ** (1/2.)
return (1 - xfrequencies ** 2) ** 2 * norm
def get_window(self)
Generate the bi-square window for this row

Returns
-------
window : `numpy.ndarray`
13.165977
12.536674
1.050197
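The window shape in isolation is the bi-square (Tukey biweight) w(x) = (1 - x**2)**2 on |x| <= 1, before the Q-dependent normalisation applied above; a quick standalone check:

import numpy

x = numpy.linspace(-1, 1, 5)
print((1 - x ** 2) ** 2)  # [0. 0.5625 1. 0.5625 0.]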
return numpy.round(self._get_indices() + 1 + self.frequency * self.duration).astype(int)
def get_data_indices(self)
Returns the index array of interesting frequencies for this row
15.936829
12.221923
1.303954
pad = self.ntiles - self.windowsize
return (int((pad - 1)/2.), int((pad + 1)/2.))
def padding(self)
The `(left, right)` padding required for the IFFT

:type: `tuple` of `int`
8.147497
9.359472
0.870508
from ..timeseries import TimeSeries

windowed = fseries[self.get_data_indices()] * self.get_window()
# pad data, move negative frequencies to the end, and IFFT
padded = numpy.pad(windowed, self.padding, mode='constant')
wenergy = npfft.ifftshift(padded)
# return a `TimeSeries`
if epoch is None:
    epoch = fseries.epoch
tdenergy = npfft.ifft(wenergy)
cenergy = TimeSeries(tdenergy, x0=epoch,
                     dx=self.duration/tdenergy.size, copy=False)
energy = type(cenergy)(
    cenergy.value.real ** 2. + cenergy.value.imag ** 2.,
    x0=cenergy.x0, dx=cenergy.dx, copy=False)
if norm:
    norm = norm.lower() if isinstance(norm, string_types) else norm
    if norm in (True, 'median'):
        narray = energy / energy.median()
    elif norm in ('mean',):
        narray = energy / energy.mean()
    else:
        raise ValueError("Invalid normalisation %r" % norm)
    return narray.astype("float32", casting="same_kind", copy=False)
return energy
def transform(self, fseries, norm=True, epoch=None)
Calculate the energy `TimeSeries` for the given `fseries`

Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
    the complex FFT of a time-series data set
norm : `bool`, `str`, optional
    normalize the energy of the output by the median (if `True` or
    ``'median'``) or the ``'mean'``; if `False` the output is the
    energy (power) of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
    the epoch of these data, only used for metadata in the output
    `TimeSeries`, and not required if the input `fseries` has the
    epoch populated

Returns
-------
energy : `~gwpy.timeseries.TimeSeries`
    a `TimeSeries` of the energy from the Q-transform of this tile
    against the data
5.171618
4.548695
1.136945
from ..table import EventTable

# get plane properties
freqs = self.plane.frequencies
bws = 2 * (freqs - self.plane.farray)
# collect table data as a recarray
names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
for f, bw, row in zip(freqs, bws, self.energies):
    ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
    new = ind.size
    if new > 0:
        rec.resize((rec.size + new,), refcheck=False)
        rec['time'][-new:] = row.times.value[ind]
        rec['frequency'][-new:] = f
        rec['duration'][-new:] = row.dt.to('s').value
        rec['bandwidth'][-new:] = bw
        rec['energy'][-new:] = row.value[ind]
# save to a table
out = EventTable(rec, copy=False)
out.meta['q'] = self.plane.q
return out
def table(self, snrthresh=5.5)
Represent this `QPlane` as an `EventTable`

Parameters
----------
snrthresh : `float`, optional
    lower inclusive threshold on individual tile SNR to keep in the
    table, default: 5.5

Returns
-------
out : `~gwpy.table.EventTable`
    a table of time-frequency tiles on this `QPlane`

Notes
-----
Only tiles with signal energy greater than or equal to
``snrthresh ** 2 / 2`` will be stored in the output `EventTable`.
4.247115
3.837932
1.106616
if dtype is None:
    dtype = max(
        numpy.array(start, subok=True, copy=False).dtype,
        numpy.array(step, subok=True, copy=False).dtype,
    )
start = start.astype(dtype, copy=False)
step = step.astype(dtype, copy=False)
return cls(start + numpy.arange(num, dtype=dtype) * step, copy=False)
def define(cls, start, step, num, dtype=None)
Define a new `Index`.

The output is basically::

    start + numpy.arange(num) * step

Parameters
----------
start : `Number`
    The starting value of the index.
step : `Number`
    The step size of the index.
num : `int`
    The size of the index (number of samples).
dtype : `numpy.dtype`, `None`, optional
    The desired dtype of the index; if not given, defaults to the
    higher-precision dtype from ``start`` and ``step``.

Returns
-------
index : `Index`
    A new `Index` created from the given parameters.
2.505182
2.512081
0.997254
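A usage sketch, assuming the Index class is importable from gwpy.types.index and accepts `~astropy.units.Quantity` inputs:

from astropy.units import Quantity
from gwpy.types.index import Index  # assumed import path

idx = Index.define(Quantity(0., 's'), Quantity(0.5, 's'), 4)
print(idx)  # [0., 0.5, 1., 1.5] s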
try:
    return self.info.meta['regular']
except (TypeError, KeyError):
    if self.info.meta is None:
        self.info.meta = {}
    self.info.meta['regular'] = self.is_regular()
    return self.info.meta['regular']
def regular(self)
`True` if this index is linearly increasing
2.93454
2.872317
1.021663
if self.size <= 1:
    return False
return numpy.isclose(numpy.diff(self.value, n=2), 0).all()
def is_regular(self)
Determine whether this `Index` contains linearly increasing samples

This also works for a linear decrease.
6.05268
5.467777
1.106973
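The regularity test above checks that second differences vanish; a quick standalone illustration of the same check:

import numpy

print(numpy.isclose(numpy.diff([0., 1., 2., 3.], n=2), 0).all())  # True
print(numpy.isclose(numpy.diff([0., 1., 4., 9.], n=2), 0).all())  # False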
if not args:  # only default treename if args not given
    kwargs.setdefault('treename', 'triggers')
return EventTable.read(source, *args, format='root', **kwargs)
def table_from_omicron(source, *args, **kwargs)
Read an `EventTable` from an Omicron ROOT file

This function just redirects to the ``format='root'`` reader with
appropriate defaults.
12.618601
6.758979
1.866939
out = []
args = list(args)
while args:
    try:
        plotter = self._plot_method(args[0])
    except TypeError:
        break
    out.append(plotter(args[0], **kwargs))
    args.pop(0)
if args:
    out.extend(super(SegmentAxes, self).plot(*args, **kwargs))
self.autoscale(enable=None, axis='both', tight=False)
return out
def plot(self, *args, **kwargs)
Plot data onto these axes

Parameters
----------
args
    a single instance of

    - `~gwpy.segments.DataQualityFlag`
    - `~gwpy.segments.Segment`
    - `~gwpy.segments.SegmentList`
    - `~gwpy.segments.SegmentListDict`

    or equivalent types upstream from :mod:`ligo.segments`
kwargs
    keyword arguments applicable to `~matplotlib.axes.Axes.plot`

Returns
-------
Line2D
    the `~matplotlib.lines.Line2D` for this line layer

See Also
--------
matplotlib.axes.Axes.plot
    for a full description of acceptable ``*args`` and ``**kwargs``
3.863133
4.37337
0.883331
out = []
for lab, flag in flags.items():
    if label.lower() == 'name':
        lab = flag.name
    elif label.lower() != 'key':
        lab = label
    out.append(self.plot_flag(flag, label=to_string(lab), known=known,
                              **kwargs))
return out
def plot_dict(self, flags, label='key', known='x', **kwargs)
Plot a `~gwpy.segments.DataQualityDict` onto these axes

Parameters
----------
flags : `~gwpy.segments.DataQualityDict`
    data-quality dict to display
label : `str`, optional
    labelling system to use, or fixed label for all
    `DataQualityFlags`; special values include

    - ``'key'``: use the key of the `DataQualityDict`,
    - ``'name'``: use the :attr:`~DataQualityFlag.name` of the
      `DataQualityFlag`

    if anything else, that fixed label will be used for all lines
known : `str`, `dict`, `None`, default: ``'x'``
    display known segments with the given hatching, or give a dict of
    keyword arguments to pass to
    :meth:`~SegmentAxes.plot_segmentlist`, or `None` to hide
**kwargs
    any other keyword arguments acceptable for
    `~matplotlib.patches.Rectangle`

Returns
-------
collection : `~matplotlib.patches.PatchCollection`
    list of `~matplotlib.patches.Rectangle` patches
4.053547
3.828454
1.058795
# get y axis position
if y is None:
    y = self.get_next_y()
# default a 'good' flag to green segments and vice-versa
if flag.isgood:
    kwargs.setdefault('facecolor', '#33cc33')
    kwargs.setdefault('known', '#ff0000')
else:
    kwargs.setdefault('facecolor', '#ff0000')
    kwargs.setdefault('known', '#33cc33')
known = kwargs.pop('known')
# get flag name
name = kwargs.pop('label', flag.label or flag.name)
# make active collection
kwargs.setdefault('zorder', 0)
coll = self.plot_segmentlist(flag.active, y=y, label=name, **kwargs)
# make known collection
if known not in (None, False):
    known_kw = {
        'facecolor': coll.get_facecolor()[0],
        'collection': 'ignore',
        'zorder': -1000,
    }
    if isinstance(known, dict):
        known_kw.update(known)
    elif known == 'fancy':
        known_kw.update(height=kwargs.get('height', .8)*.05)
    elif known in HATCHES:
        known_kw.update(fill=False, hatch=known)
    else:
        known_kw.update(fill=True, facecolor=known,
                        height=kwargs.get('height', .8)*.5)
    self.plot_segmentlist(flag.known, y=y, label=name, **known_kw)
return coll
def plot_flag(self, flag, y=None, **kwargs)
Plot a `~gwpy.segments.DataQualityFlag` onto these axes.

Parameters
----------
flag : `~gwpy.segments.DataQualityFlag`
    Data-quality flag to display.
y : `float`, optional
    Y-axis value for new segments.
height : `float`, optional
    Height for each segment, default: `0.8`.
known : `str`, `dict`, `None`
    One of the following

    - ``'fancy'`` - to use fancy format (try it and see)
    - ``'x'`` (or similar) - to use hatching
    - `str` to specify ``facecolor`` for known segmentlist
    - `dict` of kwargs to use
    - `None` to ignore known segmentlist

**kwargs
    Any other keyword arguments acceptable for
    `~matplotlib.patches.Rectangle`.

Returns
-------
collection : `~matplotlib.patches.PatchCollection`
    list of `~matplotlib.patches.Rectangle` patches for active
    segments
3.664616
3.215161
1.139792
# get colour
facecolor = kwargs.pop('facecolor', kwargs.pop('color', '#629fca'))
if is_color_like(facecolor):
    kwargs.setdefault('edgecolor', tint(facecolor, factor=.5))
# get y
if y is None:
    y = self.get_next_y()
# build patches
patches = [SegmentRectangle(seg, y, height=height,
                            facecolor=facecolor, **kwargs)
           for seg in segmentlist]
if collection:  # map to PatchCollection
    coll = PatchCollection(patches, match_original=patches,
                           zorder=kwargs.get('zorder', 1))
    coll.set_rasterized(rasterized)
    coll._ignore = collection == 'ignore'
    coll._ypos = y
    out = self.add_collection(coll)
    # reset label with tex-formatting now
    # matplotlib default label is applied by add_collection
    # so we can only replace the leading underscore after this point
    if label is None:
        label = coll.get_label()
    coll.set_label(to_string(label))
else:
    out = []
    for patch in patches:
        patch.set_label(label)
        patch.set_rasterized(rasterized)
        label = ''
        out.append(self.add_patch(patch))
self.autoscale(enable=None, axis='both', tight=False)
return out
def plot_segmentlist(self, segmentlist, y=None, height=.8, label=None, collection=True, rasterized=None, **kwargs)
Plot a `~gwpy.segments.SegmentList` onto these axes

Parameters
----------
segmentlist : `~gwpy.segments.SegmentList`
    list of segments to display
y : `float`, optional
    y-axis value for new segments
collection : `bool`, default: `True`
    add all patches as a `~matplotlib.collections.PatchCollection`,
    doesn't seem to work for hatched rectangles
label : `str`, optional
    custom descriptive name to print as y-axis tick label
**kwargs
    any other keyword arguments acceptable for
    `~matplotlib.patches.Rectangle`

Returns
-------
collection : `~matplotlib.patches.PatchCollection`
    list of `~matplotlib.patches.Rectangle` patches
5.102679
5.231642
0.975349
if y is None:
    y = self.get_next_y()
collections = []
for name, segmentlist in segmentlistdict.items():
    collections.append(self.plot_segmentlist(segmentlist, y=y,
                                             label=name, **kwargs))
    y += dy
return collections
def plot_segmentlistdict(self, segmentlistdict, y=None, dy=1, **kwargs)
Plot a `~gwpy.segments.SegmentListDict` onto these axes

Parameters
----------
segmentlistdict : `~gwpy.segments.SegmentListDict`
    (name, `~gwpy.segments.SegmentList`) dict
y : `float`, optional
    starting y-axis value for new segmentlists
**kwargs
    any other keyword arguments acceptable for
    `~matplotlib.patches.Rectangle`

Returns
-------
collections : `list`
    list of `~matplotlib.patches.PatchCollection` sets for each
    segmentlist
2.813086
2.984505
0.942564
if ignore is None:
    return self.collections
return [c for c in self.collections
        if getattr(c, '_ignore', None) == ignore]
def get_collections(self, ignore=None)
Return the collections matching the given `_ignore` value

Parameters
----------
ignore : `bool`, or `None`
    value of `_ignore` to match

Returns
-------
collections : `list`
    if ``ignore=None``, simply returns all collections, otherwise
    returns those collections matching the ``ignore`` parameter
4.081553
4.538357
0.899346
try:
    out = subprocess.check_output(['klist', '-k', keytab],
                                  stderr=subprocess.PIPE)
except OSError:
    raise KerberosError("Failed to locate klist, cannot read keytab")
except subprocess.CalledProcessError:
    raise KerberosError("Cannot read keytab {!r}".format(keytab))
principals = []
for line in out.splitlines():
    if isinstance(line, bytes):
        line = line.decode('utf-8')
    try:
        kvno, principal, = re.split(r'\s+', line.strip(' '), 1)
    except ValueError:
        continue
    else:
        if not kvno.isdigit():
            continue
        principals.append(tuple(principal.split('@')) + (int(kvno),))
# return unique, ordered list
return list(OrderedDict.fromkeys(principals).keys())
def parse_keytab(keytab)
Read the contents of a KRB5 keytab file, returning a list of
credentials listed within

Parameters
----------
keytab : `str`
    path to keytab file

Returns
-------
creds : `list` of `tuple`
    the (unique) list of `(username, realm, kvno)` as read from the
    keytab file

Examples
--------
>>> from gwpy.io.kerberos import parse_keytab
>>> print(parse_keytab("creds.keytab"))
[('albert.einstein', 'LIGO.ORG', 1)]
3.070652
3.08767
0.994489
try:
    return self._y0
except AttributeError:
    self._y0 = Quantity(0, self.yunit)
    return self._y0
def y0(self)
Y-axis coordinate of the first data point

:type: `~astropy.units.Quantity` scalar
3.813336
3.459409
1.102309
try:
    return self._dy
except AttributeError:
    try:
        self._yindex
    except AttributeError:
        self._dy = Quantity(1, self.yunit)
    else:
        if not self.yindex.regular:
            raise AttributeError(
                "This series has an irregular y-axis "
                "index, so 'dy' is not well defined")
        self._dy = self.yindex[1] - self.yindex[0]
    return self._dy
def dy(self)
Y-axis sample separation

:type: `~astropy.units.Quantity` scalar
4.060012
3.781729
1.073586
try:
    return self._dy.unit
except AttributeError:
    try:
        return self._y0.unit
    except AttributeError:
        return self._default_yunit
def yunit(self)
Unit of Y-axis index

:type: `~astropy.units.Unit`
4.880557
4.997509
0.976598
try:
    return self._yindex
except AttributeError:
    self._yindex = Index.define(self.y0, self.dy, self.shape[1])
    return self._yindex
def yindex(self)
Positions of the data on the y-axis

:type: `~astropy.units.Quantity` array
4.514862
5.100659
0.885153
super(Array2D, self).is_compatible(other)
# check y-axis metadata
if isinstance(other, type(self)):
    try:
        if not self.dy == other.dy:
            raise ValueError("%s sample sizes do not match: "
                             "%s vs %s." % (type(self).__name__,
                                            self.dy, other.dy))
    except AttributeError:
        raise ValueError("Series with irregular y-indexes cannot "
                         "be compatible")
return True
def is_compatible(self, other)
Check whether this array and ``other`` have compatible metadata
5.431916
4.919536
1.104152
x = Quantity(x, self.xindex.unit).value
y = Quantity(y, self.yindex.unit).value
try:
    idx = (self.xindex.value == x).nonzero()[0][0]
except IndexError as exc:
    exc.args = ("Value %r not found in array xindex" % x,)
    raise
try:
    idy = (self.yindex.value == y).nonzero()[0][0]
except IndexError as exc:
    exc.args = ("Value %r not found in array yindex" % y,)
    raise
return self[idx, idy]
def value_at(self, x, y)
Return the value of this `Series` at the given `(x, y)` coordinates

Parameters
----------
x : `float`, `~astropy.units.Quantity`
    the `xindex` value at which to search
y : `float`, `~astropy.units.Quantity`
    the `yindex` value at which to search

Returns
-------
z : `~astropy.units.Quantity`
    the value of this Series at the given coordinates
2.474875
2.476393
0.999387
names = []
for group in h5group:
    try:
        names.append(h5group[group].attrs['name'])
    except KeyError:
        if strict:
            raise
        continue
return names
def find_flag_groups(h5group, strict=True)
Returns all HDF5 Groups under the given group that contain a flag

The check is just that the sub-group has a ``'name'`` attribute, so
it's not fool-proof by any means.

Parameters
----------
h5group : `h5py.Group`
    the parent group in which to search
strict : `bool`, optional, default: `True`
    if `True` raise an exception for any sub-group that doesn't have
    a name, otherwise just return all of those that do

Raises
------
KeyError
    if a sub-group doesn't have a ``'name'`` attribute and
    ``strict=True``
2.603215
3.751809
0.693856
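A minimal sketch with a scratch file, assuming only h5py and the find_flag_groups function above:

import h5py

with h5py.File('flags.h5', 'w') as h5f:
    h5f.create_group('A').attrs['name'] = 'X1:TEST-FLAG:1'
    h5f.create_group('B')  # no 'name' attribute
with h5py.File('flags.h5', 'r') as h5f:
    print(find_flag_groups(h5f, strict=False))  # ['X1:TEST-FLAG:1']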
return (
    isinstance(obj, h5py.Group) and
    isinstance(obj.get("active"), h5py.Dataset) and
    isinstance(obj.get("known"), h5py.Dataset)
)
def _is_flag_group(obj)
Returns `True` if `obj` is an `h5py.Group` that looks like it
contains a flag
3.78826
3.66244
1.034354
flag_groups = []

def _find(name, obj):
    if _is_flag_group(obj):
        flag_groups.append(name)

h5f.visititems(_find)
return flag_groups
def _find_flag_groups(h5f)
Return all groups in `h5f` that look like flags
2.887212
2.5863
1.116348
# if user chose the path, just use it
if path:
    return h5f[path]
# if the user gave us the group directly, use it
if _is_flag_group(h5f):
    return h5f
# otherwise try and find a single group that matches
try:
    path, = _find_flag_groups(h5f)
except ValueError:
    pass
else:
    return h5f[path]
# if not exactly 1 valid group in the file, complain
raise ValueError(
    "please pass a valid HDF5 Group, or specify the HDF5 Group "
    "path via the ``path=`` keyword argument",
)
def _get_flag_group(h5f, path)
Determine the group to use in order to read a flag
5.121419
4.980146
1.028367
# extract correct group
dataset = _get_flag_group(h5f, path)
# read dataset
active = SegmentList.read(dataset['active'], format='hdf5',
                          gpstype=gpstype)
try:
    known = SegmentList.read(dataset['known'], format='hdf5',
                             gpstype=gpstype)
except KeyError as first_keyerror:
    try:
        known = SegmentList.read(dataset['valid'], format='hdf5',
                                 gpstype=gpstype)
    except KeyError:
        raise first_keyerror
return DataQualityFlag(active=active, known=known,
                       **dict(dataset.attrs))
def read_hdf5_flag(h5f, path=None, gpstype=LIGOTimeGPS)
Read a `DataQualityFlag` object from an HDF5 file or group.
4.25126
3.727333
1.140564
# find dataset
dataset = io_hdf5.find_dataset(h5f, path=path)
segtable = Table.read(dataset, format='hdf5', **kwargs)
out = SegmentList()
for row in segtable:
    start = LIGOTimeGPS(int(row['start_time']),
                        int(row['start_time_ns']))
    end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
    if gpstype is LIGOTimeGPS:
        out.append(Segment(start, end))
    else:
        out.append(Segment(gpstype(start), gpstype(end)))
return out
def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs)
Read a `SegmentList` object from an HDF5 file or group.
2.451719
2.412496
1.016258
if path:
    h5f = h5f[path]
# allow alternative keyword argument name (FIXME)
if names is None:
    names = kwargs.pop('flags', None)
# try and get list of names automatically
if names is None:
    try:
        names = find_flag_groups(h5f, strict=True)
    except KeyError:
        names = None
if not names:
    raise ValueError("Failed to automatically parse available flag "
                     "names from HDF5, please give a list of names "
                     "to read via the ``names=`` keyword")
# read data
out = DataQualityDict()
for name in names:
    try:
        out[name] = read_hdf5_flag(h5f, name, **kwargs)
    except KeyError as exc:
        if on_missing == 'ignore':
            pass
        elif on_missing == 'warn':
            warnings.warn(str(exc))
        else:
            raise ValueError('no H5Group found for flag '
                             '{0!r}'.format(name))
return out
def read_hdf5_dict(h5f, names=None, path=None, on_missing='error', **kwargs)
Read a `DataQualityDict` from an HDF5 file
4.029649
3.729529
1.080471
# write segmentlists
flag.active.write(h5group, 'active', **kwargs)
kwargs['append'] = True
flag.known.write(h5group, 'known', **kwargs)
# store metadata
for attr in ['name', 'label', 'category', 'description', 'isgood',
             'padding']:
    value = getattr(flag, attr)
    if value is None:
        continue
    elif isinstance(value, Quantity):
        h5group.attrs[attr] = value.value
    elif isinstance(value, UnitBase):
        h5group.attrs[attr] = str(value)
    else:
        h5group.attrs[attr] = value
return h5group
def write_hdf5_flag_group(flag, h5group, **kwargs)
Write a `DataQualityFlag` into the given HDF5 group
3.481646
3.609586
0.964556
if path:
    try:
        parent = output[path]
    except KeyError:
        parent = output.create_group(path)
else:
    parent = output
for name in flags:
    # handle existing group
    if name in parent:
        if not (overwrite and append):
            raise IOError("Group '%s' already exists, give "
                          "``append=True, overwrite=True`` to "
                          "overwrite it"
                          % os.path.join(parent.name, name))
        del parent[name]
    # create group
    group = parent.create_group(name)
    # write flag
    write_hdf5_flag_group(flags[name], group, **kwargs)
def write_hdf5_dict(flags, output, path=None, append=False, overwrite=False, **kwargs)
Write a `dict` of `DataQualityFlag` objects to a `h5py.Group`.

This allows writing to an HDF5-format file.

Parameters
----------
output : `str`, :class:`h5py.Group`
    path to new output file, or open h5py `Group` to write to.
path : `str`
    the HDF5 group path in which to write a new group for each flag
**kwargs
    other keyword arguments passed to
    :meth:`h5py.Group.create_dataset`

Returns
-------
dqfgroup : :class:`h5py.Group`
    HDF group containing these data. This group contains 'active'
    and 'known' datasets, and metadata attrs.

See Also
--------
astropy.io
    for details on acceptable keyword arguments when writing a
    :class:`~astropy.table.Table` to HDF5
3.083696
3.516668
0.87688
# verify path (default to flag name)
if path is None:
    path = flag.name
if path is None:
    raise ValueError("Cannot determine target group name for flag in "
                     "HDF5 structure, please set `name` for each "
                     "flag, or specify the ``path`` keyword when "
                     "writing")
return write_hdf5_dict({path: flag}, output, **kwargs)
def write_hdf5_flag(flag, output, path=None, **kwargs)
Write a `DataQualityFlag` to an HDF5 file/group
6.898448
7.303953
0.944481
if path is None:
    raise ValueError("Please specify the HDF5 path via the "
                     "``path=`` keyword argument")
# convert segmentlist to Table
data = numpy.zeros((len(seglist), 4), dtype=int)
for i, seg in enumerate(seglist):
    start, end = map(LIGOTimeGPS, seg)
    data[i, :] = (start.gpsSeconds, start.gpsNanoSeconds,
                  end.gpsSeconds, end.gpsNanoSeconds)
segtable = Table(data, names=['start_time', 'start_time_ns',
                              'end_time', 'end_time_ns'])
# write table to HDF5
return segtable.write(output, path=path, format='hdf5', **kwargs)
def write_hdf5_segmentlist(seglist, output, path=None, **kwargs)
Write a `SegmentList` to an HDF5 file/group

Parameters
----------
seglist : :class:`~ligo.segments.segmentlist`
    data to write
output : `str`, `h5py.File`, `h5py.Group`
    filename or HDF5 object to write to
path : `str`
    path to which to write inside the HDF5 file, relative to
    ``output``
**kwargs
    other keyword arguments are passed to
    :meth:`~astropy.table.Table.write`
3.223353
3.329922
0.967997
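A round-trip sketch via the public GWpy API; the registration of these readers/writers under format='hdf5' is assumed:

from gwpy.segments import Segment, SegmentList

segs = SegmentList([Segment(0, 10), Segment(20, 30)])
segs.write('segs.h5', path='segments', format='hdf5')
print(SegmentList.read('segs.h5', path='segments', format='hdf5'))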
if x == 0.:
    return '0'
base_str = format % x
if "e" not in base_str:
    return base_str
mantissa, exponent = base_str.split("e")
if float(mantissa).is_integer():
    mantissa = int(float(mantissa))
exponent = exponent.lstrip("0+")
if exponent.startswith('-0'):
    exponent = '-' + exponent[2:]
if float(mantissa) == 1.0:
    return r"10^{%s}" % exponent
return r"%s\!\!\times\!\!10^{%s}" % (mantissa, exponent)
def float_to_latex(x, format="%.2g")  # pylint: disable=redefined-builtin
r"""Convert a floating point number to a latex representation. In particular, scientific notation is handled gracefully: e -> 10^ Parameters ---------- x : `float` the number to represent format : `str`, optional the output string format Returns ------- tex : `str` a TeX representation of the input Examples -------- >>> from gwpy.plot.tex import float_to_latex >>> float_to_latex(1) '1' >>> float_to_latex(2000) '2\times 10^{3}' >>> float_to_latex(100) '10^{2}' >>> float_to_latex(-500) r'-5\!\!\times\!\!10^{2}'
2.796808
3.029199
0.923283
if text is None:
    return ''
out = []
x = None
# loop over matches in order and replace
for m in re_latex_control.finditer(text):
    a, b = m.span()
    char = m.group()[0]
    out.append(text[x:a])
    out.append(r'\%s' % char)
    x = b
if not x:  # no match
    return text
# append suffix and return joined components
out.append(text[b:])
return ''.join(out)
def label_to_latex(text)
r"""Convert text into a latex-passable representation. This method just escapes the following reserved LaTeX characters: % \ _ ~ &, whilst trying to avoid doubly-escaping already escaped characters Parameters ---------- text : `str` input text to convert Returns ------- tex : `str` a modified version of the input text with all unescaped reserved latex characters escaped Examples -------- >>> from gwpy.plot.tex import label_to_latex >>> label_to_latex('normal text') 'normal text' >>> label_to_latex('$1 + 2 = 3$') '$1 + 2 = 3$' >>> label_to_latex('H1:ABC-DEF_GHI') 'H1:ABC-DEF\\_GHI' >>> label_to_latex('H1:ABC-DEF\_GHI') 'H1:ABC-DEF\\_GHI'
4.978117
5.580898
0.891992
# open cache file
if isinstance(cache, FILE_LIKE + string_types):
    return read_cache(cache, sort=file_segment,
                      segment=Segment(start, end))
# format existing cache file
cache = type(cache)(cache)  # copy cache
# sort cache
try:
    cache.sort(key=file_segment)
except ValueError:
    # if this failed, then the sieving will also fail, but let's
    # proceed anyway, since the user didn't actually ask us to do
    # this (but it's a very good idea)
    return cache
# sieve cache
if start is None:  # start time of earliest file
    start = file_segment(cache[0])[0]
if end is None:  # end time of latest file
    end = file_segment(cache[-1])[-1]
return sieve(cache, segment=Segment(start, end))
def preformat_cache(cache, start=None, end=None)
Preprocess a `list` of file paths for reading.

- read the cache from the file (if necessary)
- sieve the cache to only include data we need

Parameters
----------
cache : `list`, `str`
    List of file paths, or path to a LAL-format cache file on disk.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
    GPS start time of required data, defaults to start of data found;
    any input parseable by `~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
    GPS end time of required data, defaults to end of data found;
    any input parseable by `~gwpy.time.to_gps` is fine.

Returns
-------
modcache : `list`
    A parsed, sieved list of paths based on the input arguments.
6.878297
7.062432
0.973927
tqdm_kw = {
    'desc': 'Processing',
    'file': sys.stdout,
    'bar_format': TQDM_BAR_FORMAT,
}
tqdm_kw.update(kwargs)
pbar = tqdm(**tqdm_kw)
if not pbar.disable:
    pbar.desc = pbar.desc.rstrip(': ')
    pbar.refresh()
return pbar
def progress_bar(**kwargs)
Create a `tqdm.tqdm` progress bar

This is just a thin wrapper around `tqdm.tqdm` to set some updated
defaults.
3.011504
3.010881
1.000207
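A usage sketch for the helper above; any keywords given are passed straight through to tqdm:

pbar = progress_bar(total=100, desc='Reading')
for _ in range(100):
    pbar.update(1)
pbar.close()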
gpass = 10 ** (-gpass / 10.)
gstop = 10 ** (-gstop / 10.)
return int(2/3. * log10(1 / (10 * gpass * gstop)) *
           sample_rate / transitionwidth)
def num_taps(sample_rate, transitionwidth, gpass, gstop)
Returns the number of taps for an FIR filter with the given shape

Parameters
----------
sample_rate : `float`
    sampling rate of target data
transitionwidth : `float`
    the width (in the same units as ``sample_rate``) of the
    transition from stop-band to pass-band
gpass : `float`
    the maximum loss in the passband (dB)
gstop : `float`
    the minimum attenuation in the stopband (dB)

Returns
-------
numtaps : `int`
    the number of taps for an FIR filter

Notes
-----
Credit: http://dsp.stackexchange.com/a/31077/8223
3.901435
3.801296
1.026344
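A worked example of the formula above, with assumed design values (4096 Hz data, 100 Hz transition band, 2 dB ripple, 30 dB attenuation):

from math import log10

gpass = 10 ** (-2 / 10.)
gstop = 10 ** (-30 / 10.)
print(int(2/3. * log10(1 / (10 * gpass * gstop)) * 4096 / 100))  # 60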
return (
    isinstance(zpktup, (tuple, list)) and
    len(zpktup) == 3 and
    isinstance(zpktup[0], (list, tuple, numpy.ndarray)) and
    isinstance(zpktup[1], (list, tuple, numpy.ndarray)) and
    isinstance(zpktup[2], float))
def is_zpk(zpktup)
Determine whether the given tuple is a ZPK-format filter definition

Returns
-------
iszpk : `bool`
    `True` if ``zpktup`` looks like a ZPK-format filter definition,
    otherwise `False`
1.918355
2.351125
0.815931
nsamp = transfer.size
ncorner = ncorner if ncorner else 0
out = transfer.copy()
out[0:ncorner] = 0
out[ncorner:nsamp] *= planck(nsamp-ncorner, nleft=5, nright=5)
return out
def truncate_transfer(transfer, ncorner=None)
Smoothly zero the edges of a frequency domain transfer function

Parameters
----------
transfer : `numpy.ndarray`
    transfer function to start from, must have at least ten samples
ncorner : `int`, optional
    number of extra samples to zero off at low frequency,
    default: `None`

Returns
-------
out : `numpy.ndarray`
    the smoothly truncated transfer function

Notes
-----
By default, the input transfer function will have five samples
tapered off at the left and right boundaries. If ``ncorner`` is not
`None`, then ``ncorner`` extra samples will be zeroed on the left as
a hard highpass filter.

See :func:`~gwpy.signal.window.planck` for more information.
5.30097
4.068417
1.302957
out = impulse.copy()
trunc_start = int(ntaps / 2)
trunc_stop = out.size - trunc_start
window = signal.get_window(window, ntaps)
out[0:trunc_start] *= window[trunc_start:ntaps]
out[trunc_stop:out.size] *= window[0:trunc_start]
out[trunc_start:trunc_stop] = 0
return out
def truncate_impulse(impulse, ntaps, window='hanning')
Smoothly truncate a time domain impulse response

Parameters
----------
impulse : `numpy.ndarray`
    the impulse response to start from
ntaps : `int`
    number of taps in the final filter
window : `str`, `numpy.ndarray`, optional
    window function to truncate with, default: ``'hanning'``;
    see :func:`scipy.signal.get_window` for details on acceptable
    formats

Returns
-------
out : `numpy.ndarray`
    the smoothly truncated impulse response
2.36139
2.627901
0.898584
# truncate and highpass the transfer function
transfer = truncate_transfer(transfer, ncorner=ncorner)
# compute and truncate the impulse response
impulse = npfft.irfft(transfer)
impulse = truncate_impulse(impulse, ntaps=ntaps, window=window)
# wrap around and normalise to construct the filter
out = numpy.roll(impulse, int(ntaps/2 - 1))[0:ntaps]
return out
def fir_from_transfer(transfer, ntaps, window='hanning', ncorner=None)
Design a Type II FIR filter given an arbitrary transfer function

Parameters
----------
transfer : `numpy.ndarray`
    transfer function to start from, must have at least ten samples
ntaps : `int`
    number of taps in the final filter, must be an even number
window : `str`, `numpy.ndarray`, optional
    window function to truncate with, default: ``'hanning'``;
    see :func:`scipy.signal.get_window` for details on acceptable
    formats
ncorner : `int`, optional
    number of extra samples to zero off at low frequency,
    default: `None`

Returns
-------
out : `numpy.ndarray`
    a time domain FIR filter of length ``ntaps``

Notes
-----
The final FIR filter will use `~numpy.fft.rfft` FFT normalisation.

If ``ncorner`` is not `None`, then ``ncorner`` extra samples will be
zeroed on the left as a hard highpass filter.

See Also
--------
scipy.signal.remez
    an alternative FIR filter design using the Remez exchange
    algorithm
4.962808
5.403699
0.918409
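A usage sketch for the function above; window='hann' is passed explicitly on the assumption that the installed SciPy no longer accepts the old 'hanning' alias:

import numpy

transfer = numpy.ones(513)  # rfft of a flat 1024-sample response
fir = fir_from_transfer(transfer, ntaps=128, window='hann', ncorner=10)
print(fir.size)  # 128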
zeros = numpy.array(zeros, dtype=float, copy=False)
zeros = zeros[numpy.isfinite(zeros)]
poles = numpy.array(poles, dtype=float, copy=False)
gain = gain
# convert from Hz to rad/s if needed
unit = Unit(unit)
if unit == Unit('Hz'):
    zeros *= -2 * pi
    poles *= -2 * pi
elif unit != Unit('rad/s'):
    raise ValueError("zpk can only be given with unit='Hz' "
                     "or 'rad/s'")
# convert to Z-domain via bilinear transform
fs = 2 * Quantity(fs, 'Hz').value
dpoles = (1 + poles/fs) / (1 - poles/fs)
dzeros = (1 + zeros/fs) / (1 - zeros/fs)
dzeros = numpy.concatenate((
    dzeros,
    -numpy.ones(len(dpoles) - len(dzeros)),
))
dgain = gain * numpy.prod(fs - zeros)/numpy.prod(fs - poles)
return dzeros, dpoles, dgain
def bilinear_zpk(zeros, poles, gain, fs=1.0, unit='Hz')
Convert an analogue ZPK filter to digital using a bilinear transform

Parameters
----------
zeros : array-like
    list of zeros
poles : array-like
    list of poles
gain : `float`
    filter gain
fs : `float`, `~astropy.units.Quantity`
    sampling rate at which to evaluate bilinear transform,
    default: 1.
unit : `str`, `~astropy.units.Unit`
    unit of inputs, one of 'Hz' or 'rad/s', default: ``'Hz'``

Returns
-------
zpk : `tuple`
    digital version of input zpk
3.353146
3.391759
0.988616
if analog and not sample_rate:
    raise ValueError("Must give sample_rate frequency to convert "
                     "analog filter to digital")
# unpack filter
if isinstance(args, tuple) and len(args) == 1:
    # either packed definition ((z, p, k)) or simple definition (lti,)
    args = args[0]
# parse FIR filter
if isinstance(args, numpy.ndarray) and args.ndim == 1:  # fir
    b, a = args, [1.]
    if analog:
        return 'ba', signal.bilinear(b, a)
    return 'ba', (b, a)
# parse IIR filter
if isinstance(args, LinearTimeInvariant):
    lti = args
elif (isinstance(args, numpy.ndarray) and
      args.ndim == 2 and
      args.shape[1] == 6):
    lti = signal.lti(*signal.sos2zpk(args))
else:
    lti = signal.lti(*args)
# convert to zpk format
try:
    lti = lti.to_zpk()
except AttributeError:  # scipy < 0.18, doesn't matter
    pass
# convert to digital components
if analog:
    return 'zpk', bilinear_zpk(lti.zeros, lti.poles, lti.gain,
                               fs=sample_rate)
# return zpk
return 'zpk', (lti.zeros, lti.poles, lti.gain)
def parse_filter(args, analog=False, sample_rate=None)
Parse arbitrary input args into a TF or ZPK filter definition

Parameters
----------
args : `tuple`, `~scipy.signal.lti`
    filter definition, normally just captured positional ``*args``
    from a function call
analog : `bool`, optional
    `True` if filter definition has analogue coefficients
sample_rate : `float`, optional
    sampling frequency at which to convert analogue filter to digital
    via bilinear transform, required if ``analog=True``

Returns
-------
ftype : `str`
    either ``'ba'`` or ``'zpk'``
filt : `tuple`
    the filter components for the returned ``ftype``, either a
    2-tuple with transfer function components, or a 3-tuple for ZPK
4.216587
3.743515
1.126371
sample_rate = _as_float(sample_rate)
frequency = _as_float(frequency)
if fstop is None:
    fstop = min(frequency * 1.5, sample_rate/2.)
if type == 'iir':
    return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
                       **kwargs)
return _design_fir(frequency, fstop, sample_rate, gpass, gstop,
                   **kwargs)
def lowpass(frequency, sample_rate, fstop=None, gpass=2, gstop=30, type='iir', **kwargs)
Design a low-pass filter for the given cutoff frequency

Parameters
----------
frequency : `float`
    corner frequency of low-pass filter (Hertz)
sample_rate : `float`
    sampling rate of target data (Hertz)
fstop : `float`, optional
    edge-frequency of stop-band (Hertz)
gpass : `float`, optional, default: 2
    the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
    the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
    the filter type, either ``'iir'`` or ``'fir'``
**kwargs
    other keyword arguments are passed directly to
    :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`

Returns
-------
filter
    the formatted filter; the output format for an IIR filter depends
    on the input arguments, default is a tuple of
    `(zeros, poles, gain)`

Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.

Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:

>>> from gwpy.signal.filter_design import lowpass
>>> lp = lowpass(1000, 4096)

To view the filter, you can use the `~gwpy.plot.BodePlot`:

>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(lp, sample_rate=4096)
>>> plot.show()
2.183033
3.100175
0.704164
sample_rate = _as_float(sample_rate)
frequency = _as_float(frequency)
if fstop is None:
    fstop = frequency * 2/3.
if type == 'iir':
    return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
                       **kwargs)
return _design_fir(frequency, fstop, sample_rate, gpass, gstop,
                   **kwargs)
def highpass(frequency, sample_rate, fstop=None, gpass=2, gstop=30, type='iir', **kwargs)
Design a high-pass filter for the given cutoff frequency

Parameters
----------
frequency : `float`
    corner frequency of high-pass filter
sample_rate : `float`
    sampling rate of target data
fstop : `float`, optional
    edge-frequency of stop-band
gpass : `float`, optional, default: 2
    the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
    the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
    the filter type, either ``'iir'`` or ``'fir'``
**kwargs
    other keyword arguments are passed directly to
    :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`

Returns
-------
filter
    the formatted filter; the output format for an IIR filter depends
    on the input arguments, default is a tuple of
    `(zeros, poles, gain)`

Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.

Examples
--------
To create a high-pass filter at 100 Hz for 4096 Hz-sampled data:

>>> from gwpy.signal.filter_design import highpass
>>> hp = highpass(100, 4096)

To view the filter, you can use the `~gwpy.plot.BodePlot`:

>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(hp, sample_rate=4096)
>>> plot.show()
2.294548
3.298662
0.6956
sample_rate = _as_float(sample_rate)
flow = _as_float(flow)
fhigh = _as_float(fhigh)
if fstop is None:
    fstop = (flow * 2/3.,
             min(fhigh * 1.5, sample_rate/2.))
fstop = (_as_float(fstop[0]), _as_float(fstop[1]))
if type == 'iir':
    return _design_iir((flow, fhigh), fstop, sample_rate,
                       gpass, gstop, **kwargs)
return _design_fir((flow, fhigh), fstop, sample_rate,
                   gpass, gstop, pass_zero=False, **kwargs)
def bandpass(flow, fhigh, sample_rate, fstop=None, gpass=2, gstop=30, type='iir', **kwargs)
Design a band-pass filter for the given cutoff frequencies

Parameters
----------
flow : `float`
    lower corner frequency of pass band
fhigh : `float`
    upper corner frequency of pass band
sample_rate : `float`
    sampling rate of target data
fstop : `tuple` of `float`, optional
    `(low, high)` edge-frequencies of stop band
gpass : `float`, optional, default: 2
    the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
    the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
    the filter type, either ``'iir'`` or ``'fir'``
**kwargs
    other keyword arguments are passed directly to
    :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`

Returns
-------
filter
    the formatted filter; the output format for an IIR filter depends
    on the input arguments, default is a tuple of
    `(zeros, poles, gain)`

Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.

Examples
--------
To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled
data:

>>> from gwpy.signal.filter_design import bandpass
>>> bp = bandpass(100, 1000, 4096)

To view the filter, you can use the `~gwpy.plot.BodePlot`:

>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(bp, sample_rate=4096)
>>> plot.show()
2.361049
2.788706
0.846647
frequency = Quantity(frequency, 'Hz').value
sample_rate = Quantity(sample_rate, 'Hz').value
nyq = 0.5 * sample_rate
df = 1.0  # pylint: disable=invalid-name
df2 = 0.1
low1 = (frequency - df)/nyq
high1 = (frequency + df)/nyq
low2 = (frequency - df2)/nyq
high2 = (frequency + df2)/nyq
if type == 'iir':
    kwargs.setdefault('gpass', 1)
    kwargs.setdefault('gstop', 10)
    kwargs.setdefault('ftype', 'ellip')
    return signal.iirdesign([low1, high1], [low2, high2],
                            output='zpk', **kwargs)
else:
    raise NotImplementedError("Generating %r notch filters has not "
                              "been implemented yet" % type)
def notch(frequency, sample_rate, type='iir', **kwargs)
Design a ZPK notch filter for the given frequency and sampling rate

Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
    frequency (default in Hertz) at which to apply the notch
sample_rate : `float`, `~astropy.units.Quantity`
    number of samples per second for `TimeSeries` to which this notch
    filter will be applied
type : `str`, optional, default: 'iir'
    type of filter to apply, currently only 'iir' is supported
**kwargs
    other keyword arguments to pass to `scipy.signal.iirdesign`

Returns
-------
zpk : `tuple` of `complex` or `float`
    the filter components in digital zero-pole-gain format

See Also
--------
scipy.signal.iirdesign
    for details on the IIR filter design method

Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.

Examples
--------
To create a notch filter at 100 Hz for 4096 Hz-sampled data:

>>> from gwpy.signal.filter_design import notch
>>> n = notch(100, 4096)

To view the filter, you can use the `~gwpy.plot.BodePlot`:

>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(n, sample_rate=4096)
>>> plot.show()
2.736363
3.373411
0.811156
zeros, poles, gains = zip(*zpks)
return (numpy.concatenate(zeros),
        numpy.concatenate(poles),
        reduce(operator.mul, gains, 1))
def concatenate_zpks(*zpks)
Concatenate a list of zero-pole-gain (ZPK) filters

Parameters
----------
*zpks
    one or more ZPK-format filter definitions, each one a 3-`tuple`
    containing an array of zeros, an array of poles, and a gain
    `float`

Returns
-------
zeros : `numpy.ndarray`
    the concatenated array of zeros
poles : `numpy.ndarray`
    the concatenated array of poles
gain : `float`
    the overall gain

Examples
--------
Create a lowpass and a highpass filter, and combine them:

>>> from gwpy.signal.filter_design import (
...     highpass, lowpass, concatenate_zpks)
>>> hp = highpass(100, 4096)
>>> lp = lowpass(1000, 4096)
>>> zpk = concatenate_zpks(hp, lp)

Plot the filter:

>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(zpk, sample_rate=4096)
>>> plot.show()
3.439507
5.898059
0.583159
# calculate PSD
freqs, psd_ = scipy.signal.welch(
    timeseries.value,
    noverlap=noverlap,
    fs=timeseries.sample_rate.decompose().value,
    nperseg=segmentlength,
    **kwargs
)
# generate FrequencySeries and return
unit = scale_timeseries_unit(
    timeseries.unit,
    kwargs.get('scaling', 'density'),
)
return FrequencySeries(
    psd_,
    unit=unit,
    frequencies=freqs,
    name=timeseries.name,
    epoch=timeseries.epoch,
    channel=timeseries.channel,
)
def welch(timeseries, segmentlength, noverlap=None, **kwargs)
Calculate a PSD of this `TimeSeries` using Welch's method.
3.745118
3.761041
0.995767
kwargs.pop('noverlap', None)
return welch(timeseries, segmentlength, noverlap=0, **kwargs)
def bartlett(timeseries, segmentlength, **kwargs)
Calculate a PSD using Bartlett's method
4.887519
4.947351
0.987906
if scipy_version <= '1.1.9999':
    raise ValueError(
        "median average PSD estimation requires scipy >= 1.2.0",
    )
kwargs.setdefault('average', 'median')
return welch(timeseries, segmentlength, **kwargs)
def median(timeseries, segmentlength, **kwargs)
Calculate a PSD using Welch's method with a median average
7.892239
6.297098
1.253314
stepsize = segmentlength - noverlap
if noverlap:
    numsegs = 1 + int((timeseries.size - segmentlength) /
                      float(noverlap))
else:
    numsegs = int(timeseries.size // segmentlength)
tmpdata = numpy.ndarray((numsegs, int(segmentlength//2 + 1)))
for i in range(numsegs):
    tmpdata[i, :] = welch(
        timeseries[i*stepsize:i*stepsize+segmentlength],
        segmentlength)
std = tmpdata.std(axis=0)
mean = tmpdata.mean(axis=0)
return FrequencySeries(std/mean, unit='', copy=False, f0=0,
                       epoch=timeseries.epoch,
                       df=timeseries.sample_rate.value/segmentlength,
                       channel=timeseries.channel,
                       name='Rayleigh spectrum of %s'
                            % timeseries.name)
def rayleigh(timeseries, segmentlength, noverlap=0)
Calculate a Rayleigh statistic spectrum

Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
    input `TimeSeries` data.
segmentlength : `int`
    number of samples in single average.
noverlap : `int`
    number of samples to overlap between segments, default: 0.

Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
    the Rayleigh statistic `FrequencySeries` (standard deviation over
    mean of the PSD across segments)
3.470109
3.490954
0.994029
# calculate CSD
try:
    freqs, csd_ = scipy.signal.csd(
        timeseries.value, other.value, noverlap=noverlap,
        fs=timeseries.sample_rate.decompose().value,
        nperseg=segmentlength, **kwargs)
except AttributeError as exc:
    exc.args = ('{}, scipy>=0.16 is required'.format(str(exc)),)
    raise
# generate FrequencySeries and return
unit = scale_timeseries_unit(timeseries.unit,
                             kwargs.get('scaling', 'density'))
return FrequencySeries(
    csd_, unit=unit, frequencies=freqs,
    name=str(timeseries.name)+'---'+str(other.name),
    epoch=timeseries.epoch, channel=timeseries.channel)
def csd(timeseries, other, segmentlength, noverlap=None, **kwargs)
Calculate the CSD of two `TimeSeries` using Welch's method

Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
    time-series of data
other : `~gwpy.timeseries.TimeSeries`
    time-series of data
segmentlength : `int`
    number of samples in single average.
noverlap : `int`
    number of samples to overlap between segments, defaults to 50%.
**kwargs
    other keyword arguments are passed to :meth:`scipy.signal.csd`

Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
    average cross-spectral density `FrequencySeries`

See Also
--------
scipy.signal.csd
4.275764
4.403127
0.971074
return units.Quantity(self.span[1] - self.span[0], self.xunit, dtype=float)
def duration(self)
Duration of this series in seconds

:type: `~astropy.units.Quantity` scalar
10.121028
9.605877
1.053629
from .io.core import read as timeseries_reader
return timeseries_reader(cls, source, *args, **kwargs)
def read(cls, source, *args, **kwargs)
Read data into a `TimeSeries`

Arguments and keywords depend on the output format, see the online
documentation for full details for each format; the parameters below
are common to most formats.

Parameters
----------
source : `str`, `list`
    Source of data, any of the following:

    - `str` path of single data file,
    - `str` path of LAL-format cache file,
    - `list` of paths.

name : `str`, `~gwpy.detector.Channel`
    the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
    GPS start time of required data, defaults to start of data found;
    any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
    GPS end time of required data, defaults to end of data found;
    any input parseable by `~gwpy.time.to_gps` is fine
format : `str`, optional
    source format identifier. If not given, the format will be
    detected if possible. See below for list of acceptable formats.
nproc : `int`, optional
    number of parallel processes to use, serial process by default.
pad : `float`, optional
    value with which to fill gaps in the source data, by default gaps
    will result in a `ValueError`.

Notes
-----
6.433658
8.484526
0.758281
return cls.DictClass.fetch(
    [channel], start, end, host=host, port=port, verbose=verbose,
    connection=connection, verify=verify, pad=pad, scaled=scaled,
    allow_tape=allow_tape, type=type, dtype=dtype)[str(channel)]
def fetch(cls, channel, start, end, host=None, port=None, verbose=False, connection=None, verify=False, pad=None, allow_tape=None, scaled=None, type=None, dtype=None)
Fetch data from NDS

Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
    the data channel for which to query
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS start time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS end time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
host : `str`, optional
    URL of NDS server to use, if blank will try any server (in a
    relatively sensible order) to get the data
port : `int`, optional
    port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `False`
    check channels exist in database before asking for data
scaled : `bool`, optional
    apply slope and bias calibration to ADC data, for non-ADC data
    this option has no effect
connection : `nds2.connection`, optional
    open NDS connection to use
verbose : `bool`, optional
    print verbose output about NDS progress, useful for debugging;
    if ``verbose`` is specified as a string, this defines the prefix
    for the progress meter
type : `int`, optional
    NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
    identifier for desired output data type
2.936253
4.127588
0.711373
return cls.DictClass.find(
    [channel], start, end, frametype=frametype, verbose=verbose,
    pad=pad, scaled=scaled, dtype=dtype, nproc=nproc, **readargs
)[str(channel)]
def find(cls, channel, start, end, frametype=None, pad=None, scaled=None, dtype=None, nproc=1, verbose=False, **readargs)
Find and read data from frames for a channel

Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
    the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS start time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS end time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
frametype : `str`, optional
    name of frametype in which this channel is stored, will search
    for containing frame types if necessary
pad : `float`, optional
    value with which to fill gaps in the source data, by default gaps
    will result in a `ValueError`.
scaled : `bool`, optional
    apply slope and bias calibration to ADC data, for non-ADC data
    this option has no effect.
nproc : `int`, optional, default: `1`
    number of parallel processes to use, serial process by default.
dtype : `numpy.dtype`, `str`, `type`, or `dict`
    numeric data type for returned data, e.g. `numpy.float`, or
    `dict` of (`channel`, `dtype`) pairs
allow_tape : `bool`, optional, default: `True`
    allow reading from frame files on (slow) magnetic tape
verbose : `bool`, optional
    print verbose output about read progress, if ``verbose`` is
    specified as a string, this defines the prefix for the progress
    meter
**readargs
    any other keyword arguments to be passed to `.read()`
3.408367
4.886894
0.69745
return cls.DictClass.get(
    [channel], start, end, pad=pad, scaled=scaled, dtype=dtype,
    verbose=verbose, allow_tape=allow_tape, **kwargs)[str(channel)]
def get(cls, channel, start, end, pad=None, scaled=None, dtype=None, verbose=False, allow_tape=None, **kwargs)
Get data for this channel from frames or NDS

This method dynamically accesses either frames on disk, or a remote
NDS2 server to find and return data for the given interval

Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
    the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS start time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS end time of required data, any input parseable by
    `~gwpy.time.to_gps` is fine
pad : `float`, optional
    value with which to fill gaps in the source data, by default gaps
    will result in a `ValueError`.
scaled : `bool`, optional
    apply slope and bias calibration to ADC data, for non-ADC data
    this option has no effect
dtype : `numpy.dtype`, `str`, `type`, or `dict`
    numeric data type for returned data, e.g. `numpy.float`, or
    `dict` of (`channel`, `dtype`) pairs
nproc : `int`, optional, default: `1`
    number of parallel processes to use, serial process by default.
allow_tape : `bool`, optional, default: `None`
    allow the use of frames that are held on tape; default is `None`
    to attempt to allow the `TimeSeries.fetch` method to
    intelligently select a server that doesn't use tapes for data
    storage (doesn't always work), but to eventually allow retrieving
    data from tape if required
verbose : `bool`, optional
    print verbose output about data access progress; if ``verbose``
    is specified as a string, this defines the prefix for the
    progress meter
**kwargs
    other keyword arguments to pass to either :meth:`.find` (for
    direct GWF file access) or :meth:`.fetch` for remote NDS2 access

See Also
--------
TimeSeries.fetch
    for grabbing data from a remote NDS2 server
TimeSeries.find
    for discovering and reading data from local GWF files
3.683791
5.750955
0.640553
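A sketch of calling `get`, which dispatches to `find` for local frames or `fetch` for NDS2 depending on the host; the channel name is again a placeholder:

from gwpy.timeseries import TimeSeries
# pad=0.0 fills any gaps with zeros instead of raising ValueError
data = TimeSeries.get('L1:IMC-F_OUT_DQ', 1187008880, 1187008884, pad=0.0)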
kwargs.update(figsize=figsize, xscale=xscale)
    return super(TimeSeriesBase, self).plot(method=method, **kwargs)
def plot(self, method='plot', figsize=(12, 4), xscale='auto-gps', **kwargs)
Plot the data for this timeseries Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes matplotlib.axes.Axes.plot for documentation of keyword arguments used in rendering the data
3.381173
5.855945
0.577391
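A quick sketch exercising `plot` on a synthetic series; only gwpy, numpy, and matplotlib are assumed:

import numpy
from gwpy.timeseries import TimeSeries
ts = TimeSeries(numpy.random.normal(size=256), sample_rate=256, t0=0)
fig = ts.plot(figsize=(8, 3))  # returns a matplotlib Figure subclass
fig.savefig('timeseries.png')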
# get Channel from buffer
    channel = Channel.from_nds2(buffer_.channel)
    # set default metadata
    metadata.setdefault('channel', channel)
    metadata.setdefault('epoch', LIGOTimeGPS(buffer_.gps_seconds,
                                             buffer_.gps_nanoseconds))
    metadata.setdefault('sample_rate', channel.sample_rate)
    metadata.setdefault('unit', channel.unit)
    metadata.setdefault('name', buffer_.name)
    # unwrap data
    scaled = _dynamic_scaled(scaled, channel.name)
    slope = buffer_.signal_slope
    offset = buffer_.signal_offset
    null_scaling = slope == 1. and offset == 0.
    if scaled and not null_scaling:
        data = buffer_.data.copy() * slope + offset
        copy = False
    else:
        data = buffer_.data
    # construct new TimeSeries-like object
    return cls(data, copy=copy, **metadata)
def from_nds2_buffer(cls, buffer_, scaled=None, copy=True, **metadata)
Construct a new series from an `nds2.buffer` object **Requires:** |nds2|_ Parameters ---------- buffer_ : `nds2.buffer` the input NDS2-client buffer to read scaled : `bool`, optional apply slope and bias calibration to ADC data, for non-ADC data this option has no effect copy : `bool`, optional if `True`, copy the contained data array to a new array **metadata any other metadata keyword arguments to pass to the `TimeSeries` constructor Returns ------- timeseries : `TimeSeries` a new `TimeSeries` containing the data from the `nds2.buffer`, and the appropriate metadata
4.67448
4.597291
1.01679
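A hedged sketch of feeding `from_nds2_buffer` from a live NDS2 connection; the hostname and channel are placeholders, and the nds2-client bindings are assumed to be installed:

import nds2  # requires the NDS2 client bindings
from gwpy.timeseries import TimeSeries
conn = nds2.connection('nds.ligo.caltech.edu', 31200)  # placeholder host
buffers = conn.fetch(1187008880, 1187008884, ['L1:IMC-F_OUT_DQ'])
ts = TimeSeries.from_nds2_buffer(buffers[0])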
from ..utils.lal import from_lal_unit
    try:
        unit = from_lal_unit(lalts.sampleUnits)
    except (TypeError, ValueError) as exc:
        warnings.warn("%s, defaulting to 'dimensionless'" % str(exc))
        unit = None
    channel = Channel(lalts.name, sample_rate=1/lalts.deltaT, unit=unit,
                      dtype=lalts.data.data.dtype)
    out = cls(lalts.data.data, channel=channel, t0=lalts.epoch,
              dt=lalts.deltaT, unit=unit, name=lalts.name, copy=False)
    if copy:
        return out.copy()
    return out
def from_lal(cls, lalts, copy=True)
Generate a new TimeSeries from a LAL TimeSeries of any type.
3.291742
3.256352
1.010868
import lal
    from ..utils.lal import (find_typed_function, to_lal_unit)
    # map unit
    try:
        unit = to_lal_unit(self.unit)
    except ValueError as e:
        warnings.warn("%s, defaulting to lal.DimensionlessUnit" % str(e))
        unit = lal.DimensionlessUnit
    # create TimeSeries
    create = find_typed_function(self.dtype, 'Create', 'TimeSeries')
    lalts = create(self.name, lal.LIGOTimeGPS(self.epoch.gps), 0,
                   self.dt.value, unit, self.shape[0])
    lalts.data.data = self.value
    return lalts
def to_lal(self)
Convert this `TimeSeries` into a LAL TimeSeries.
4.572064
4.25942
1.073401
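A round-trip sketch through `to_lal` and `from_lal` above, assuming SWIG-wrapped lal is importable:

import numpy
from gwpy.timeseries import TimeSeries
ts = TimeSeries(numpy.arange(64.), sample_rate=64, t0=0, unit='m')
lalts = ts.to_lal()  # here a lal.REAL8TimeSeries
back = TimeSeries.from_lal(lalts)
assert numpy.array_equal(back.value, ts.value)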
return cls(pycbcseries.data, t0=pycbcseries.start_time, dt=pycbcseries.delta_t, copy=copy)
def from_pycbc(cls, pycbcseries, copy=True)
Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries` Parameters ---------- pycbcseries : `pycbc.types.timeseries.TimeSeries` the input PyCBC `~pycbc.types.timeseries.TimeSeries` array copy : `bool`, optional, default: `True` if `True`, copy these data to a new array Returns ------- timeseries : `TimeSeries` a GWpy version of the input timeseries
3.727287
5.824279
0.639957
from pycbc import types
    return types.TimeSeries(self.value, delta_t=self.dt.to('s').value,
                            epoch=self.epoch.gps, copy=copy)
def to_pycbc(self, copy=True)
Convert this `TimeSeries` into a PyCBC `~pycbc.types.timeseries.TimeSeries` Parameters ---------- copy : `bool`, optional, default: `True` if `True`, copy these data to a new array Returns ------- timeseries : `~pycbc.types.timeseries.TimeSeries` a PyCBC representation of this `TimeSeries`
7.882447
8.773445
0.898444
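Similarly, a round-trip sketch through the PyCBC converters, assuming pycbc is installed:

import numpy
from gwpy.timeseries import TimeSeries
ts = TimeSeries(numpy.ones(128), sample_rate=128, t0=1187008882)
pycbc_ts = ts.to_pycbc()                # pycbc.types.TimeSeries
back = TimeSeries.from_pycbc(pycbc_ts)  # back to a gwpy TimeSeries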
self.sort(key=lambda ts: ts.t0.value)
    i = j = 0
    N = len(self)
    while j < N:
        this = self[j]
        j += 1
        if j < N and this.is_contiguous(self[j]) == 1:
            while j < N and this.is_contiguous(self[j]):
                try:
                    this = self[i] = this.append(self[j])
                except ValueError as exc:
                    if 'cannot resize this array' in str(exc):
                        this = this.copy()
                        this = self[i] = this.append(self[j])
                    else:
                        raise
                j += 1
        else:
            self[i] = this
        i += 1
    del self[i:]
    return self
def coalesce(self)
Merge contiguous elements of this list into single objects This method implicitly sorts and potentially shortens this list.
3.282956
2.966443
1.106698
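A sketch of `coalesce` merging two contiguous entries of a `TimeSeriesList` into one:

import numpy
from gwpy.timeseries import TimeSeries, TimeSeriesList
a = TimeSeries(numpy.zeros(16), sample_rate=16, t0=0)
b = TimeSeries(numpy.zeros(16), sample_rate=16, t0=1)  # contiguous with a
tsl = TimeSeriesList(a, b)
tsl.coalesce()
assert len(tsl) == 1 and abs(tsl[0].span) == 2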
if not self:
        return self.EntryClass(numpy.empty((0,) * self.EntryClass._ndim))
    self.sort(key=lambda t: t.epoch.gps)
    out = self[0].copy()
    for series in self[1:]:
        out.append(series, gap=gap, pad=pad)
    return out
def join(self, pad=None, gap=None)
Concatenate all of the elements of this list into a single object Parameters ---------- pad : `float`, optional, default: `0.0` value with which to pad gaps gap : `str`, optional, default: `'raise'` what to do if there are gaps in the data, one of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. Returns ------- series : `gwpy.types.TimeSeriesBase` subclass a single series containing all data from each entry in this list See Also -------- TimeSeries.append for details on how the individual series are concatenated together
6.185891
5.366964
1.152587
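And a sketch of `join` bridging a gap; with ``gap='pad'`` the missing second is zero-filled:

import numpy
from gwpy.timeseries import TimeSeries, TimeSeriesList
a = TimeSeries(numpy.ones(16), sample_rate=16, t0=0)
b = TimeSeries(numpy.ones(16), sample_rate=16, t0=2)  # one-second gap
full = TimeSeriesList(a, b).join(gap='pad', pad=0.0)
assert abs(full.span) == 3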
out = type(self)()
    for series in self:
        out.append(series.copy())
    return out
def copy(self)
Return a copy of this list with each element copied to new memory
4.863253
5.073966
0.958472
# noop
    if pytype in LAL_TYPE_FROM_STR:
        return pytype
    # convert type code
    if pytype in LAL_TYPE_STR:
        return LAL_TYPE_STR[pytype]
    # convert python type
    try:
        dtype = numpy.dtype(pytype)
        return LAL_TYPE_STR_FROM_NUMPY[dtype.type]
    except (TypeError, KeyError):
        raise ValueError(
            "Failed to map {!r} to LAL type string".format(pytype))
def to_lal_type_str(pytype)
Convert the input python type to a LAL type string Examples -------- To convert a python type: >>> from gwpy.utils.lal import to_lal_type_str >>> to_lal_type_str(float) 'REAL8' To convert a `numpy.dtype`: >>> import numpy >>> to_lal_type_str(numpy.dtype('uint32')) 'UINT4' To convert a LAL type code: >>> to_lal_type_str(11) 'REAL8' Raises ------ ValueError if the input doesn't map to a LAL type string
3.876065
4.707574
0.823368
laltype = to_lal_type_str(pytype)
    return getattr(module, '{0}{1}{2}'.format(prefix, laltype, suffix))
def find_typed_function(pytype, prefix, suffix, module=lal)
Returns the lal method for the correct type Parameters ---------- pytype : `type`, `numpy.dtype` the python type, or dtype, to map prefix : `str` the function name prefix (before the type tag) suffix : `str` the function name suffix (after the type tag) Raises ------ AttributeError if the function is not found Examples -------- >>> from gwpy.utils.lal import find_typed_function >>> find_typed_function(float, 'Create', 'Sequence') <built-in function CreateREAL8Sequence>
3.473382
5.507236
0.630694
if isinstance(aunit, string_types):
        aunit = units.Unit(aunit)
    aunit = aunit.decompose()
    lunit = lal.Unit()
    for base, power in zip(aunit.bases, aunit.powers):
        # try this base
        try:
            lalbase = LAL_UNIT_FROM_ASTROPY[base]
        except KeyError:
            lalbase = None
            # otherwise loop through the equivalent bases
            for eqbase in base.find_equivalent_units():
                try:
                    lalbase = LAL_UNIT_FROM_ASTROPY[eqbase]
                except KeyError:
                    continue
                else:
                    break
        # if we didn't find anything, raise an exception
        if lalbase is None:
            raise ValueError("LAL has no unit corresponding to %r" % base)
        lunit *= lalbase ** power
    return lunit
def to_lal_unit(aunit)
Convert the input unit into a `LALUnit` For example:: >>> u = to_lal_unit('m**2 / kg ** 4') >>> print(u) m^2 kg^-4 Parameters ---------- aunit : `~astropy.units.Unit`, `str` the input unit Returns ------- unit : `LALUnit` the LALUnit representation of the input Raises ------ ValueError if LAL doesn't understand the base units for the input
3.159775
3.335467
0.947326
return reduce(operator.mul, ( units.Unit(str(LAL_UNIT_INDEX[i])) ** exp for i, exp in enumerate(lunit.unitNumerator)))
def from_lal_unit(lunit)
Convert a `lal.Unit` into a `~astropy.units.Unit` Parameters ---------- lunit : `lal.Unit` the input unit Returns ------- unit : `~astropy.units.Unit` the Astropy representation of the input Raises ------ TypeError if ``lunit`` cannot be converted to `lal.Unit` ValueError if Astropy doesn't understand the base units for the input
12.421934
15.761868
0.7881
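A round-trip sketch through the two unit converters; note the reconstruction in `from_lal_unit` above only walks ``unitNumerator``, so integer powers are assumed:

from astropy import units
from gwpy.utils.lal import (to_lal_unit, from_lal_unit)
lalunit = to_lal_unit('m**2 / kg')
assert from_lal_unit(lalunit) == units.m ** 2 / units.kg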
gps = to_gps(gps)
    return lal.LIGOTimeGPS(gps.gpsSeconds, gps.gpsNanoSeconds)
def to_lal_ligotimegps(gps)
Convert the given GPS time to a `lal.LIGOTimeGPS` object Parameters ---------- gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str` input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps` Returns ------- ligotimegps : `lal.LIGOTimeGPS` a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time
4.579853
6.549998
0.699214
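A short sketch of the GPS converter; anything `to_gps` understands should work (date-string parsing needs python-dateutil):

from gwpy.utils.lal import to_lal_ligotimegps
print(to_lal_ligotimegps(1187008882.5))            # 1187008882.500000000
print(to_lal_ligotimegps('Aug 17 2017 12:41:04'))  # same second, parsed from a date string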
from ligo.lw.lsctables import gpsproperty as GpsProperty
    # get properties for row object
    rowvars = vars(tabletype.RowType)
    # build list of real column names for fancy properties
    extracols = {}
    for key in columns:
        prop = rowvars[key]
        if isinstance(prop, GpsProperty):
            extracols[key] = (prop.s_name, prop.ns_name)
    return extracols
def _get_property_columns(tabletype, columns)
Returns a `dict` mapping each GPS property name to the pair of real GPS columns required to read it for a table Examples -------- >>> _get_property_columns(lsctables.SnglBurstTable, ['peak']) {'peak': ('peak_time', 'peak_time_ns')}
11.538714
12.333529
0.935557
# set default keywords
    if rename is None:
        rename = {}
    if columns is None:
        columns = llwtable.columnnames
    # extract columns from LIGO_LW table as astropy.table.Column
    data = []
    for colname in columns:
        arr = _get_column(llwtable, colname)
        # transform to astropy.table.Column
        copythis = isinstance(arr, numpy.ndarray)
        data.append(to_astropy_column(arr, apytable.Column, copy=copythis,
                                      use_numpy_dtype=use_numpy_dtypes,
                                      name=rename.get(colname, colname)))
    # build table and return
    return apytable(data, copy=False,
                    meta={'tablename': str(llwtable.Name)})
def to_astropy_table(llwtable, apytable, copy=False, columns=None, use_numpy_dtypes=False, rename=None)
Convert a :class:`~ligo.lw.table.Table` to an `~astropy.table.Table` This method is designed as an internal method to be attached to :class:`~ligo.lw.table.Table` objects as `__astropy_table__`. Parameters ---------- llwtable : :class:`~ligo.lw.table.Table` the LIGO_LW table to convert from apytable : `type` `astropy.table.Table` class or subclass copy : `bool`, optional if `True` copy the input data, otherwise return a reference, default: `False` columns : `list` of `str`, optional the columns to populate, if not given, all columns present in the table are mapped use_numpy_dtypes : `bool`, optional force columns to use numpy `dtype`s, default: `False` rename : `dict`, optional dict of ('old name', 'new name') pairs to rename columns from the original LIGO_LW table Returns ------- table : `EventTable` a view of the original data
3.870007
3.836527
1.008727
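In practice this conversion is reached through `EventTable.read`; a hedged sketch with a placeholder file name:

from gwpy.table import EventTable
# 'events.xml' is a placeholder LIGO_LW document
events = EventTable.read('events.xml', format='ligolw',
                         tablename='sngl_burst', columns=['peak', 'snr'])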
if dtype is None:
        # try and find dtype
        dtype = _get_column_dtype(llwcol)
        if use_numpy_dtype and numpy.dtype(dtype).type is numpy.object_:
            # dtype maps to 'object' in numpy, try and resolve real numpy type
            try:
                dtype = NUMPY_TYPE_MAP[dtype]
            except KeyError:
                # try subclass matches (mainly for ilwdchar)
                for key in NUMPY_TYPE_MAP:
                    if issubclass(dtype, key):
                        dtype = NUMPY_TYPE_MAP[key]
                        break
                else:
                    # no subclass matches, raise
                    raise TypeError("no mapping from object type %r to numpy "
                                    "type" % dtype)
    try:
        return cls(data=llwcol, copy=copy, dtype=dtype, **kwargs)
    except TypeError:
        # numpy tries to cast ilwdchar to int via long, which breaks
        if dtype is numpy.int_ and isinstance(llwcol[0], ilwdchar_types):
            return cls(data=map(dtype, llwcol), copy=False, dtype=dtype,
                       **kwargs)
        # any other error, raise
        raise
def to_astropy_column(llwcol, cls, copy=False, dtype=None, use_numpy_dtype=False, **kwargs)
Convert a :class:`~ligo.lw.table.Column` to `astropy.table.Column` Parameters ----------- llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable the LIGO_LW column to convert, or an iterable cls : `~astropy.table.Column` the Astropy `~astropy.table.Column` or subclass to convert to copy : `bool`, optional if `True` copy the input data, otherwise return a reference, default: `False` dtype : `type`, optional the data type to convert to when creating the `~astropy.table.Column` use_numpy_dtype : `bool`, optional convert object type to numpy dtype, default: `False`, only used with ``dtype=None`` **kwargs other keyword arguments are passed to the `~astropy.table.Column` creator Returns ------- column : `~astropy.table.Column` an Astropy version of the given LIGO_LW column
4.409325
4.685298
0.941098
try:
        # maybe it's a numpy array already!
        dtype = llwcol.dtype
        if dtype is numpy.dtype('O'):  # don't convert
            raise AttributeError
        return dtype
    except AttributeError:  # dang
        try:
            # ligo.lw.table.Column
            llwtype = llwcol.parentNode.validcolumns[llwcol.Name]
        except AttributeError:
            # not a column
            try:
                return type(llwcol[0])
            except IndexError:
                return None
        else:
            # map column type str to python type
            from ligo.lw.types import (ToPyType, ToNumPyType)
            try:
                return ToNumPyType[llwtype]
            except KeyError:
                return ToPyType[llwtype]
def _get_column_dtype(llwcol)
Get the data type of a LIGO_LW `Column` Parameters ---------- llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable a LIGO_LW column, a numpy array, or an iterable Returns ------- dtype : `type`, None the object data type for values in the given column, `None` is returned if ``llwcol`` is a `numpy.ndarray` with `numpy.object_` dtype, or no data type can be parsed (e.g. empty list)
6.007092
5.319817
1.129192
from ligo.lw import lsctables
    # create new LIGO_LW table
    columns = table.columns.keys()
    cls = lsctables.TableByName[tablename]
    llwcolumns = list(columns)
    for col, llwcols in _get_property_columns(cls, columns).items():
        idx = llwcolumns.index(col)
        llwcolumns.pop(idx)
        for name in llwcols[::-1]:
            llwcolumns.insert(idx, name)
    llwtable = lsctables.New(cls, columns=llwcolumns)
    # map rows across
    for row in table:
        llwrow = llwtable.RowType()
        for name in columns:
            setattr(llwrow, name,
                    to_ligolw_table_type(row[name], llwtable, name))
        llwtable.append(llwrow)
    return llwtable
def table_to_ligolw(table, tablename)
Convert a `astropy.table.Table` to a :class:`ligo.lw.table.Table`
4.528176
4.407097
1.027474
from ligo.lw import table as ligolw_table
    from ligo.lw.lsctables import TableByName

    # -- keyword handling -----------------------

    # separate keywords for reading and converting from LIGO_LW to Astropy
    read_kw = kwargs  # rename for readability
    convert_kw = {
        'rename': None,
        'use_numpy_dtypes': False,
    }
    for key in filter(kwargs.__contains__, convert_kw):
        convert_kw[key] = kwargs.pop(key)
    if convert_kw['rename'] is None:
        convert_kw['rename'] = {}

    # allow user to specify LIGO_LW columns to read to provide the
    # desired output columns
    try:
        columns = list(kwargs.pop('columns'))
    except KeyError:
        columns = None
    try:
        read_kw['columns'] = list(kwargs.pop('ligolw_columns'))
    except KeyError:
        read_kw['columns'] = columns
    convert_kw['columns'] = columns or read_kw['columns']

    if tablename:
        tableclass = TableByName[ligolw_table.Table.TableName(tablename)]
        # work out if fancy property columns are required
        # means 'peak_time' and 'peak_time_ns' will get read if 'peak'
        # is requested
        if convert_kw['columns'] is not None:
            readcols = set(read_kw['columns'])
            propcols = _get_property_columns(tableclass,
                                             convert_kw['columns'])
            for col in propcols:
                try:
                    readcols.remove(col)
                except KeyError:
                    continue
                readcols.update(propcols[col])
            read_kw['columns'] = list(readcols)

    # -- read -----------------------------------

    return Table(read_ligolw_table(source, tablename=tablename, **read_kw),
                 **convert_kw)
def read_table(source, tablename=None, **kwargs)
Read a `Table` from one or more LIGO_LW XML documents Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one or more open files, file paths, or LIGO_LW `Document` objects tablename : `str`, optional the `Name` of the relevant `Table` to read, if not given a table will be returned only if exactly one exists in the document(s) **kwargs keyword arguments for the read, or conversion functions See Also -------- gwpy.io.ligolw.read_table for details of keyword arguments for the read operation gwpy.table.io.ligolw.to_astropy_table for details of keyword arguments for the conversion operation
4.837564
4.379889
1.104495
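A sketch of the read/convert keyword split handled above: ``columns`` names the output columns, and requesting the fancy 'peak' property automatically pulls 'peak_time' and 'peak_time_ns' from disk (the import path and file name are assumptions):

from gwpy.table.io.ligolw import read_table  # module path assumed
t = read_table('events.xml', tablename='sngl_burst',
               columns=['peak', 'snr'])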
if tablename is None:
        # try and get tablename from metadata
        tablename = table.meta.get('tablename', None)
    if tablename is None:  # panic
        raise ValueError("please pass ``tablename=`` to specify the target "
                         "LIGO_LW Table Name")
    try:
        llwtable = table_to_ligolw(
            table,
            tablename,
            ilwdchar_compat=ilwdchar_compat or False,
        )
    except LigolwElementError as exc:
        if ilwdchar_compat is not None:
            raise
        try:
            llwtable = table_to_ligolw(table, tablename,
                                       ilwdchar_compat=True)
        except Exception:
            raise exc
    return write_ligolw_tables(target, [llwtable], **kwargs)
def write_table(table, target, tablename=None, ilwdchar_compat=None, **kwargs)
Write a `~astropy.table.Table` to file in LIGO_LW XML format This method will first attempt to write using the new `ligo.lw` package (if ``ilwdchar_compat`` is `None` or `False`), then fall back to the older `glue.ligolw` package if that fails (and ``ilwdchar_compat`` is `None` or `True`).
4.115284
3.529426
1.165993
xarr, yarr = loadtxt(input_, unpack=unpack, **kwargs)
    return array_type(yarr, xindex=xarr)
def read_ascii_series(input_, array_type=Series, unpack=True, **kwargs)
Read a `Series` from an ASCII file Parameters ---------- input_ : `str`, `file` path of file to read, or an open file array_type : `type` desired return type **kwargs other keyword arguments are passed to `numpy.loadtxt`
4.797181
8.383334
0.572228
xarr = series.xindex.value
    yarr = series.value
    return savetxt(output, column_stack((xarr, yarr)), **kwargs)
def write_ascii_series(series, output, **kwargs)
Write a `Series` to a file in ASCII format Parameters ---------- series : :class:`~gwpy.data.Series` data series to write output : `str`, `file` file to write to See Also -------- numpy.savetxt for documentation of keyword arguments
6.328854
11.368656
0.556693
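A round-trip sketch through the two ASCII helpers above (the module path in the import is an assumption):

import numpy
from gwpy.types import Series
from gwpy.types.io.ascii import (read_ascii_series,
                                 write_ascii_series)  # path assumed
s = Series(numpy.arange(10.), dx=0.5, name='demo')
write_ascii_series(s, 'series.txt')     # two-column (x, y) text file
back = read_ascii_series('series.txt')
assert numpy.array_equal(back.value, s.value)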
def _read(filepath, **kwargs):
        kwgs = defaults.copy()
        kwgs.update(kwargs)
        return read_ascii_series(filepath, array_type=array_type, **kwgs)

    def _write(series, output, **kwargs):
        kwgs = defaults.copy()
        kwgs.update(kwargs)
        return write_ascii_series(series, output, **kwgs)

    io_registry.register_reader(format, array_type, _read)
    io_registry.register_writer(format, array_type, _write)
    if identify:
        io_registry.register_identifier(format, array_type,
                                        identify_factory(format))
def register_ascii_series_io(array_type, format='txt', identify=True, **defaults)
Register ASCII read/write/identify methods for the given array
2.089514
2.159263
0.967698
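A hypothetical registration using the helper above: a 'csv' variant of the ASCII format whose default delimiter is a comma (the format name and default kwarg are illustrative):

from gwpy.types import Series
# afterwards Series.read('data.csv', format='csv') would apply
# delimiter=',' by default
register_ascii_series_io(Series, format='csv', delimiter=',')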
if types is not None and isinstance(value, tuple(types)):
        out = dict((c, value) for c in channels)
    elif isinstance(value, (tuple, list)):
        out = dict(zip(channels, value))
    elif value is None:
        out = dict()
    elif isinstance(value, dict):
        out = value.copy()
    else:
        return None
    if astype is not None:
        return dict((key, astype(out[key])) for key in out)
    return out
def channel_dict_kwarg(value, channels, types=None, astype=None)
Format the given kwarg value in a dict with one value per channel Parameters ---------- value : any type keyword argument value as given by user channels : `list` list of channels being read types : `list` of `type` list of valid object types for value astype : `type` output type for `dict` values Returns ------- dict : `dict` `dict` of values, one value per channel key, if parsing is successful None : `None` `None`, if parsing was unsuccessful
2.214252
2.444598
0.905773
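Two illustrative calls showing the broadcast behaviour of the helper above (channel names are placeholders):

channels = ['X1:TEST-A', 'X1:TEST-B']
channel_dict_kwarg(4096, channels, types=[int])
# -> {'X1:TEST-A': 4096, 'X1:TEST-B': 4096}
channel_dict_kwarg([4096, 2048], channels)
# -> {'X1:TEST-A': 4096, 'X1:TEST-B': 2048}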
# import the frame library here to have any ImportErrors occur early
    try:
        return importlib.import_module('.%s' % library, package=package)
    except ImportError as exc:
        exc.args = ('Cannot import %s frame API: %s' % (library, str(exc)),)
        raise
def import_gwf_library(library, package=__package__)
Utility method to import the relevant timeseries.io.gwf frame API This is just a wrapper around :func:`importlib.import_module` with a slightly nicer error message
5.792087
5.388669
1.074864
for lib in APIS:
        try:
            import_gwf_library(lib)
        except ImportError:
            continue
        else:
            return lib
    raise ImportError("no GWF API available, please install a third-party "
                      "GWF library ({}) and try again".format(', '.join(APIS)))
def get_default_gwf_api()
Return the preferred GWF library Examples -------- If you have |LDAStools.frameCPP|_ installed: >>> from gwpy.timeseries.io.gwf import get_default_gwf_api >>> get_default_gwf_api() 'framecpp' Or, if you don't have |lalframe|_: >>> get_default_gwf_api() 'lalframe' Otherwise: >>> get_default_gwf_api() ImportError: no GWF API available, please install a third-party GWF library (framecpp, lalframe) and try again
5.561563
3.260001
1.706001
def read_(*args, **kwargs):
        fmt = 'gwf.{}'.format(get_default_gwf_api())
        reader = get_reader(fmt, container)
        return reader(*args, **kwargs)

    def write_(*args, **kwargs):
        fmt = 'gwf.{}'.format(get_default_gwf_api())
        writer = get_writer(fmt, container)
        return writer(*args, **kwargs)

    register_identifier('gwf', container, identify_gwf)
    register_reader('gwf', container, read_)
    register_writer('gwf', container, write_)
def register_gwf_format(container)
Register I/O methods for `format='gwf'` The created methods loop through the registered sub-formats. Parameters ---------- container : `Series`, `dict` series class or series dict class to register
2.603933
2.945639
0.883996
if kwargs.pop('verbose', False) is True:
        gprint(*args, **kwargs)
def print_verbose(*args, **kwargs)
Utility to print something only if verbose=True is given
6.118538
4.582882
1.335085
value = str(value)
    try:
        if not connection.set_parameter(parameter, value):
            raise ValueError("invalid parameter or value")
    except (AttributeError, ValueError) as exc:
        warnings.warn(
            'failed to set {}={!r}: {}'.format(parameter, value, str(exc)),
            io_nds2.NDSWarning)
    else:
        print_verbose(
            ' [{}] set {}={!r}'.format(
                connection.get_host(), parameter, value),
            verbose=verbose,
        )
def set_parameter(connection, parameter, value, verbose=False)
Set a parameter for the connection, handling errors as warnings
4.642523
4.556242
1.018937
span = ts.span
    pada = max(int((span[0] - start) * ts.sample_rate.value), 0)
    padb = max(int((end - span[1]) * ts.sample_rate.value), 0)
    if pada or padb:
        return ts.pad((pada, padb), mode='constant',
                      constant_values=(pad,))
    return ts
def _pad_series(ts, pad, start, end)
Pad a timeseries to match the specified [start, end) limits To cover a gap in data returned from NDS
4.027621
4.108647
0.980279
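A sketch of the gap-padding helper above: a one-second series extended to cover [0, 4):

import numpy
from gwpy.timeseries import TimeSeries
ts = TimeSeries(numpy.ones(16), sample_rate=16, t0=1)
padded = _pad_series(ts, 0.0, 0, 4)  # zeros before t0=1 and after t=2
assert abs(padded.span) == 4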
channel = Channel.from_nds2(ndschan)
    nsamp = int((end - start) * channel.sample_rate.value)
    return series_class(numpy_ones(nsamp) * value, t0=start,
                        sample_rate=channel.sample_rate, unit=channel.unit,
                        channel=channel)
def _create_series(ndschan, value, start, end, series_class=TimeSeries)
Create a timeseries to cover the specified [start, end) limits To cover a gap in data returned from NDS
5.304218
5.927905
0.894788
allsegs = io_nds2.get_availability(channels, start, end,
                                   connection=connection)
    return allsegs.intersection(allsegs.keys())
def _get_data_segments(channels, start, end, connection)
Get available data segments for the given channels
12.878062
11.532697
1.116657
gitdir = '.git'
    return os.path.isdir(gitdir) and (
        os.path.isdir(os.path.join(gitdir, 'objects')) and
        os.path.isdir(os.path.join(gitdir, 'refs')) and
        os.path.exists(os.path.join(gitdir, 'HEAD'))
    )
def in_git_clone()
Returns `True` if the current directory is a git repository Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
1.896981
1.903202
0.996731