Dataset schema:
_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
q31400
pvector_field
train
def pvector_field(item_type, optional=False, initial=()): """ Create checked ``PVector`` field. :param item_type: The required type for the items in the vector. :param optional: If true, ``None`` can be used as a value for this field. :param initial: Initial value to pass to factory if no value is given for the field. :return: A ``field`` containing a ``CheckedPVector`` of the given type. """ return _sequence_field(CheckedPVector, item_type, optional, initial)
python
{ "resource": "" }
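A minimal usage sketch for pvector_field, assuming the public pyrsistent API where the field is declared on a PRecord subclass (the Scores class is illustrative):

from pyrsistent import PRecord, pvector_field

class Scores(PRecord):
    # Every item of the vector must be an int; other types fail the check
    values = pvector_field(int)

s = Scores(values=[1, 2, 3])
print(s.values)  # CheckedPVector containing [1, 2, 3]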
q31401
_restore_pmap_field_pickle
train
def _restore_pmap_field_pickle(key_type, value_type, data): """Unpickling function for auto-generated PMap field types.""" type_ = _pmap_field_types[key_type, value_type] return _restore_pickle(type_, data)
python
{ "resource": "" }
q31402
_make_pmap_field_type
train
def _make_pmap_field_type(key_type, value_type): """Create a subclass of CheckedPMap with the given key and value types.""" type_ = _pmap_field_types.get((key_type, value_type)) if type_ is not None: return type_ class TheMap(CheckedPMap): __key_type__ = key_type __value_type__ = value_type def __reduce__(self): return (_restore_pmap_field_pickle, (self.__key_type__, self.__value_type__, dict(self))) TheMap.__name__ = "{0}To{1}PMap".format( _types_to_names(TheMap._checked_key_types), _types_to_names(TheMap._checked_value_types)) _pmap_field_types[key_type, value_type] = TheMap return TheMap
python
{ "resource": "" }
q31403
pmap_field
train
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT): """ Create a checked ``PMap`` field. :param key_type: The required type for the keys of the map. :param value_type: The required type for the values of the map. :param optional: If true, ``None`` can be used as a value for this field. :param invariant: Pass-through to ``field``. :return: A ``field`` containing a ``CheckedPMap``. """ TheMap = _make_pmap_field_type(key_type, value_type) if optional: def factory(argument): if argument is None: return None else: return TheMap.create(argument) else: factory = TheMap.create return field(mandatory=True, initial=TheMap(), type=optional_type(TheMap) if optional else TheMap, factory=factory, invariant=invariant)
python
{ "resource": "" }
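A companion sketch for pmap_field, again assuming the public pyrsistent API (the Inventory class is illustrative):

from pyrsistent import PRecord, pmap_field

class Inventory(PRecord):
    # Keys must be str and values must be int
    counts = pmap_field(str, int)

inv = Inventory(counts={'apples': 3})
inv2 = inv.set('counts', inv.counts.set('pears', 5))  # persistent update
print(inv2.counts)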
q31404
PyKeyboardMeta.tap_key
train
def tap_key(self, character='', n=1, interval=0): """Press and release a given character key n times.""" for i in range(n): self.press_key(character) self.release_key(character) time.sleep(interval)
python
{ "resource": "" }
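A short usage sketch for tap_key, assuming the PyUserInput import path from pykeyboard; the timings are illustrative:

from pykeyboard import PyKeyboard

k = PyKeyboard()
k.tap_key('a')                     # press and release 'a' once
k.tap_key('b', n=3, interval=0.1)  # tap 'b' three times, pausing 0.1 s after each tap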
q31405
PyKeyboardMeta.type_string
train
def type_string(self, char_string, interval=0): """ A convenience method for typing longer strings of characters. Generates as few Shift events as possible.""" shift = False for char in char_string: if self.is_char_shifted(char): if not shift: # Only press Shift as needed time.sleep(interval) self.press_key(self.shift_key) shift = True #In order to avoid tap_key pressing Shift, we need to pass the #unshifted form of the character if char in '<>?:"{}|~!@#$%^&*()_+': ch_index = '<>?:"{}|~!@#$%^&*()_+'.index(char) unshifted_char = ",./;'[]\\`1234567890-="[ch_index] else: unshifted_char = char.lower() time.sleep(interval) self.tap_key(unshifted_char) else: # Unshifted already if shift and char != ' ': # Only release Shift as needed self.release_key(self.shift_key) shift = False time.sleep(interval) self.tap_key(char) if shift: # Turn off Shift if it's still ON self.release_key(self.shift_key)
python
{ "resource": "" }
q31406
keysym_definitions
train
def keysym_definitions(): """Yields all keysym definitions parsed as tuples. """ for keysym_line in keysym_lines(): # As described in the input text, the format of a line is: # 0x20 U0020 . # space /* optional comment */ keysym_number, codepoint, status, _, name_part = [ p.strip() for p in keysym_line.split(None, 4)] name = name_part.split()[0] yield (int(keysym_number, 16), codepoint[1:], status, name)
python
{ "resource": "" }
q31407
PyKeyboard.release_key
train
def release_key(self, character=''): """ Release a given character key. """ try: shifted = self.is_char_shifted(character) except AttributeError: win32api.keybd_event(character, 0, KEYEVENTF_KEYUP, 0) else: if shifted: win32api.keybd_event(self.shift_key, 0, KEYEVENTF_KEYUP, 0) char_vk = win32api.VkKeyScan(character) win32api.keybd_event(char_vk, 0, KEYEVENTF_KEYUP, 0)
python
{ "resource": "" }
q31408
PyKeyboard._handle_key
train
def _handle_key(self, character, event): """Handles either a key press or release, depending on ``event``. :param character: The key to handle. See :meth:`press_key` and :meth:`release_key` for information about this parameter. :param event: The *Xlib* event. This should be either :attr:`Xlib.X.KeyPress` or :attr:`Xlib.X.KeyRelease` """ try: # Detect uppercase or shifted character shifted = self.is_char_shifted(character) except AttributeError: # Handle the case of integer keycode argument with display_manager(self.display) as d: fake_input(d, event, character) else: with display_manager(self.display) as d: if shifted: fake_input(d, event, self.shift_key) keycode = self.lookup_character_keycode(character) fake_input(d, event, keycode)
python
{ "resource": "" }
q31409
PyKeyboardEvent.stop
train
def stop(self): """Stop listening for keyboard input events.""" self.state = False with display_manager(self.display) as d: d.record_disable_context(self.ctx) d.ungrab_keyboard(X.CurrentTime) with display_manager(self.display2) as d: d.record_disable_context(self.ctx) d.ungrab_keyboard(X.CurrentTime)
python
{ "resource": "" }
q31410
PyKeyboardEvent.lookup_char_from_keycode
train
def lookup_char_from_keycode(self, keycode): """ This will conduct a lookup of the character or string associated with a given keycode. """ #TODO: Logic should be strictly adapted from X11's src/KeyBind.c #Right now the logic is based off of #http://tronche.com/gui/x/xlib/input/keyboard-encoding.html #Which I suspect is not the whole story and may likely cause bugs keysym_index = 0 #TODO: Display's Keysyms per keycode count? Do I need this? #If the Num_Lock is on, and the keycode corresponds to the keypad if self.modifiers['Num_Lock'] and keycode in self.keypad_keycodes: if self.modifiers['Shift'] or self.modifiers['Shift_Lock']: keysym_index = 0 else: keysym_index = 1 elif not self.modifiers['Shift'] and self.modifiers['Caps_Lock']: #Use the first keysym if uppercase or uncased #Use the uppercase keysym if the first is lowercase (second) keysym_index = 0 keysym = self.display.keycode_to_keysym(keycode, keysym_index) #TODO: Support Unicode, Greek, and special latin characters if keysym & 0x7f == keysym and chr(keysym) in 'abcdefghijklmnopqrstuvwxyz': keysym_index = 1 elif self.modifiers['Shift'] and self.modifiers['Caps_Lock']: keysym_index = 1 keysym = self.display.keycode_to_keysym(keycode, keysym_index) #TODO: Support Unicode, Greek, and special latin characters if keysym & 0x7f == keysym and chr(keysym) in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': keysym_index = 0 elif self.modifiers['Shift'] or self.modifiers['Shift_Lock']: keysym_index = 1 if self.modifiers['Mode_switch']: keysym_index += 2 #Finally! Get the keysym keysym = self.display.keycode_to_keysym(keycode, keysym_index) #If the character is ascii printable, return that character if keysym & 0x7f == keysym and self.ascii_printable(keysym): return chr(keysym) #If the character was not printable, look for its name try: char = self.keysym_to_string[keysym] except KeyError: print('Unable to determine character.') print('Keycode: {0} KeySym {1}'.format(keycode, keysym)) return None else: return char
python
{ "resource": "" }
q31411
PyKeyboardEvent.get_translation_dicts
train
def get_translation_dicts(self): """ Returns dictionaries for the translation of keysyms to strings and from strings to keysyms. """ keysym_to_string_dict = {} string_to_keysym_dict = {} #XK loads latin1 and miscellany on its own; load latin2-4 and greek Xlib.XK.load_keysym_group('latin2') Xlib.XK.load_keysym_group('latin3') Xlib.XK.load_keysym_group('latin4') Xlib.XK.load_keysym_group('greek') #Make a standard dict and the inverted dict for string, keysym in Xlib.XK.__dict__.items(): if string.startswith('XK_'): string_to_keysym_dict[string[3:]] = keysym keysym_to_string_dict[keysym] = string[3:] return keysym_to_string_dict, string_to_keysym_dict
python
{ "resource": "" }
q31412
PyKeyboardEvent.ascii_printable
train
def ascii_printable(self, keysym): """ Return False if the keysym corresponds to a non-printable ASCII character, and True if it is printable. ASCII 11 (vertical tab) and ASCII 12 (form feed) count as printable here; chr(11) and chr(12) return '\x0b' and '\x0c' respectively. """ if 0 <= keysym < 9: return False elif 13 < keysym < 32: return False elif keysym > 126: return False else: return True
python
{ "resource": "" }
q31413
Clickonacci.click
train
def click(self, x, y, button, press): '''Print Fibonacci numbers when the left button is pressed.''' if button == 1: if press: print(next(self.fibo)) else: # Exit if any other mouse button used self.stop()
python
{ "resource": "" }
q31414
PyMouseMeta.click
train
def click(self, x, y, button=1, n=1): """ Click a mouse button n times on a given x, y. Button is defined as 1 = left, 2 = right, 3 = middle. """ for i in range(n): self.press(x, y, button) self.release(x, y, button)
python
{ "resource": "" }
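A short usage sketch for click, assuming the PyUserInput import path from pymouse and its screen_size() helper:

from pymouse import PyMouse

m = PyMouse()
x, y = m.screen_size()
m.click(x // 2, y // 2)     # one left click at the screen center
m.click(x // 2, y // 2, 2)  # button 2 is the right button per the docstring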
q31415
NodeMixin.siblings
train
def siblings(self): """ Tuple of nodes with the same parent. >>> from anytree import Node >>> udo = Node("Udo") >>> marc = Node("Marc", parent=udo) >>> lian = Node("Lian", parent=marc) >>> loui = Node("Loui", parent=marc) >>> lazy = Node("Lazy", parent=marc) >>> udo.siblings () >>> marc.siblings () >>> lian.siblings (Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy')) >>> loui.siblings (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lazy')) """ parent = self.parent if parent is None: return tuple() else: return tuple([node for node in parent.children if node != self])
python
{ "resource": "" }
q31416
NodeMixin.height
train
def height(self): """ Number of edges on the longest path to a leaf `Node`. >>> from anytree import Node >>> udo = Node("Udo") >>> marc = Node("Marc", parent=udo) >>> lian = Node("Lian", parent=marc) >>> udo.height 2 >>> marc.height 1 >>> lian.height 0 """ if self.__children_: return max([child.height for child in self.__children_]) + 1 else: return 0
python
{ "resource": "" }
q31417
JsonImporter.import_
train
def import_(self, data): """Read JSON from `data`.""" return self.__import(json.loads(data, **self.kwargs))
python
{ "resource": "" }
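A usage sketch for JsonImporter.import_, following the documented anytree importer API (the JSON string is illustrative):

from anytree import RenderTree
from anytree.importer import JsonImporter

importer = JsonImporter()
data = '{"name": "root", "children": [{"name": "sub0"}, {"name": "sub1"}]}'
root = importer.import_(data)
print(RenderTree(root))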
q31418
JsonImporter.read
train
def read(self, filehandle): """Read JSON from `filehandle`.""" return self.__import(json.load(filehandle, **self.kwargs))
python
{ "resource": "" }
q31419
Walker.walk
train
def walk(self, start, end): """ Walk from `start` node to `end` node. Returns: (upwards, common, downwards): `upwards` is a list of nodes to go upward to. `common` top node. `downwards` is a list of nodes to go downward to. Raises: WalkError: on no common root node. >>> from anytree import Node, RenderTree, AsciiStyle >>> f = Node("f") >>> b = Node("b", parent=f) >>> a = Node("a", parent=b) >>> d = Node("d", parent=b) >>> c = Node("c", parent=d) >>> e = Node("e", parent=d) >>> g = Node("g", parent=f) >>> i = Node("i", parent=g) >>> h = Node("h", parent=i) >>> print(RenderTree(f, style=AsciiStyle())) Node('/f') |-- Node('/f/b') | |-- Node('/f/b/a') | +-- Node('/f/b/d') | |-- Node('/f/b/d/c') | +-- Node('/f/b/d/e') +-- Node('/f/g') +-- Node('/f/g/i') +-- Node('/f/g/i/h') Create a walker: >>> w = Walker() This class is made for walking: >>> w.walk(f, f) ((), Node('/f'), ()) >>> w.walk(f, b) ((), Node('/f'), (Node('/f/b'),)) >>> w.walk(b, f) ((Node('/f/b'),), Node('/f'), ()) >>> w.walk(h, e) ((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e'))) >>> w.walk(d, e) ((), Node('/f/b/d'), (Node('/f/b/d/e'),)) For a proper walking the nodes need to be part of the same tree: >>> w.walk(Node("a"), Node("b")) Traceback (most recent call last): ... anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree. """ s = start.path e = end.path if start.root != end.root: msg = "%r and %r are not part of the same tree." % (start, end) raise WalkError(msg) # common c = Walker.__calc_common(s, e) assert c[0] is start.root len_c = len(c) # up if start is c[-1]: up = tuple() else: up = tuple(reversed(s[len_c:])) # down if end is c[-1]: down = tuple() else: down = e[len_c:] return up, c[-1], down
python
{ "resource": "" }
q31420
JsonExporter.export
train
def export(self, node): """Return JSON for tree starting at `node`.""" dictexporter = self.dictexporter or DictExporter() data = dictexporter.export(node) return json.dumps(data, **self.kwargs)
python
{ "resource": "" }
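A usage sketch for JsonExporter.export, following the documented anytree exporter API; the keyword arguments are forwarded to json.dumps:

from anytree import Node
from anytree.exporter import JsonExporter

root = Node("root")
Node("sub0", parent=root)
exporter = JsonExporter(indent=2, sort_keys=True)
print(exporter.export(root))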
q31421
JsonExporter.write
train
def write(self, node, filehandle): """Write JSON to `filehandle` starting at `node`.""" dictexporter = self.dictexporter or DictExporter() data = dictexporter.export(node) return json.dump(data, filehandle, **self.kwargs)
python
{ "resource": "" }
q31422
findall
train
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None): """ Search nodes matching `filter_` but stop at `maxlevel` or `stop`. Return tuple with matching nodes. Args: node: top node, start searching. Keyword Args: filter_: function called with every `node` as argument, `node` is returned if `True`. stop: stop iteration at `node` if `stop` function returns `True` for `node`. maxlevel (int): maximum descent into the node hierarchy. mincount (int): minimum number of nodes. maxcount (int): maximum number of nodes. Example tree: >>> from anytree import Node, RenderTree, AsciiStyle >>> f = Node("f") >>> b = Node("b", parent=f) >>> a = Node("a", parent=b) >>> d = Node("d", parent=b) >>> c = Node("c", parent=d) >>> e = Node("e", parent=d) >>> g = Node("g", parent=f) >>> i = Node("i", parent=g) >>> h = Node("h", parent=i) >>> print(RenderTree(f, style=AsciiStyle()).by_attr()) f |-- b | |-- a | +-- d | |-- c | +-- e +-- g +-- i +-- h >>> findall(f, filter_=lambda node: node.name in ("a", "b")) (Node('/f/b'), Node('/f/b/a')) >>> findall(f, filter_=lambda node: d in node.path) (Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e')) The number of matches can be limited: >>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS Traceback (most recent call last): ... anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e')) >>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS Traceback (most recent call last): ... anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e')) """ return _findall(node, filter_=filter_, stop=stop, maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
python
{ "resource": "" }
q31423
findall_by_attr
train
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None): """ Search nodes with attribute `name` having `value` but stop at `maxlevel`. Return tuple with matching nodes. Args: node: top node, start searching. value: value which needs to match Keyword Args: name (str): attribute name that needs to match maxlevel (int): maximum descent into the node hierarchy. mincount (int): minimum number of nodes. maxcount (int): maximum number of nodes. Example tree: >>> from anytree import Node, RenderTree, AsciiStyle >>> f = Node("f") >>> b = Node("b", parent=f) >>> a = Node("a", parent=b) >>> d = Node("d", parent=b) >>> c = Node("c", parent=d) >>> e = Node("e", parent=d) >>> g = Node("g", parent=f) >>> i = Node("i", parent=g) >>> h = Node("h", parent=i) >>> print(RenderTree(f, style=AsciiStyle()).by_attr()) f |-- b | |-- a | +-- d | |-- c | +-- e +-- g +-- i +-- h >>> findall_by_attr(f, "d") (Node('/f/b/d'),) """ return _findall(node, filter_=lambda n: _filter_by_name(n, name, value), maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
python
{ "resource": "" }
q31424
Resolver.get
train
def get(self, node, path): """ Return instance at `path`. An example module tree: >>> from anytree import Node >>> top = Node("top", parent=None) >>> sub0 = Node("sub0", parent=top) >>> sub0sub0 = Node("sub0sub0", parent=sub0) >>> sub0sub1 = Node("sub0sub1", parent=sub0) >>> sub1 = Node("sub1", parent=top) A resolver using the `name` attribute: >>> r = Resolver('name') Relative paths: >>> r.get(top, "sub0/sub0sub0") Node('/top/sub0/sub0sub0') >>> r.get(sub1, "..") Node('/top') >>> r.get(sub1, "../sub0/sub0sub1") Node('/top/sub0/sub0sub1') >>> r.get(sub1, ".") Node('/top/sub1') >>> r.get(sub1, "") Node('/top/sub1') >>> r.get(top, "sub2") Traceback (most recent call last): ... anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'. Absolute paths: >>> r.get(sub0sub0, "/top") Node('/top') >>> r.get(sub0sub0, "/top/sub0") Node('/top/sub0') >>> r.get(sub0sub0, "/") Traceback (most recent call last): ... anytree.resolver.ResolverError: root node missing. root is '/top'. >>> r.get(sub0sub0, "/bar") Traceback (most recent call last): ... anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'. """ node, parts = self.__start(node, path) for part in parts: if part == "..": node = node.parent elif part in ("", "."): pass else: node = self.__get(node, part) return node
python
{ "resource": "" }
q31425
Resolver.glob
train
def glob(self, node, path): """ Return instances at `path` supporting wildcards. Behaves identical to :any:`get`, but accepts wildcards and returns a list of found nodes. * `*` matches any characters, except '/'. * `?` matches a single character, except '/'. An example module tree: >>> from anytree import Node >>> top = Node("top", parent=None) >>> sub0 = Node("sub0", parent=top) >>> sub0sub0 = Node("sub0", parent=sub0) >>> sub0sub1 = Node("sub1", parent=sub0) >>> sub1 = Node("sub1", parent=top) >>> sub1sub0 = Node("sub0", parent=sub1) A resolver using the `name` attribute: >>> r = Resolver('name') Relative paths: >>> r.glob(top, "sub0/sub?") [Node('/top/sub0/sub0'), Node('/top/sub0/sub1')] >>> r.glob(sub1, ".././*") [Node('/top/sub0'), Node('/top/sub1')] >>> r.glob(top, "*/*") [Node('/top/sub0/sub0'), Node('/top/sub0/sub1'), Node('/top/sub1/sub0')] >>> r.glob(top, "*/sub0") [Node('/top/sub0/sub0'), Node('/top/sub1/sub0')] >>> r.glob(top, "sub1/sub1") Traceback (most recent call last): ... anytree.resolver.ChildResolverError: Node('/top/sub1') has no child sub1. Children are: 'sub0'. Non-matching wildcards are no error: >>> r.glob(top, "bar*") [] >>> r.glob(top, "sub2") Traceback (most recent call last): ... anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'. Absolute paths: >>> r.glob(sub0sub0, "/top/*") [Node('/top/sub0'), Node('/top/sub1')] >>> r.glob(sub0sub0, "/") Traceback (most recent call last): ... anytree.resolver.ResolverError: root node missing. root is '/top'. >>> r.glob(sub0sub0, "/bar") Traceback (most recent call last): ... anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'. """ node, parts = self.__start(node, path) return self.__glob(node, parts)
python
{ "resource": "" }
q31426
DotExporter.to_dotfile
train
def to_dotfile(self, filename): """ Write graph to `filename`. >>> from anytree import Node >>> root = Node("root") >>> s0 = Node("sub0", parent=root) >>> s0b = Node("sub0B", parent=s0) >>> s0a = Node("sub0A", parent=s0) >>> s1 = Node("sub1", parent=root) >>> s1a = Node("sub1A", parent=s1) >>> s1b = Node("sub1B", parent=s1) >>> s1c = Node("sub1C", parent=s1) >>> s1ca = Node("sub1Ca", parent=s1c) >>> from anytree.exporter import DotExporter >>> DotExporter(root).to_dotfile("tree.dot") The generated file should be handed over to the `dot` tool from the http://www.graphviz.org/ package:: $ dot tree.dot -T png -o tree.png """ with codecs.open(filename, "w", "utf-8") as file: for line in self: file.write("%s\n" % line)
python
{ "resource": "" }
q31427
DotExporter.to_picture
train
def to_picture(self, filename): """ Write graph to a temporary file and invoke `dot`. The output file type is automatically detected from the file suffix. *`graphviz` needs to be installed, before usage of this method.* """ fileformat = path.splitext(filename)[1][1:] with NamedTemporaryFile("wb", delete=False) as dotfile: dotfilename = dotfile.name for line in self: dotfile.write(("%s\n" % line).encode("utf-8")) dotfile.flush() cmd = ["dot", dotfilename, "-T", fileformat, "-o", filename] check_call(cmd) try: remove(dotfilename) except Exception: # pragma: no cover msg = 'Could not remove temporary file %s' % dotfilename logging.getLogger(__name__).warn(msg)
python
{ "resource": "" }
q31428
commonancestors
train
def commonancestors(*nodes): """ Determine common ancestors of `nodes`. >>> from anytree import Node >>> udo = Node("Udo") >>> marc = Node("Marc", parent=udo) >>> lian = Node("Lian", parent=marc) >>> dan = Node("Dan", parent=udo) >>> jet = Node("Jet", parent=dan) >>> jan = Node("Jan", parent=dan) >>> joe = Node("Joe", parent=dan) >>> commonancestors(jet, joe) (Node('/Udo'), Node('/Udo/Dan')) >>> commonancestors(jet, marc) (Node('/Udo'),) >>> commonancestors(jet) (Node('/Udo'), Node('/Udo/Dan')) >>> commonancestors() () """ ancestors = [node.ancestors for node in nodes] common = [] for parentnodes in zip(*ancestors): parentnode = parentnodes[0] if all([parentnode is p for p in parentnodes[1:]]): common.append(parentnode) else: break return tuple(common)
python
{ "resource": "" }
q31429
leftsibling
train
def leftsibling(node): """ Return Left Sibling of `node`. >>> from anytree import Node >>> dan = Node("Dan") >>> jet = Node("Jet", parent=dan) >>> jan = Node("Jan", parent=dan) >>> joe = Node("Joe", parent=dan) >>> leftsibling(dan) >>> leftsibling(jet) >>> leftsibling(jan) Node('/Dan/Jet') >>> leftsibling(joe) Node('/Dan/Jan') """ if node.parent: pchildren = node.parent.children idx = pchildren.index(node) if idx: return pchildren[idx - 1] else: return None else: return None
python
{ "resource": "" }
q31430
rightsibling
train
def rightsibling(node): """ Return Right Sibling of `node`. >>> from anytree import Node >>> dan = Node("Dan") >>> jet = Node("Jet", parent=dan) >>> jan = Node("Jan", parent=dan) >>> joe = Node("Joe", parent=dan) >>> rightsibling(dan) >>> rightsibling(jet) Node('/Dan/Jan') >>> rightsibling(jan) Node('/Dan/Joe') >>> rightsibling(joe) """ if node.parent: pchildren = node.parent.children idx = pchildren.index(node) try: return pchildren[idx + 1] except IndexError: return None else: return None
python
{ "resource": "" }
q31431
DictExporter.export
train
def export(self, node): """Export tree starting at `node`.""" attriter = self.attriter or (lambda attr_values: attr_values) return self.__export(node, self.dictcls, attriter, self.childiter)
python
{ "resource": "" }
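A usage sketch for DictExporter.export, following the documented anytree API; extra node attributes appear alongside the name:

from anytree import Node
from anytree.exporter import DictExporter

root = Node("root", answer=42)
Node("sub0", parent=root)
print(DictExporter().export(root))
# {'name': 'root', 'answer': 42, 'children': [{'name': 'sub0'}]}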
q31432
compute_u_val_for_sky_loc_stat
train
def compute_u_val_for_sky_loc_stat(hplus, hcross, hphccorr, hpnorm=None, hcnorm=None, indices=None): """The max-over-sky location detection statistic maximizes over a phase, an amplitude and the ratio of F+ and Fx, encoded in a variable called u. Here we return the value of u for the given indices. """ if indices is not None: hplus = hplus[indices] hcross = hcross[indices] if hpnorm is not None: hplus = hplus * hpnorm if hcnorm is not None: hcross = hcross * hcnorm # Sanity checking in func. above should already have identified any points # which are bad, and should be used to construct indices for input here hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \ numpy.imag(hplus) * numpy.imag(hplus) hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \ numpy.imag(hcross) * numpy.imag(hcross) rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + \ numpy.imag(hplus)*numpy.imag(hcross) a = hphccorr * hplus_magsq - rho_pluscross b = hplus_magsq - hcross_magsq c = rho_pluscross - hphccorr * hcross_magsq sq_root = b*b - 4*a*c sq_root = sq_root**0.5 sq_root = -sq_root # Catch the a->0 case bad_lgc = (a == 0) dbl_bad_lgc = numpy.logical_and(c == 0, b == 0) dbl_bad_lgc = numpy.logical_and(bad_lgc, dbl_bad_lgc) # Initialize u u = sq_root * 0. # In this case u is completely degenerate, so set it to 1 u[dbl_bad_lgc] = 1. # If a->0 avoid overflow by just setting to a large value u[bad_lgc & ~dbl_bad_lgc] = 1E17 # Otherwise normal statistic u[~bad_lgc] = (-b[~bad_lgc] + sq_root[~bad_lgc]) / (2*a[~bad_lgc]) snr_cplx = hplus * u + hcross coa_phase = numpy.angle(snr_cplx) return u, coa_phase
python
{ "resource": "" }
q31433
make_frequency_series
train
def make_frequency_series(vec): """Return a frequency series of the input vector. If the input is a frequency series it is returned, else if the input vector is a real time series it is Fourier transformed and returned as a frequency series. Parameters ---------- vec : TimeSeries or FrequencySeries Returns ------- Frequency Series: FrequencySeries A frequency domain version of the input vector. """ if isinstance(vec, FrequencySeries): return vec if isinstance(vec, TimeSeries): N = len(vec) n = N // 2 + 1 delta_f = 1.0 / N / vec.delta_t vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)), delta_f=delta_f, copy=False) fft(vec, vectilde) return vectilde else: raise TypeError("Can only convert a TimeSeries to a FrequencySeries")
python
{ "resource": "" }
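A hedged usage sketch for make_frequency_series, assuming it is importable from pycbc.filter:

import numpy
from pycbc.types import TimeSeries
from pycbc.filter import make_frequency_series

# One second of white noise sampled at 4096 Hz
ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0 / 4096)
ftilde = make_frequency_series(ts)
print(len(ftilde), ftilde.delta_f)  # N // 2 + 1 = 2049 samples, delta_f = 1.0 Hz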
q31434
sigmasq_series
train
def sigmasq_series(htilde, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None): """Return a cumulative sigmasq frequency series. Return a frequency series containing the accumulated power in the input up to that frequency. Parameters ---------- htilde : TimeSeries or FrequencySeries The input vector psd : {None, FrequencySeries}, optional The psd used to weight the accumulated power. low_frequency_cutoff : {None, float}, optional The frequency to begin accumulating power. If None, start at the beginning of the vector. high_frequency_cutoff : {None, float}, optional The frequency to stop considering accumulated power. If None, continue until the end of the input vector. Returns ------- Frequency Series: FrequencySeries A frequency series containing the cumulative sigmasq. """ htilde = make_frequency_series(htilde) N = (len(htilde)-1) * 2 norm = 4.0 * htilde.delta_f kmin, kmax = get_cutoff_indices(low_frequency_cutoff, high_frequency_cutoff, htilde.delta_f, N) sigma_vec = FrequencySeries(zeros(len(htilde), dtype=real_same_precision_as(htilde)), delta_f = htilde.delta_f, copy=False) mag = htilde.squared_norm() if psd is not None: mag /= psd sigma_vec[kmin:kmax] = mag[kmin:kmax].cumsum() return sigma_vec*norm
python
{ "resource": "" }
q31435
sigma
train
def sigma(htilde, psd = None, low_frequency_cutoff=None, high_frequency_cutoff=None): """ Return the sigma of the waveform. See sigmasq for more details. Parameters ---------- htilde : TimeSeries or FrequencySeries The input vector containing a waveform. psd : {None, FrequencySeries}, optional The psd used to weight the accumulated power. low_frequency_cutoff : {None, float}, optional The frequency to begin considering waveform power. high_frequency_cutoff : {None, float}, optional The frequency to stop considering waveform power. Returns ------- sigma: float """ return sqrt(sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff))
python
{ "resource": "" }
q31436
get_cutoff_indices
train
def get_cutoff_indices(flow, fhigh, df, N): """ Gets the indices of a frequency series at which to stop an overlap calculation. Parameters ---------- flow: float The frequency (in Hz) of the lower index. fhigh: float The frequency (in Hz) of the upper index. df: float The frequency step (in Hz) of the frequency series. N: int The number of points in the **time** series. Can be odd or even. Returns ------- kmin: int kmax: int """ if flow: kmin = int(flow / df) if kmin < 0: err_msg = "Start frequency cannot be negative. " err_msg += "Supplied value and kmin {} and {}".format(flow, kmin) raise ValueError(err_msg) else: kmin = 1 if fhigh: kmax = int(fhigh / df) if kmax > int((N + 1)/2.): kmax = int((N + 1)/2.) else: # int() truncates towards 0, so this is # equivalent to the floor of the float kmax = int((N + 1)/2.) if kmax <= kmin: err_msg = "Kmax cannot be less than or equal to kmin. " err_msg += "Provided values of frequencies (min,max) were " err_msg += "{} and {} ".format(flow, fhigh) err_msg += "corresponding to (kmin, kmax) of " err_msg += "{} and {}.".format(kmin, kmax) raise ValueError(err_msg) return kmin,kmax
python
{ "resource": "" }
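A short usage sketch of the index arithmetic above, assuming get_cutoff_indices can be imported from pycbc.filter (its defining module in pycbc):

from pycbc.filter import get_cutoff_indices

# 16 s of data: delta_f = 1/16 Hz; N = 16 * 4096 time-domain points
kmin, kmax = get_cutoff_indices(20.0, 1024.0, 1.0 / 16, 16 * 4096)
print(kmin, kmax)  # 320 16384, i.e. 20 Hz / (1/16 Hz) and 1024 Hz / (1/16 Hz)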
q31437
smear
train
def smear(idx, factor): """ This function will take as input an array of indexes and return every unique index within the specified factor of the inputs. E.g.: smear([5,7,100],2) = [3,4,5,6,7,8,9,98,99,100,101,102] Parameters ----------- idx : numpy.array of ints The indexes to be smeared. factor : int The factor by which to smear out the input array. Returns -------- new_idx : numpy.array of ints The smeared array of indexes. """ s = [idx] for i in range(factor+1): a = i - factor // 2 s += [idx + a] return numpy.unique(numpy.concatenate(s))
python
{ "resource": "" }
q31438
matched_filter
train
def matched_filter(template, data, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, sigmasq=None): """ Return the complex snr. Return the complex snr, along with its associated normalization of the template, matched filtered against the data. Parameters ---------- template : TimeSeries or FrequencySeries The template waveform data : TimeSeries or FrequencySeries The strain data to be filtered. psd : FrequencySeries The noise weighting of the filter. low_frequency_cutoff : {None, float}, optional The frequency to begin the filter calculation. If None, begin at the first frequency after DC. high_frequency_cutoff : {None, float}, optional The frequency to stop the filter calculation. If None, continue to the Nyquist frequency. sigmasq : {None, float}, optional The template normalization. If none, this value is calculated internally. Returns ------- snr : TimeSeries A time series containing the complex snr. """ snr, _, norm = matched_filter_core(template, data, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq) return snr * norm
python
{ "resource": "" }
q31439
overlap
train
def overlap(vec1, vec2, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, normalized=True): """ Return the overlap between the two TimeSeries or FrequencySeries. Parameters ---------- vec1 : TimeSeries or FrequencySeries The input vector containing a waveform. vec2 : TimeSeries or FrequencySeries The input vector containing a waveform. psd : Frequency Series A power spectral density to weight the overlap. low_frequency_cutoff : {None, float}, optional The frequency to begin the overlap. high_frequency_cutoff : {None, float}, optional The frequency to stop the overlap. normalized : {True, boolean}, optional Set if the overlap is normalized. If true, it will range from 0 to 1. Returns ------- overlap: float """ return overlap_cplx(vec1, vec2, psd=psd, \ low_frequency_cutoff=low_frequency_cutoff,\ high_frequency_cutoff=high_frequency_cutoff,\ normalized=normalized).real
python
{ "resource": "" }
q31440
overlap_cplx
train
def overlap_cplx(vec1, vec2, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, normalized=True): """Return the complex overlap between the two TimeSeries or FrequencySeries. Parameters ---------- vec1 : TimeSeries or FrequencySeries The input vector containing a waveform. vec2 : TimeSeries or FrequencySeries The input vector containing a waveform. psd : Frequency Series A power spectral density to weight the overlap. low_frequency_cutoff : {None, float}, optional The frequency to begin the overlap. high_frequency_cutoff : {None, float}, optional The frequency to stop the overlap. normalized : {True, boolean}, optional Set if the overlap is normalized. If true, it will range from 0 to 1. Returns ------- overlap: complex """ htilde = make_frequency_series(vec1) stilde = make_frequency_series(vec2) kmin, kmax = get_cutoff_indices(low_frequency_cutoff, high_frequency_cutoff, stilde.delta_f, (len(stilde)-1) * 2) if psd: inner = (htilde[kmin:kmax]).weighted_inner(stilde[kmin:kmax], psd[kmin:kmax]) else: inner = (htilde[kmin:kmax]).inner(stilde[kmin:kmax]) if normalized: sig1 = sigma(vec1, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff) sig2 = sigma(vec2, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff) norm = 1 / sig1 / sig2 else: norm = 1 return 4 * htilde.delta_f * inner * norm
python
{ "resource": "" }
q31441
quadratic_interpolate_peak
train
def quadratic_interpolate_peak(left, middle, right): """ Interpolate the peak and offset using a quadratic approximation Parameters ---------- left : numpy array Values at a relative bin value of [-1] middle : numpy array Values at a relative bin value of [0] right : numpy array Values at a relative bin value of [1] Returns ------- bin_offset : numpy array Array of bins offsets, each in the range [-1/2, 1/2] peak_values : numpy array Array of the estimated peak values at the interpolated offset """ bin_offset = 1.0/2.0 * (left - right) / (left - 2 * middle + right) peak_value = middle - 0.25 * (left - right) * bin_offset return bin_offset, peak_value
python
{ "resource": "" }
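A self-contained numeric check of the two interpolation formulas in quadratic_interpolate_peak, reimplemented inline with plain numpy; for exact samples of a parabola the formulas recover the true peak (all sample values below come from y = -(x - 0.3)**2 + 1.0):

import numpy

# Samples of y = -(x - 0.3)**2 + 1.0 at relative bins x = -1, 0, +1
left = numpy.array([-0.69])
middle = numpy.array([0.91])
right = numpy.array([0.51])
bin_offset = 0.5 * (left - right) / (left - 2 * middle + right)
peak_value = middle - 0.25 * (left - right) * bin_offset
print(bin_offset, peak_value)  # [0.3] [1.] -- the true peak location and height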
q31442
followup_event_significance
train
def followup_event_significance(ifo, data_reader, bank, template_id, coinc_times, coinc_threshold=0.005, lookback=150, duration=0.095): """ Followup an event in another detector and determine its significance """ from pycbc.waveform import get_waveform_filter_length_in_time tmplt = bank.table[template_id] length_in_time = get_waveform_filter_length_in_time(tmplt['approximant'], tmplt) # calculate onsource time range from pycbc.detector import Detector onsource_start = -numpy.inf onsource_end = numpy.inf fdet = Detector(ifo) for cifo in coinc_times: time = coinc_times[cifo] dtravel = Detector(cifo).light_travel_time_to_detector(fdet) if time - dtravel > onsource_start: onsource_start = time - dtravel if time + dtravel < onsource_end: onsource_end = time + dtravel # Source must be within this time window to be considered a possible # coincidence onsource_start -= coinc_threshold onsource_end += coinc_threshold # Calculate how much time needed to calculate significance trim_pad = (data_reader.trim_padding * data_reader.strain.delta_t) bdur = int(lookback + 2.0 * trim_pad + length_in_time) if bdur > data_reader.strain.duration * .75: bdur = data_reader.strain.duration * .75 # Require all strain be valid within lookback time if data_reader.state is not None: state_start_time = data_reader.strain.end_time \ - data_reader.reduced_pad * data_reader.strain.delta_t - bdur if not data_reader.state.is_extent_valid(state_start_time, bdur): return None, None, None, None # We won't require that all DQ checks be valid for now, except at # onsource time. if data_reader.dq is not None: dq_start_time = onsource_start - duration / 2.0 dq_duration = onsource_end - onsource_start + duration if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration): return None, None, None, None # Calculate SNR time series for this duration htilde = bank.get_template(template_id, min_buffer=bdur) stilde = data_reader.overwhitened_data(htilde.delta_f) sigma2 = htilde.sigmasq(stilde.psd) snr, _, norm = matched_filter_core(htilde, stilde, h_norm=sigma2) # Find peak in on-source and determine p-value onsrc = snr.time_slice(onsource_start, onsource_end) peak = onsrc.abs_arg_max() peak_time = peak * snr.delta_t + onsrc.start_time peak_value = abs(onsrc[peak]) bstart = float(snr.start_time) + length_in_time + trim_pad bkg = abs(snr.time_slice(bstart, onsource_start)).numpy() window = int((onsource_end - onsource_start) * snr.sample_rate) nsamples = int(len(bkg) / window) peaks = bkg[:nsamples*window].reshape(nsamples, window).max(axis=1) pvalue = (1 + (peaks >= peak_value).sum()) / float(1 + nsamples) # Return recentered source SNR for bayestar, along with p-value, and trig baysnr = snr.time_slice(peak_time - duration / 2.0, peak_time + duration / 2.0) logging.info('Adding %s to candidate, pvalue %s, %s samples', ifo, pvalue, nsamples) return baysnr * norm, peak_time, pvalue, sigma2
python
{ "resource": "" }
q31443
compute_followup_snr_series
train
def compute_followup_snr_series(data_reader, htilde, trig_time, duration=0.095, check_state=True, coinc_window=0.05): """Given a StrainBuffer, a template frequency series and a trigger time, compute a portion of the SNR time series centered on the trigger for its rapid sky localization and followup. If the trigger time is too close to the boundary of the valid data segment the SNR series is calculated anyway and might be slightly contaminated by filter and wrap-around effects. For reasonable durations this will only affect a small fraction of the triggers and probably in a negligible way. Parameters ---------- data_reader : StrainBuffer The StrainBuffer object to read strain data from. htilde : FrequencySeries The frequency series containing the template waveform. trig_time : {float, lal.LIGOTimeGPS} The trigger time. duration : float (optional) Duration of the computed SNR series in seconds. If omitted, it defaults to twice the Earth light travel time plus 10 ms of timing uncertainty. check_state : boolean If True, and the detector was offline or flagged for bad data quality at any point during the inspiral, then return None instead. coinc_window : float (optional) Maximum possible time between coincident triggers at different detectors. This is needed to properly determine data padding. Returns ------- snr : TimeSeries The portion of SNR around the trigger. None if the detector is offline or has bad data quality, and check_state is True. """ if check_state: # was the detector observing for the full amount of involved data? state_start_time = trig_time - duration / 2 - htilde.length_in_time state_end_time = trig_time + duration / 2 state_duration = state_end_time - state_start_time if data_reader.state is not None: if not data_reader.state.is_extent_valid(state_start_time, state_duration): return None # was the data quality ok for the full amount of involved data? dq_start_time = state_start_time - data_reader.dq_padding dq_duration = state_duration + 2 * data_reader.dq_padding if data_reader.dq is not None: if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration): return None stilde = data_reader.overwhitened_data(htilde.delta_f) snr, _, norm = matched_filter_core(htilde, stilde, h_norm=htilde.sigmasq(stilde.psd)) valid_end = int(len(snr) - data_reader.trim_padding) valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate) half_dur_samples = int(snr.sample_rate * duration / 2) coinc_samples = int(snr.sample_rate * coinc_window) valid_start -= half_dur_samples + coinc_samples valid_end += half_dur_samples if valid_start < 0 or valid_end > len(snr)-1: raise ValueError(('Requested SNR duration ({0} s)' ' too long').format(duration)) # Onsource slice for Bayestar followup onsource_idx = float(trig_time - snr.start_time) * snr.sample_rate onsource_idx = int(round(onsource_idx)) onsource_slice = slice(onsource_idx - half_dur_samples, onsource_idx + half_dur_samples + 1) return snr[onsource_slice] * norm
python
{ "resource": "" }
q31444
LiveBatchMatchedFilter.combine_results
train
def combine_results(self, results): """Combine results from different batches of filtering""" result = {} for key in results[0]: result[key] = numpy.concatenate([r[key] for r in results]) return result
python
{ "resource": "" }
q31445
LiveBatchMatchedFilter.process_all
train
def process_all(self): """Process every batch group and return as single result""" results = [] veto_info = [] while 1: result, veto = self._process_batch() if result is False: return False if result is None: break results.append(result) veto_info += veto result = self.combine_results(results) if self.max_triggers_in_batch: sort = result['snr'].argsort()[::-1][:self.max_triggers_in_batch] for key in result: result[key] = result[key][sort] tmp = veto_info veto_info = [tmp[i] for i in sort] result = self._process_vetoes(result, veto_info) return result
python
{ "resource": "" }
q31446
LiveBatchMatchedFilter._process_vetoes
train
def _process_vetoes(self, results, veto_info): """Calculate signal based vetoes""" chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32, ndmin=1) dof = numpy.array(numpy.zeros(len(veto_info)), numpy.uint32, ndmin=1) sg_chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32, ndmin=1) results['chisq'] = chisq results['chisq_dof'] = dof results['sg_chisq'] = sg_chisq keep = [] for i, (snrv, norm, l, htilde, stilde) in enumerate(veto_info): correlate(htilde, stilde, htilde.cout) c, d = self.power_chisq.values(htilde.cout, snrv, norm, stilde.psd, [l], htilde) chisq[i] = c[0] / d[0] dof[i] = d[0] sgv = self.sg_chisq.values(stilde, htilde, stilde.psd, snrv, norm, c, d, [l]) if sgv is not None: sg_chisq[i] = sgv[0] if self.newsnr_threshold: newsnr = ranking.newsnr(results['snr'][i], chisq[i]) if newsnr >= self.newsnr_threshold: keep.append(i) if self.newsnr_threshold: keep = numpy.array(keep, dtype=numpy.uint32) for key in results: results[key] = results[key][keep] return results
python
{ "resource": "" }
q31447
calc_psd_variation
train
def calc_psd_variation(strain, psd_short_segment, psd_long_segment, short_psd_duration, short_psd_stride, psd_avg_method, low_freq, high_freq): """Calculates time series of PSD variability This function first splits the segment up into 512 second chunks. It then calculates the PSD over this 512 second period as well as in 4 second chunks throughout each 512 second period. Next the function estimates how different the 4 second PSD is to the 512 second PSD and produces a timeseries of this variability. Parameters ---------- strain : TimeSeries Input strain time series to estimate PSDs psd_short_segment : {float, 8} Duration of the short segments for PSD estimation in seconds. psd_long_segment : {float, 512} Duration of the long segments for PSD estimation in seconds. short_psd_duration : {float, 4} Duration of the segments for PSD estimation in seconds. short_psd_stride : {float, 2} Separation between PSD estimation segments in seconds. psd_avg_method : {string, 'median'} Method for averaging PSD estimation segments. low_freq : {float, 20} Minimum frequency to consider the comparison between PSDs. high_freq : {float, 480} Maximum frequency to consider the comparison between PSDs. Returns ------- psd_var : TimeSeries Time series of the variability in the PSD estimation """ # Calculate strain precision if strain.precision == 'single': fs_dtype = numpy.float32 elif strain.precision == 'double': fs_dtype = numpy.float64 # Convert start and end times immediately to floats start_time = numpy.float(strain.start_time) end_time = numpy.float(strain.end_time) # Find the times of the long segments times_long = numpy.arange(start_time, end_time, psd_long_segment) # Set up the empty time series for the PSD variation estimate psd_var = TimeSeries(zeros(int(numpy.ceil((end_time - start_time) / psd_short_segment))), delta_t=psd_short_segment, copy=False, epoch=start_time) ind = 0 for tlong in times_long: # Calculate PSD for long segment and separate the long segment into # overlapping shorter segments if tlong + psd_long_segment <= end_time: psd_long = pycbc.psd.welch( strain.time_slice(tlong, tlong + psd_long_segment), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) times_short = numpy.arange(tlong, tlong + psd_long_segment, psd_short_segment) else: psd_long = pycbc.psd.welch( strain.time_slice(end_time - psd_long_segment, end_time), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) times_short = numpy.arange(tlong, end_time, psd_short_segment) # Calculate the PSD of the shorter segments psd_short = [] for tshort in times_short: if tshort + psd_short_segment <= end_time: pshort = pycbc.psd.welch( strain.time_slice(tshort, tshort + psd_short_segment), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) else: pshort = pycbc.psd.welch( strain.time_slice(tshort - psd_short_segment, end_time), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) psd_short.append(pshort) # Estimate the range of the PSD to compare kmin = int(low_freq / psd_long.delta_f) kmax = int(high_freq / psd_long.delta_f) # Compare the PSD of the short segment to the long segment # The weight factor gives the rough response of a cbc template across # the defined frequency range given the expected PSD (i.e. long PSD) # Then integrate the weighted ratio of the actual PSD (i.e. short PSD) # with the expected PSD (i.e. long PSD) over the specified frequency # range freqs = FrequencySeries(psd_long.sample_frequencies, delta_f=psd_long.delta_f, epoch=psd_long.epoch, dtype=fs_dtype) weight = numpy.array( freqs[kmin:kmax]**(-7./3.) / psd_long[kmin:kmax]) weight /= weight.sum() diff = numpy.array([(weight * numpy.array(p_short[kmin:kmax] / psd_long[kmin:kmax])).sum() for p_short in psd_short]) # Store variation value for i, val in enumerate(diff): psd_var[ind+i] = val ind = ind+len(diff) return psd_var
python
{ "resource": "" }
q31448
find_trigger_value
train
def find_trigger_value(psd_var, idx, start, sample_rate): """ Find the PSD variation value at a particular time Parameters ---------- psd_var : TimeSeries Time series of the variability in the PSD estimation idx : numpy.ndarray Time indices of the triggers start : float GPS start time sample_rate : float Sample rate defined in ini file Returns ------- vals : Array PSD variation value at a particular time """ # Find gps time of the trigger time = start + idx / sample_rate # Find where in the psd variation time series the trigger belongs ind = numpy.digitize(time, psd_var.sample_times) ind -= 1 vals = psd_var[ind] return vals
python
{ "resource": "" }
q31449
setup_foreground_minifollowups
train
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers, tmpltbank_file, insp_segs, insp_data_name, insp_anal_name, dax_output, out_dir, tags=None): """ Create plots that follow up the Nth loudest coincident injection from a statmap produced HDF file. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating coinc_file: pycbc.workflow.File The file associated with the statmap file containing the coincident triggers. single_triggers: list of pycbc.workflow.File A list containing the file objects associated with the merged single detector trigger files for each ifo. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read and analyzed by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- layout: list A list of tuples which specify the displayed file layout for the minifollowups plots. """ logging.info('Entering minifollowups module') if not workflow.cp.has_section('workflow-minifollowups'): logging.info('There is no [workflow-minifollowups] section in configuration file') logging.info('Leaving minifollowups') return tags = [] if tags is None else tags makedir(dax_output) # turn the config file into a File class config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini') workflow.cp.write(open(config_path, 'w')) config_file = wdax.File(os.path.basename(config_path)) config_file.PFN(urlparse.urljoin('file:', urllib.pathname2url(config_path)), site='local') exe = Executable(workflow.cp, 'foreground_minifollowup', ifos=workflow.ifos, out_dir=dax_output) node = exe.create_node() node.add_input_opt('--config-files', config_file) node.add_input_opt('--bank-file', tmpltbank_file) node.add_input_opt('--statmap-file', coinc_file) node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers) node.add_input_opt('--inspiral-segments', insp_segs) node.add_opt('--inspiral-data-read-name', insp_data_name) node.add_opt('--inspiral-data-analyzed-name', insp_anal_name) node.new_output_file_opt(workflow.analysis_time, '.dax', '--output-file', tags=tags) node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags) node.new_output_file_opt(workflow.analysis_time, '.tc.txt', '--transformation-catalog', tags=tags) name = node.output_files[0].name map_file = node.output_files[1] tc_file = node.output_files[2] node.add_opt('--workflow-name', name) node.add_opt('--output-dir', out_dir) workflow += node # execute this in a sub-workflow fil = node.output_files[0] # determine if a staging site has been specified try: staging_site = workflow.cp.get('workflow-foreground_minifollowups', 'staging-site') except: staging_site = None job = dax.DAX(fil) job.addArguments('--basename %s' % os.path.splitext(os.path.basename(name))[0]) Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site) workflow._adag.addJob(job) dep = dax.Dependency(parent=node._dax_node, child=job) workflow._adag.addDependency(dep) logging.info('Leaving minifollowups module')
python
{ "resource": "" }
q31450
make_plot_waveform_plot
train
def make_plot_waveform_plot(workflow, params, out_dir, ifos, exclude=None, require=None, tags=None): """ Add plot_waveform jobs to the workflow. """ tags = [] if tags is None else tags makedir(out_dir) name = 'single_template_plot' secs = requirestr(workflow.cp.get_subsections(name), require) secs = excludestr(secs, exclude) files = FileList([]) for tag in secs: node = PlotExecutable(workflow.cp, 'plot_waveform', ifos=ifos, out_dir=out_dir, tags=[tag] + tags).create_node() node.add_opt('--mass1', "%.6f" % params['mass1']) node.add_opt('--mass2', "%.6f" % params['mass2']) node.add_opt('--spin1z',"%.6f" % params['spin1z']) node.add_opt('--spin2z',"%.6f" % params['spin2z']) if 'u_vals' in params: # Precessing options node.add_opt('--spin1x',"%.6f" % params['spin1x']) node.add_opt('--spin2x',"%.6f" % params['spin2x']) node.add_opt('--spin1y',"%.6f" % params['spin1y']) node.add_opt('--spin2y',"%.6f" % params['spin2y']) node.add_opt('--inclination',"%.6f" % params['inclination']) node.add_opt('--u-val', "%.6f" % params['u_vals']) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node files += node.output_files return files
python
{ "resource": "" }
q31451
make_sngl_ifo
train
def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo, tags=None, rank=None): """Set up a job to create a single-detector HTML summary snippet. """ tags = [] if tags is None else tags makedir(out_dir) name = 'page_snglinfo' files = FileList([]) node = PlotExecutable(workflow.cp, name, ifos=[ifo], out_dir=out_dir, tags=tags).create_node() node.add_input_opt('--single-trigger-file', sngl_file) node.add_input_opt('--bank-file', bank_file) node.add_opt('--trigger-id', str(trigger_id)) if rank is not None: node.add_opt('--n-loudest', str(rank)) node.add_opt('--instrument', ifo) node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') workflow += node files += node.output_files return files
python
{ "resource": "" }
q31452
make_qscan_plot
train
def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None, data_segments=None, time_window=100, tags=None): """ Generate a make_qscan node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. ifo: str Which interferometer are we using? trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to injection_file: pycbc.workflow.File (optional, default=None) If given, add the injections in the file to strain before making the plot. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. time_window: int (optional, default=None) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 100s should be fine for most cases. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming. """ tags = [] if tags is None else tags makedir(out_dir) name = 'plot_qscan' curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo], out_dir=out_dir, tags=tags) node = curr_exe.create_node() # Determine start/end times, using data segments if needed. # Begin by choosing "optimal" times start = trig_time - time_window end = trig_time + time_window # Then if data_segments is available, check against that, and move if # needed if data_segments is not None: # Assumes coalesced, so trig_time can only be within one segment for seg in data_segments: if trig_time in seg: data_seg = seg break elif trig_time == -1.0: node.add_opt('--gps-start-time', int(trig_time)) node.add_opt('--gps-end-time', int(trig_time)) node.add_opt('--center-time', trig_time) caption_string = "'No trigger in %s'" % ifo node.add_opt('--plot-caption', caption_string) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files else: err_msg = "Trig time {} ".format(trig_time) err_msg += "does not seem to lie within any data segments. " err_msg += "This shouldn't be possible, please ask for help!" raise ValueError(err_msg) # Check for pad-data if curr_exe.has_opt('pad-data'): pad_data = int(curr_exe.get_opt('pad-data')) else: pad_data = 0 # We only read data that's available. The code must handle the case # of not much data being available. if end > (data_seg[1] - pad_data): end = data_seg[1] - pad_data if start < (data_seg[0] + pad_data): start = data_seg[0] + pad_data node.add_opt('--gps-start-time', int(start)) node.add_opt('--gps-end-time', int(end)) node.add_opt('--center-time', trig_time) if injection_file is not None: node.add_input_opt('--injection-file', injection_file) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files
python
{ "resource": "" }
q31453
make_singles_timefreq
train
def make_singles_timefreq(workflow, single, bank_file, trig_time, out_dir, veto_file=None, time_window=10, data_segments=None, tags=None): """ Generate a singles_timefreq node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. single: pycbc.workflow.core.File instance The File object storing the single-detector triggers to follow up. bank_file: pycbc.workflow.core.File instance The File object storing the template bank. trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to veto_file: pycbc.workflow.core.File (optional, default=None) If given use this file to veto triggers to determine the loudest event. FIXME: Veto files *should* be provided a definer argument and not just assume that all segments should be read. time_window: int (optional, default=10) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 10s should be fine for most cases. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming. """ tags = [] if tags is None else tags makedir(out_dir) name = 'plot_singles_timefreq' curr_exe = SingleTimeFreqExecutable(workflow.cp, name, ifos=[single.ifo], out_dir=out_dir, tags=tags) node = curr_exe.create_node() node.add_input_opt('--trig-file', single) node.add_input_opt('--bank-file', bank_file) # Determine start/end times, using data segments if needed. # Begin by choosing "optimal" times start = trig_time - time_window end = trig_time + time_window # Then if data_segments is available, check against that, and move if # needed if data_segments is not None: # Assumes coalesced, so trig_time can only be within one segment for seg in data_segments: if trig_time in seg: data_seg = seg break elif trig_time == -1.0: node.add_opt('--gps-start-time', int(trig_time)) node.add_opt('--gps-end-time', int(trig_time)) node.add_opt('--center-time', trig_time) if veto_file: node.add_input_opt('--veto-file', veto_file) node.add_opt('--detector', single.ifo) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files else: err_msg = "Trig time {} ".format(trig_time) err_msg += "does not seem to lie within any data segments. " err_msg += "This shouldn't be possible, please ask for help!" raise ValueError(err_msg) # Check for pad-data if curr_exe.has_opt('pad-data'): pad_data = int(curr_exe.get_opt('pad-data')) else: pad_data = 0 if abs(data_seg) < (2 * time_window + 2 * pad_data): tl = 2 * time_window + 2 * pad_data err_msg = "I was asked to use {} seconds of data ".format(tl) err_msg += "to run a plot_singles_timefreq job. However, I have " err_msg += "only {} seconds available.".format(abs(data_seg)) raise ValueError(err_msg) if data_seg[0] > (start - pad_data): start = data_seg[0] + pad_data end = start + 2 * time_window if data_seg[1] < (end + pad_data): end = data_seg[1] - pad_data start = end - 2 * time_window # Sanity check, shouldn't get here! if data_seg[0] > (start - pad_data): err_msg = "I shouldn't be here! Go ask Ian what he broke." raise ValueError(err_msg) node.add_opt('--gps-start-time', int(start)) node.add_opt('--gps-end-time', int(end)) node.add_opt('--center-time', trig_time) if veto_file: node.add_input_opt('--veto-file', veto_file) node.add_opt('--detector', single.ifo) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files
python
{ "resource": "" }
q31454
JointDistribution.apply_boundary_conditions
train
def apply_boundary_conditions(self, **params):
    """Applies each distribution's boundary conditions to the given
    parameters, returning a new dict with the conditions applied.

    Parameters
    ----------
    **params :
        Keyword arguments should give the parameters to apply the
        conditions to.

    Returns
    -------
    dict
        A dictionary of the parameters after each distribution's
        `apply_boundary_conditions` function has been applied.
    """
    for dist in self.distributions:
        params.update(dist.apply_boundary_conditions(**params))
    return params
python
{ "resource": "" }
q31455
JointDistribution.rvs
train
def rvs(self, size=1):
    """ Rejection samples the parameter space.
    """
    # create output FieldArray
    out = record.FieldArray(size, dtype=[(arg, float)
                                         for arg in self.variable_args])

    # loop until enough samples accepted
    n = 0
    while n < size:
        # draw samples
        samples = {}
        for dist in self.distributions:
            draw = dist.rvs(1)
            for param in dist.params:
                samples[param] = draw[param][0]
        vals = numpy.array([samples[arg] for arg in self.variable_args])

        # determine if all parameter values are in prior space
        # if they are then add to output
        if self(**dict(zip(self.variable_args, vals))) > -numpy.inf:
            out[n] = vals
            n += 1

    return out
python
{ "resource": "" }
q31456
_check_lal_pars
train
def _check_lal_pars(p):
    """ Create a laldict object from the dictionary of waveform parameters

    Parameters
    ----------
    p: dictionary
        The dictionary of lalsimulation parameters

    Returns
    -------
    laldict: LalDict
        The lal type dictionary to pass to the lalsimulation waveform
        functions.
    """
    lal_pars = lal.CreateDict()
    # nonGRparams can be straightforwardly added if needed, however they
    # have to be invoked one by one
    if p['phase_order'] != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(
            lal_pars, int(p['phase_order']))
    if p['amplitude_order'] != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(
            lal_pars, int(p['amplitude_order']))
    if p['spin_order'] != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(
            lal_pars, int(p['spin_order']))
    if p['tidal_order'] != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(
            lal_pars, p['tidal_order'])
    if p['eccentricity_order'] != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(
            lal_pars, p['eccentricity_order'])
    if p['lambda1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(
            lal_pars, p['lambda1'])
    if p['lambda2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(
            lal_pars, p['lambda2'])
    if p['lambda_octu1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda1(
            lal_pars, p['lambda_octu1'])
    if p['lambda_octu2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda2(
            lal_pars, p['lambda_octu2'])
    if p['quadfmode1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode1(
            lal_pars, p['quadfmode1'])
    if p['quadfmode2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode2(
            lal_pars, p['quadfmode2'])
    if p['octufmode1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode1(
            lal_pars, p['octufmode1'])
    if p['octufmode2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode2(
            lal_pars, p['octufmode2'])
    if p['dquad_mon1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(
            lal_pars, p['dquad_mon1'])
    if p['dquad_mon2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(
            lal_pars, p['dquad_mon2'])
    if p['numrel_data']:
        lalsimulation.SimInspiralWaveformParamsInsertNumRelData(
            lal_pars, str(p['numrel_data']))
    if p['modes_choice']:
        lalsimulation.SimInspiralWaveformParamsInsertModesChoice(
            lal_pars, p['modes_choice'])
    if p['frame_axis']:
        lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(
            lal_pars, p['frame_axis'])
    if p['side_bands']:
        lalsimulation.SimInspiralWaveformParamsInsertSideband(
            lal_pars, p['side_bands'])
    if p['mode_array'] is not None:
        ma = lalsimulation.SimInspiralCreateModeArray()
        for l, m in p['mode_array']:
            lalsimulation.SimInspiralModeArrayActivateMode(ma, l, m)
        lalsimulation.SimInspiralWaveformParamsInsertModeArray(lal_pars, ma)
    return lal_pars
python
{ "resource": "" }
q31457
_spintaylor_aligned_prec_swapper
train
def _spintaylor_aligned_prec_swapper(**p):
    """
    SpinTaylorF2 is only single spin, it also struggles with anti-aligned
    spin waveforms. This construct chooses between the aligned-twospin
    TaylorF2 model and the precessing singlespin SpinTaylorF2 models. If
    aligned spins are given, use TaylorF2, if nonaligned spins are given
    use SpinTaylorF2. In the case of nonaligned doublespin systems the
    code will fail at the waveform generator level.
    """
    orig_approximant = p['approximant']
    if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
            p['spin1y'] == 0:
        p['approximant'] = 'TaylorF2'
    else:
        p['approximant'] = 'SpinTaylorF2'
    hp, hc = _lalsim_fd_waveform(**p)
    p['approximant'] = orig_approximant
    return hp, hc
python
{ "resource": "" }
q31458
get_obj_attrs
train
def get_obj_attrs(obj):
    """ Return a dictionary built from the attributes of the given object.
    """
    pr = {}
    if obj is not None:
        if isinstance(obj, numpy.core.records.record):
            for name in obj.dtype.names:
                pr[name] = getattr(obj, name)
        elif hasattr(obj, '__dict__') and obj.__dict__:
            pr = obj.__dict__
        elif hasattr(obj, '__slots__'):
            for slot in obj.__slots__:
                if hasattr(obj, slot):
                    pr[slot] = getattr(obj, slot)
        elif isinstance(obj, dict):
            pr = obj.copy()
        else:
            for name in dir(obj):
                try:
                    value = getattr(obj, name)
                    if not name.startswith('__') and \
                            not inspect.ismethod(value):
                        pr[name] = value
                except:
                    continue
    return pr
python
{ "resource": "" }
q31459
get_fd_waveform_sequence
train
def get_fd_waveform_sequence(template=None, **kwds):
    """Return values of the waveform evaluated at the sequence of frequency
    points.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    {params}

    Returns
    -------
    hplustilde: Array
        The plus phase of the waveform in frequency domain evaluated at the
        frequency points.
    hcrosstilde: Array
        The cross phase of the waveform in frequency domain evaluated at the
        frequency points.
    """
    kwds['delta_f'] = -1
    kwds['f_lower'] = -1
    p = props(template, required_args=fd_required_args, **kwds)
    lal_pars = _check_lal_pars(p)
    hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(
        float(p['coa_phase']),
        float(pnutils.solar_mass_to_kg(p['mass1'])),
        float(pnutils.solar_mass_to_kg(p['mass2'])),
        float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
        float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
        float(p['f_ref']),
        pnutils.megaparsecs_to_meters(float(p['distance'])),
        float(p['inclination']),
        lal_pars,
        _lalsim_enum[p['approximant']],
        p['sample_points'].lal())
    return Array(hp.data.data), Array(hc.data.data)
python
{ "resource": "" }
q31460
get_td_waveform
train
def get_td_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    {params}

    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    input_params = props(template, required_args=td_required_args, **kwargs)
    wav_gen = td_wav[type(_scheme.mgr.state)]
    if input_params['approximant'] not in wav_gen:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
    return wav_gen[input_params['approximant']](**input_params)
python
{ "resource": "" }
q31461
get_fd_waveform
train
def get_fd_waveform(template=None, **kwargs):
    """Return a frequency domain gravitational waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    {params}

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of the waveform in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of the waveform in frequency domain.
    """
    input_params = props(template, required_args=fd_required_args, **kwargs)
    wav_gen = fd_wav[type(_scheme.mgr.state)]
    if input_params['approximant'] not in wav_gen:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
    try:
        ffunc = input_params.pop('f_final_func')
        if ffunc != '':
            # convert the frequency function to a value
            input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc](
                input_params)
            # if the f_final is < f_lower, raise a NoWaveformError
            if 'f_final' in input_params and \
                    (input_params['f_lower'] + input_params['delta_f'] >=
                     input_params['f_final']):
                raise NoWaveformError("cannot generate waveform: "
                                      "f_lower >= f_final")
    except KeyError:
        pass
    return wav_gen[input_params['approximant']](**input_params)
python
{ "resource": "" }
q31462
get_interpolated_fd_waveform
train
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
                                 **params):
    """ Return a Fourier domain waveform approximant, using interpolation
    """

    def rulog2(val):
        return 2.0 ** numpy.ceil(numpy.log2(float(val)))

    orig_approx = params['approximant']
    params['approximant'] = params['approximant'].replace('_INTERP', '')
    df = params['delta_f']

    if 'duration' not in params:
        duration = get_waveform_filter_length_in_time(**params)
    elif params['duration'] > 0:
        duration = params['duration']
    else:
        err_msg = "Waveform duration must be greater than 0."
        raise ValueError(err_msg)

    # FIXME We should try to get this length directly somehow
    # I think this number should be conservative
    ringdown_padding = 0.5

    df_min = 1.0 / rulog2(duration + ringdown_padding)
    # FIXME: I don't understand this, but waveforms with df_min < 0.5 will
    # chop off the inspiral when using ringdown_padding - 0.5.
    # Also, if ringdown_padding is set to a very small
    # value we can see cases where the ringdown is chopped.
    if df_min > 0.5:
        df_min = 0.5
    params['delta_f'] = df_min
    hp, hc = get_fd_waveform(**params)
    hp = hp.astype(dtype)
    if return_hc:
        hc = hc.astype(dtype)
    else:
        hc = None

    f_end = get_waveform_end_frequency(**params)
    if f_end is None:
        f_end = (len(hp) - 1) * hp.delta_f
    if 'f_final' in params and params['f_final'] > 0:
        f_end_params = params['f_final']
        if f_end is not None:
            f_end = min(f_end_params, f_end)

    n_min = int(rulog2(f_end / df_min)) + 1
    if n_min < len(hp):
        hp = hp[:n_min]
        if hc is not None:
            hc = hc[:n_min]

    offset = int(ringdown_padding * (len(hp) - 1) * 2 * hp.delta_f)

    hp = interpolate_complex_frequency(hp, df, zeros_offset=offset,
                                       side='left')
    if hc is not None:
        hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
                                           side='left')
    params['approximant'] = orig_approx
    return hp, hc
python
{ "resource": "" }
q31463
get_sgburst_waveform
train
def get_sgburst_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain
    sine-Gaussian burst waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    approximant : string
        A string that indicates the chosen approximant. See
        `td_approximants` for available options.
    q : float
        The quality factor of a sine-Gaussian burst
    frequency : float
        The centre-frequency of a sine-Gaussian burst
    delta_t : float
        The time step used to generate the waveform
    hrss : float
        The strain rss
    amplitude: float
        The strain amplitude

    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    input_params = props_sgburst(template, **kwargs)

    for arg in sgburst_required_args:
        if arg not in input_params:
            raise ValueError("Please provide " + str(arg))

    return _lalsim_sgburst_waveform(**input_params)
python
{ "resource": "" }
q31464
get_imr_length
train
def get_imr_length(approx, **kwds):
    """Call through to pnutils to obtain IMR waveform durations
    """
    m1 = float(kwds['mass1'])
    m2 = float(kwds['mass2'])
    s1z = float(kwds['spin1z'])
    s2z = float(kwds['spin2z'])
    f_low = float(kwds['f_lower'])
    # 10% margin of error is incorporated in the pnutils function
    return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low,
                                    approximant=approx)
python
{ "resource": "" }
q31465
get_waveform_filter
train
def get_waveform_filter(out, template=None, **kwargs):
    """Return a frequency domain waveform filter for the specified
    approximant
    """
    n = len(out)

    input_params = props(template, **kwargs)

    if input_params['approximant'] in \
            filter_approximants(_scheme.mgr.state):
        wav_gen = filter_wav[type(_scheme.mgr.state)]
        htilde = wav_gen[input_params['approximant']](out=out,
                                                      **input_params)
        htilde.resize(n)
        htilde.chirp_length = get_waveform_filter_length_in_time(
            **input_params)
        htilde.length_in_time = htilde.chirp_length
        return htilde

    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        duration = get_waveform_filter_length_in_time(**input_params)
        hp, _ = wav_gen[input_params['approximant']](duration=duration,
                                                     return_hc=False,
                                                     **input_params)
        hp.resize(n)
        out[0:len(hp)] = hp[:]
        hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False)
        hp.length_in_time = hp.chirp_length = duration
        return hp
    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, _ = wav_gen[input_params['approximant']](**input_params)
        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)
        return td_waveform_to_fd_waveform(hp, out=out)
    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
python
{ "resource": "" }
q31466
get_two_pol_waveform_filter
train
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs):
    """Return a frequency domain waveform filter for the specified
    approximant.

    Unlike get_waveform_filter this function returns both h_plus and
    h_cross components of the waveform, which are needed for searches
    where h_plus and h_cross are not related by a simple phase shift.
    """
    n = len(outplus)

    # If we don't have an inclination column alpha3 might be used
    if not hasattr(template, 'inclination') and \
            'inclination' not in kwargs:
        if hasattr(template, 'alpha3'):
            kwargs['inclination'] = template.alpha3

    input_params = props(template, **kwargs)

    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        hp.resize(n)
        hc.resize(n)
        outplus[0:len(hp)] = hp[:]
        hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False)
        outcross[0:len(hc)] = hc[:]
        hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False)
        hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
        hp.length_in_time = hp.chirp_length
        hc.chirp_length = hp.chirp_length
        hc.length_in_time = hp.length_in_time
        return hp, hc
    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        # N: number of time samples required
        N = (n - 1) * 2
        delta_f = 1.0 / (N * input_params['delta_t'])
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)
            hc = wfutils.taper_timeseries(hc, input_params['taper'],
                                          return_lal=False)
        # total duration of the waveform
        tmplt_length = len(hp) * hp.delta_t
        # for IMR templates the zero of time is at max amplitude (merger)
        # thus the start time is minus the duration of the template from
        # lower frequency cutoff to merger, i.e. minus the 'chirp time'
        tChirp = - float(hp.start_time)  # conversion from LIGOTimeGPS
        hp.resize(N)
        hc.resize(N)
        k_zero = int(hp.start_time / hp.delta_t)
        hp.roll(k_zero)
        hc.roll(k_zero)
        hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
        hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
        fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
        fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
        hp_tilde.length_in_time = tmplt_length
        hp_tilde.chirp_length = tChirp
        hc_tilde.length_in_time = tmplt_length
        hc_tilde.chirp_length = tChirp
        return hp_tilde, hc_tilde
    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
python
{ "resource": "" }
q31467
get_template_amplitude_norm
train
def get_template_amplitude_norm(template=None, **kwargs):
    """ Return additional constant template normalization. This only
    affects the effective distance calculation. Returns None for all
    templates with a physically meaningful amplitude.
    """
    input_params = props(template, **kwargs)
    approximant = kwargs['approximant']

    if approximant in _template_amplitude_norms:
        return _template_amplitude_norms[approximant](**input_params)
    else:
        return None
python
{ "resource": "" }
q31468
get_waveform_filter_precondition
train
def get_waveform_filter_precondition(approximant, length, delta_f):
    """Return the data preconditioning factor for this approximant.
    """
    if approximant in _filter_preconditions:
        return _filter_preconditions[approximant](length, delta_f)
    else:
        return None
python
{ "resource": "" }
q31469
get_waveform_filter_norm
train
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
    """ Return the normalization vector for the approximant
    """
    if approximant in _filter_norms:
        return _filter_norms[approximant](psd, length, delta_f, f_lower)
    else:
        return None
python
{ "resource": "" }
q31470
get_waveform_end_frequency
train
def get_waveform_end_frequency(template=None, **kwargs):
    """Return the stop frequency of a template
    """
    input_params = props(template, **kwargs)
    approximant = kwargs['approximant']

    if approximant in _filter_ends:
        return _filter_ends[approximant](**input_params)
    else:
        return None
python
{ "resource": "" }
q31471
get_waveform_filter_length_in_time
train
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
    """For filter templates, return the length in time of the template.
    """
    kwargs = props(template, **kwargs)

    if approximant in _filter_time_lengths:
        return _filter_time_lengths[approximant](**kwargs)
    else:
        return None
python
{ "resource": "" }
q31472
MarginalizedGaussianNoise._extra_stats
train
def _extra_stats(self):
    """Adds ``loglr``, ``optimal_snrsq`` and matched filter snrsq in each
    detector to the default stats."""
    return ['loglr'] + \
           ['{}_optimal_snrsq'.format(det) for det in self._data] + \
           ['{}_matchedfilter_snrsq'.format(det) for det in self._data]
python
{ "resource": "" }
q31473
MarginalizedGaussianNoise._margtime_mfsnr
train
def _margtime_mfsnr(template, data):
    """Returns a time series for the matched filter SNR assuming that the
    template and data have both been normalised and whitened.
    """
    snr = matched_filter_core(template, data, h_norm=1, psd=None)
    hd_i = snr[0].numpy().real
    return hd_i
python
{ "resource": "" }
q31474
MarginalizedGaussianNoise._margtimedist_loglr
train
def _margtimedist_loglr(self, mf_snr, opt_snr):
    """Returns the log likelihood ratio marginalized over time and
    distance.
    """
    logl = special.logsumexp(mf_snr, b=self._deltat)
    logl_marg = logl / self._dist_array
    opt_snr_marg = opt_snr / self._dist_array**2
    return special.logsumexp(logl_marg - 0.5 * opt_snr_marg,
                             b=self._deltad * self.dist_prior)
python
{ "resource": "" }
q31475
MarginalizedGaussianNoise._margtimephase_loglr
train
def _margtimephase_loglr(self, mf_snr, opt_snr):
    """Returns the log likelihood ratio marginalized over time and phase.
    """
    return special.logsumexp(numpy.log(special.i0(mf_snr)),
                             b=self._deltat) - 0.5 * opt_snr
python
{ "resource": "" }
q31476
MarginalizedGaussianNoise._margdistphase_loglr
train
def _margdistphase_loglr(self, mf_snr, opt_snr):
    """Returns the log likelihood ratio marginalized over distance and
    phase.
    """
    logl = numpy.log(special.i0(mf_snr))
    logl_marg = logl / self._dist_array
    opt_snr_marg = opt_snr / self._dist_array**2
    return special.logsumexp(logl_marg - 0.5 * opt_snr_marg,
                             b=self._deltad * self.dist_prior)
python
{ "resource": "" }
q31477
MarginalizedGaussianNoise._margdist_loglr
train
def _margdist_loglr(self, mf_snr, opt_snr):
    """Returns the log likelihood ratio marginalized over distance.
    """
    mf_snr_marg = mf_snr / self._dist_array
    opt_snr_marg = opt_snr / self._dist_array**2
    return special.logsumexp(mf_snr_marg - 0.5 * opt_snr_marg,
                             b=self._deltad * self.dist_prior)
python
{ "resource": "" }
q31478
MarginalizedGaussianNoise._margtime_loglr
train
def _margtime_loglr(self, mf_snr, opt_snr):
    """Returns the log likelihood ratio marginalized over time.
    """
    return special.logsumexp(mf_snr, b=self._deltat) - 0.5 * opt_snr
python
{ "resource": "" }
q31479
add_workflow_command_line_group
train
def add_workflow_command_line_group(parser):
    """
    The standard way of initializing a ConfigParser object in workflow will
    be to do it from the command line. This is done by giving a

    --config-files filea.ini fileb.ini filec.ini

    command. You can also set config file override commands on the command
    line. This will be most useful when setting (for example) start and
    end times, or active ifos. This is done by

    --config-overrides section1:option1:value1 section2:option2:value2 ...

    This can also be given as

    --config-overrides section1:option1

    where the value will be left as ''.

    To remove a configuration option, use the command line argument

    --config-delete section1:option1

    which will delete option1 from [section1] or

    --config-delete section1

    to delete all of the options in [section1]

    Deletes are implemented before overrides.

    This function returns an argparse OptionGroup to ensure these options
    are parsed correctly and can then be sent directly to initialize a
    WorkflowConfigParser.

    Parameters
    ----------
    parser : argparse.ArgumentParser instance
        The initialized argparse instance to add the workflow option group
        to.
    """
    workflowArgs = parser.add_argument_group('Configuration',
                                             'Options needed for parsing '
                                             'config file(s).')

    workflowArgs.add_argument("--config-files", nargs="+", action='store',
                              metavar="CONFIGFILE",
                              help="List of config files to be used in "
                                   "analysis.")

    workflowArgs.add_argument("--config-overrides", nargs="*",
                              action='store',
                              metavar="SECTION:OPTION:VALUE",
                              help="List of section,option,value "
                                   "combinations to add into the "
                                   "configuration file. Normally the gps "
                                   "start and end times might be provided "
                                   "this way, and user specific locations "
                                   "(ie. output directories). This can also "
                                   "be provided as SECTION:OPTION or "
                                   "SECTION:OPTION: both of which indicate "
                                   "that the corresponding value is left "
                                   "blank.")

    workflowArgs.add_argument("--config-delete", nargs="*", action='store',
                              metavar="SECTION:OPTION",
                              help="List of section,option combinations to "
                                   "delete from the configuration file. "
                                   "This can also be provided as SECTION, "
                                   "which deletes the entire section from "
                                   "the configuration file, or "
                                   "SECTION:OPTION, which deletes a "
                                   "specific option from a given section.")
python
{ "resource": "" }
q31480
WorkflowConfigParser.perform_exe_expansion
train
def perform_exe_expansion(self):
    """
    This function will look through the executables section of the
    ConfigParser object and replace any values using macros with full
    paths.

    For any values that look like

    ${which:lalapps_tmpltbank}

    will be replaced with the equivalent of which(lalapps_tmpltbank)

    Otherwise values will be unchanged.
    """
    # Only works on executables section
    if self.has_section('executables'):
        for option, value in self.items('executables'):
            # Check the value
            newStr = self.interpolate_exe(value)
            if newStr != value:
                self.set('executables', option, newStr)
python
{ "resource": "" }
q31481
WorkflowConfigParser.interpolate_exe
train
def interpolate_exe(self, testString):
    """
    Replace testString with a path to an executable based on the format.

    If this looks like

    ${which:lalapps_tmpltbank}

    it will return the equivalent of which(lalapps_tmpltbank)

    Otherwise it will return an unchanged string.

    Parameters
    ----------
    testString : string
        The input string

    Returns
    -------
    newString : string
        The output string.
    """
    # First check if any interpolation is needed and abort if not
    testString = testString.strip()
    if not (testString.startswith('${') and testString.endswith('}')):
        return testString

    # This may not be an exe interpolation, so even if it has ${XXX} form
    # I may not have to do anything
    newString = testString

    # Strip the ${ and }
    testString = testString[2:-1]

    testList = testString.split(':')

    # Maybe we can add a few different possibilities for substitution
    if len(testList) == 2:
        if testList[0] == 'which':
            newString = distutils.spawn.find_executable(testList[1])
            if not newString:
                errmsg = "Cannot find exe %s in your path " % (testList[1])
                errmsg += "and you specified ${which:%s}." % (testList[1])
                raise ValueError(errmsg)

    return newString
python
{ "resource": "" }
q31482
WorkflowConfigParser.get_subsections
train
def get_subsections(self, section_name):
    """ Return a list of subsections for the given section name
    """
    # Keep only subsection names
    subsections = [sec[len(section_name) + 1:] for sec in self.sections()
                   if sec.startswith(section_name + '-')]

    for sec in subsections:
        sp = sec.split('-')
        # This is unusual, but a format [section-subsection-tag] is okay.
        # Just check that the [section-subsection] section exists. If not,
        # it is possible the user is trying to use a subsection name with
        # '-' in it
        if (len(sp) > 1) and not \
                self.has_section('%s-%s' % (section_name, sp[0])):
            raise ValueError(
                "Workflow uses the '-' as a delimiter so "
                "this is interpreted as section-subsection-tag. "
                "While checking section %s, no section with "
                "name %s-%s was found. "
                "If you did not intend to use tags in an "
                "'advanced user' manner, or do not understand what "
                "this means, don't use dashes in section "
                "names. So [injection-nsbhinj] is good. "
                "[injection-nsbh-inj] is not." % (sec, sp[0], sp[1]))

    if len(subsections) > 0:
        return [sec.split('-')[0] for sec in subsections]
    elif self.has_section(section_name):
        return ['']
    else:
        return []
python
{ "resource": "" }
q31483
WorkflowConfigParser.interpolate_string
train
def interpolate_string(self, testString, section):
    """
    Take a string and replace all examples of ExtendedInterpolation
    formatting within the string with the exact value.

    For values like ${example} this is replaced with the value that
    corresponds to the option called example ***in the same section***

    For values like ${common|example} this is replaced with the value that
    corresponds to the option example in the section [common]. Note that
    in the python3 config parser this is ${common:example} but python2.7
    interprets the : the same as a = and this breaks things

    Nested interpolation is not supported here.

    Parameters
    ----------
    testString : String
        The string to parse and interpolate
    section : String
        The current section of the ConfigParser object

    Returns
    -------
    testString : String
        Interpolated string
    """
    # First check if any interpolation is needed and abort if not
    reObj = re.search(r"\$\{.*?\}", testString)
    while reObj:
        # Not really sure how this works, but this will obtain the first
        # instance of a string contained within ${....}
        repString = (reObj).group(0)[2:-1]
        # Need to test which of the two formats we have
        splitString = repString.split('|')
        if len(splitString) == 1:
            try:
                testString = testString.replace(
                    '${' + repString + '}',
                    self.get(section, splitString[0]))
            except ConfigParser.NoOptionError:
                print("Substitution failed")
                raise
        if len(splitString) == 2:
            try:
                testString = testString.replace(
                    '${' + repString + '}',
                    self.get(splitString[0], splitString[1]))
            except ConfigParser.NoOptionError:
                print("Substitution failed")
                raise
        reObj = re.search(r"\$\{.*?\}", testString)

    return testString
python
{ "resource": "" }
q31484
WorkflowConfigParser.add_options_to_section
train
def add_options_to_section(self, section, items, overwrite_options=False):
    """
    Add a set of options and values to a section of a ConfigParser object.
    Will throw an error if any of the options being added already exist;
    this behaviour can be overridden if desired.

    Parameters
    ----------
    section : string
        The name of the section to add options+values to
    items : list of tuples
        Each tuple contains (at [0]) the option and (at [1]) the value to
        add to the section of the ini file
    overwrite_options : Boolean, optional
        By default this function will throw a ValueError if an option
        exists in both the original section in the ConfigParser *and* in
        the provided items. Setting this to True overrides that, so that
        the options+values given in items will replace the original
        values. Default = False
    """
    # Sanity checking
    if not self.has_section(section):
        raise ValueError('Section %s not present in ConfigParser.'
                         % (section,))

    # Check for duplicate options first
    for option, value in items:
        if not overwrite_options:
            if option in self.options(section):
                raise ValueError('Option exists in both original '
                                 'ConfigParser section [%s] and '
                                 'input list: %s' % (section, option))
        self.set(section, option, value)
python
{ "resource": "" }
q31485
WorkflowConfigParser.check_duplicate_options
train
def check_duplicate_options(self, section1, section2, raise_error=False):
    """
    Check for duplicate options in two sections, section1 and section2.
    Will return a list of the duplicate options.

    Parameters
    ----------
    section1 : string
        The name of the first section to compare
    section2 : string
        The name of the second section to compare
    raise_error : Boolean, optional (default=False)
        If True, raise an error if duplicates are present.

    Returns
    -------
    duplicates : List
        List of duplicate options
    """
    # Sanity checking
    if not self.has_section(section1):
        raise ValueError('Section %s not present in ConfigParser.'
                         % (section1,))
    if not self.has_section(section2):
        raise ValueError('Section %s not present in ConfigParser.'
                         % (section2,))

    items1 = self.options(section1)
    items2 = self.options(section2)

    # The list comprehension here creates a list of all duplicate items
    duplicates = [x for x in items1 if x in items2]

    if duplicates and raise_error:
        raise ValueError('The following options appear in both section '
                         '%s and %s: %s'
                         % (section1, section2, ' '.join(duplicates)))

    return duplicates
python
{ "resource": "" }
q31486
WorkflowConfigParser.add_config_opts_to_parser
train
def add_config_opts_to_parser(parser):
    """Adds options for configuration files to the given parser."""
    parser.add_argument("--config-files", type=str, nargs="+",
                        required=True,
                        help="A file parsable by "
                             "pycbc.workflow.WorkflowConfigParser.")
    parser.add_argument("--config-overrides", type=str, nargs="+",
                        default=None, metavar="SECTION:OPTION:VALUE",
                        help="List of section:option:value combinations "
                             "to add into the configuration file.")
python
{ "resource": "" }
q31487
WorkflowConfigParser.from_cli
train
def from_cli(cls, opts):
    """Loads a config file from the given options, with overrides and
    deletes applied.
    """
    # read configuration file
    logging.info("Reading configuration file")
    if opts.config_overrides is not None:
        overrides = [override.split(":")
                     for override in opts.config_overrides]
    else:
        overrides = None
    if opts.config_delete is not None:
        deletes = [delete.split(":") for delete in opts.config_delete]
    else:
        deletes = None
    return cls(opts.config_files, overrides, deleteTuples=deletes)
python
{ "resource": "" }
q31488
fft
train
def fft(invec, outvec):
    """ Fourier transform from invec to outvec.

    Perform a Fourier transform. The type of transform is determined
    by the dtype of invec and outvec.

    Parameters
    ----------
    invec : TimeSeries or FrequencySeries
        The input vector.
    outvec : TimeSeries or FrequencySeries
        The output.
    """
    prec, itype, otype = _check_fft_args(invec, outvec)
    _check_fwd_args(invec, itype, outvec, otype, 1, None)

    # The following line is where all the work is done:
    backend = get_backend()
    backend.fft(invec, outvec, prec, itype, otype)
    # For a forward FFT, the length of the *input* vector is the length
    # we should divide by, whether C2C or R2HC transform
    if isinstance(invec, _TimeSeries):
        outvec._epoch = invec._epoch
        outvec._delta_f = 1.0 / (invec._delta_t * len(invec))
        outvec *= invec._delta_t
    elif isinstance(invec, _FrequencySeries):
        outvec._epoch = invec._epoch
        outvec._delta_t = 1.0 / (invec._delta_f * len(invec))
        outvec *= invec._delta_f
python
{ "resource": "" }
q31489
ifft
train
def ifft(invec, outvec):
    """ Inverse Fourier transform from invec to outvec.

    Perform an inverse Fourier transform. The type of transform is
    determined by the dtype of invec and outvec.

    Parameters
    ----------
    invec : TimeSeries or FrequencySeries
        The input vector.
    outvec : TimeSeries or FrequencySeries
        The output.
    """
    prec, itype, otype = _check_fft_args(invec, outvec)
    _check_inv_args(invec, itype, outvec, otype, 1, None)

    # The following line is where all the work is done:
    backend = get_backend()
    backend.ifft(invec, outvec, prec, itype, otype)
    # For an inverse FFT, the length of the *output* vector is the length
    # we should divide by, whether C2C or HC2R transform
    if isinstance(invec, _TimeSeries):
        outvec._epoch = invec._epoch
        outvec._delta_f = 1.0 / (invec._delta_t * len(outvec))
        outvec *= invec._delta_t
    elif isinstance(invec, _FrequencySeries):
        outvec._epoch = invec._epoch
        outvec._delta_t = 1.0 / (invec._delta_f * len(outvec))
        outvec *= invec._delta_f
python
{ "resource": "" }
q31490
render_workflow_html_template
train
def render_workflow_html_template(filename, subtemplate, filelists, **kwargs):
    """ Writes a template given inputs from the workflow generator. Takes
    a list of file lists, where each element is a pycbc File object, along
    with the name of the subtemplate to render and the filename of the
    output.
    """
    dirnam = os.path.dirname(filename)
    makedir(dirnam)

    try:
        filenames = [f.name for filelist in filelists
                     for f in filelist if f is not None]
    except TypeError:
        filenames = []

    # render subtemplate
    subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells'
    env = Environment(loader=FileSystemLoader(subtemplate_dir))
    env.globals.update(get_embedded_config=get_embedded_config)
    env.globals.update(path_exists=os.path.exists)
    env.globals.update(len=len)
    subtemplate = env.get_template(subtemplate)
    context = {'filelists': filelists,
               'dir': dirnam}
    context.update(kwargs)
    output = subtemplate.render(context)

    # save as html page
    kwds = {'render-function': 'render_tmplt',
            'filenames': ','.join(filenames)}
    save_html_with_metadata(str(output), filename, None, kwds)
python
{ "resource": "" }
q31491
get_embedded_config
train
def get_embedded_config(filename):
    """ Attempt to load config data attached to file
    """
    def check_option(self, section, name):
        return (self.has_section(section)
                and (self.has_option(section, name)
                     or (name in self.defaults())))

    try:
        cp = pycbc.results.load_metadata_from_file(filename)
    except TypeError:
        cp = ConfigParser()

    cp.check_option = types.MethodType(check_option, cp)
    return cp
python
{ "resource": "" }
q31492
setup_template_render
train
def setup_template_render(path, config_path):
    """ This function is the gateway for rendering a template for a file.
    """
    # initialization
    cp = get_embedded_config(path)
    output = ''
    filename = os.path.basename(path)

    # use meta-data if not empty for rendering
    if cp.has_option(filename, 'render-function'):
        render_function_name = cp.get(filename, 'render-function')
        render_function = eval(render_function_name)
        output = render_function(path, cp)

    # read configuration file for rendering
    elif os.path.exists(config_path):
        cp.read(config_path)

        # render template
        if cp.has_option(filename, 'render-function'):
            render_function_name = cp.get(filename, 'render-function')
            render_function = eval(render_function_name)
            output = render_function(path, cp)
        else:
            output = render_default(path, cp)

    # if no configuration file is present
    # then render the default template
    else:
        output = render_default(path, cp)

    return output
python
{ "resource": "" }
q31493
render_default
train
def render_default(path, cp):
    """ This is the default function that will render a template to a
    string of HTML. The string will be for a drop-down tab that contains
    a link to the file.

    If the file extension requires information to be read, then that is
    passed to the content variable (eg. a segmentlistdict).
    """
    # define filename and slug from path
    filename = os.path.basename(path)
    slug = filename.replace('.', '_')

    # initializations
    content = None

    if path.endswith('.xml') or path.endswith('.xml.gz'):
        # segment or veto file returns a segmentlistdict instance
        try:
            wf_file = SegFile.from_segment_xml(path)
            # FIXME: This is a dictionary, but the code wants a segmentlist
            #        for now I just coalesce.
            wf_file.return_union_seglist()
        except Exception as e:
            print('No segment table found in %s : %s' % (path, e))

    # render template
    template_dir = pycbc.results.__path__[0] + '/templates/files'
    env = Environment(loader=FileSystemLoader(template_dir))
    env.globals.update(abs=abs)
    env.globals.update(open=open)
    env.globals.update(path_exists=os.path.exists)
    template = env.get_template('file_default.html')
    context = {'path': path,
               'filename': filename,
               'slug': slug,
               'cp': cp,
               'content': content}
    output = template.render(context)

    return output
python
{ "resource": "" }
q31494
render_glitchgram
train
def render_glitchgram(path, cp):
    """ Render a glitchgram file template.
    """
    # define filename and slug from path
    filename = os.path.basename(path)
    slug = filename.replace('.', '_')

    # render template
    template_dir = pycbc.results.__path__[0] + '/templates/files'
    env = Environment(loader=FileSystemLoader(template_dir))
    env.globals.update(abs=abs)
    template = env.get_template(cp.get(filename, 'template'))
    context = {'filename': filename,
               'slug': slug,
               'cp': cp}
    output = template.render(context)

    return output
python
{ "resource": "" }
q31495
get_available_detectors
train
def get_available_detectors():
    """Return list of detectors known in the currently sourced lalsuite.

    This function will query lalsuite about which detectors are known to
    lalsuite. Detectors are identified by a two character string e.g. 'K1',
    but also by a longer, and clearer name, e.g. KAGRA. This function
    returns both. As LAL doesn't really expose this functionality we have
    to make some assumptions about how this information is stored in LAL.
    Therefore while we hope this function will work correctly, it's
    possible it will need updating in the future. It would be better if
    LAL exposed this information properly.
    """
    ld = lal.__dict__
    known_lal_names = [j for j in ld.keys() if "DETECTOR_PREFIX" in j]
    known_prefixes = [ld[k] for k in known_lal_names]
    known_names = [ld[k.replace('PREFIX', 'NAME')] for k in known_lal_names]
    return zip(known_prefixes, known_names)
python
{ "resource": "" }
q31496
Detector.light_travel_time_to_detector
train
def light_travel_time_to_detector(self, det):
    """ Return the light travel time from this detector

    Parameters
    ----------
    det: Detector
        The other detector to determine the light travel time to.

    Returns
    -------
    time: float
        The light travel time in seconds
    """
    d = self.location - det.location
    return float(d.dot(d)**0.5 / constants.c.value)
python
{ "resource": "" }
q31497
Detector.antenna_pattern
train
def antenna_pattern(self, right_ascension, declination, polarization,
                    t_gps):
    """Return the detector response.

    Parameters
    ----------
    right_ascension: float or numpy.ndarray
        The right ascension of the source
    declination: float or numpy.ndarray
        The declination of the source
    polarization: float or numpy.ndarray
        The polarization angle of the source
    t_gps: float
        The GPS time (in s) at which to evaluate the response.

    Returns
    -------
    fplus: float or numpy.ndarray
        The plus polarization factor for this sky location / orientation
    fcross: float or numpy.ndarray
        The cross polarization factor for this sky location / orientation
    """
    gha = self.gmst_estimate(t_gps) - right_ascension

    cosgha = cos(gha)
    singha = sin(gha)
    cosdec = cos(declination)
    sindec = sin(declination)
    cospsi = cos(polarization)
    sinpsi = sin(polarization)

    x0 = -cospsi * singha - sinpsi * cosgha * sindec
    x1 = -cospsi * cosgha + sinpsi * singha * sindec
    x2 = sinpsi * cosdec

    x = np.array([x0, x1, x2])
    dx = self.response.dot(x)

    y0 = sinpsi * singha - cospsi * cosgha * sindec
    y1 = sinpsi * cosgha + cospsi * singha * sindec
    y2 = cospsi * cosdec

    y = np.array([y0, y1, y2])
    dy = self.response.dot(y)

    if hasattr(dx, 'shape'):
        fplus = (x * dx - y * dy).sum(axis=0)
        fcross = (x * dy + y * dx).sum(axis=0)
    else:
        fplus = (x * dx - y * dy).sum()
        fcross = (x * dy + y * dx).sum()

    return fplus, fcross
python
{ "resource": "" }
q31498
Detector.time_delay_from_earth_center
train
def time_delay_from_earth_center(self, right_ascension, declination, t_gps):
    """Return the time delay from the earth center
    """
    return self.time_delay_from_location(np.array([0, 0, 0]),
                                         right_ascension,
                                         declination,
                                         t_gps)
python
{ "resource": "" }
q31499
Detector.time_delay_from_location
train
def time_delay_from_location(self, other_location, right_ascension,
                             declination, t_gps):
    """Return the time delay from the given location to detector for
    a signal with the given sky location. In other words return
    `t1 - t2` where `t1` is the arrival time in this detector and
    `t2` is the arrival time in the other location.

    Parameters
    ----------
    other_location : numpy.ndarray
        The Cartesian coordinates (in m) of the other location.
    right_ascension : float
        The right ascension (in rad) of the signal.
    declination : float
        The declination (in rad) of the signal.
    t_gps : float
        The GPS time (in s) of the signal.

    Returns
    -------
    float
        The arrival time difference between the detectors.
    """
    ra_angle = self.gmst_estimate(t_gps) - right_ascension
    cosd = cos(declination)

    e0 = cosd * cos(ra_angle)
    e1 = cosd * -sin(ra_angle)
    e2 = sin(declination)

    ehat = np.array([e0, e1, e2])
    dx = other_location - self.location
    return dx.dot(ehat) / constants.c.value
python
{ "resource": "" }