sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def load(self): """ Load the object defined by the plugin entry point """ print("[DEBUG] Loading plugin {} from {}".format(self.name, self.source)) import pydoc path, attr = self.source.split(":") module = pydoc.locate(path) return getattr(module, attr)
Load the object defined by the plugin entry point
entailment
def _default_plugins(self): """ Get entry points to load any plugins installed. The build process should create an "entry_points.json" file with all of the data from the installed entry points. """ plugins = {} try: with open('entry_points.json') as f: entry_points = json.load(f) for ep, obj in entry_points.items(): plugins[ep] = [] for name, src in obj.items(): plugins[ep].append(Plugin(name=name, source=src)) except Exception as e: print("Failed to load entry points {}".format(e)) return plugins
Get entry points to load any plugins installed. The build process should create an "entry_points.json" file with all of the data from the installed entry points.
entailment
def start(self): """ Start the application's main event loop using either twisted or tornado. """ #: Schedule a load view if given and remote debugging is not active #: the remote debugging init call this after dev connection is ready if self.load_view and self.dev != "remote": self.deferred_call(self.load_view, self) self.loop.start()
Start the application's main event loop using either twisted or tornado.
entailment
def timed_call(self, ms, callback, *args, **kwargs): """ Invoke a callable on the main event loop thread at a specified time in the future. Parameters ---------- ms : int The time to delay, in milliseconds, before executing the callable. callback : callable The callable object to execute at some point in the future. *args, **kwargs Any additional positional and keyword arguments to pass to the callback. """ return self.loop.timed_call(ms, callback, *args, **kwargs)
Invoke a callable on the main event loop thread at a specified time in the future. Parameters ---------- ms : int The time to delay, in milliseconds, before executing the callable. callback : callable The callable object to execute at some point in the future. *args, **kwargs Any additional positional and keyword arguments to pass to the callback.
entailment
def add_done_callback(self, future, callback): """ Add a callback on a future object put here so it can be implemented with different event loops. Parameters ----------- future: Future or Deferred Future implementation for the current EventLoop callback: callable Callback to invoke when the future is done """ if future is None: raise bridge.BridgeReferenceError( "Tried to add a callback to a nonexistent Future. " "Make sure you pass the `returns` argument to your JavaMethod") return self.loop.add_done_callback(future, callback)
Add a callback on a future object put here so it can be implemented with different event loops. Parameters ----------- future: Future or Deferred Future implementation for the current EventLoop callback: callable Callback to invoke when the future is done
entailment
def get_view(self): """ Get the root view to display. Make sure it is properly initialized. """ view = self.view if not view.is_initialized: view.initialize() if not view.proxy_is_active: view.activate_proxy() return view.proxy.widget
Get the root view to display. Make sure it is properly initialized.
entailment
def send_event(self, name, *args, **kwargs): """ Send an event to the native handler. This call is queued and batched. Parameters ---------- name : str The event name to be processed by MainActivity.processMessages. *args: args The arguments required by the event. **kwargs: kwargs Options for sending. These are: now: boolean Send the event now """ n = len(self._bridge_queue) # Add to queue self._bridge_queue.append((name, args)) if n == 0: # First event, send at next available time self._bridge_last_scheduled = time() self.deferred_call(self._bridge_send) return elif kwargs.get('now'): self._bridge_send(now=True) return # If it's been over 5 ms since we last scheduled, run now dt = time() - self._bridge_last_scheduled if dt > self._bridge_max_delay: self._bridge_send(now=True)
Send an event to the native handler. This call is queued and batched. Parameters ---------- name : str The event name to be processed by MainActivity.processMessages. *args: args The arguments required by the event. **kwargs: kwargs Options for sending. These are: now: boolean Send the event now
entailment
def _bridge_send(self, now=False): """ Send the events over the bridge to be processed by the native handler. Parameters ---------- now: boolean Send all pending events now instead of waiting for deferred calls to finish. Use this when you want to update the screen """ if len(self._bridge_queue): if self.debug: print("======== Py --> Native ======") for event in self._bridge_queue: print(event) print("===========================") self.dispatch_events(bridge.dumps(self._bridge_queue)) self._bridge_queue = []
Send the events over the bridge to be processed by the native handler. Parameters ---------- now: boolean Send all pending events now instead of waiting for deferred calls to finish. Use this when you want to update the screen
entailment
def process_events(self, data): """ The native implementation must use this call to """ events = bridge.loads(data) if self.debug: print("======== Py <-- Native ======") for event in events: print(event) print("===========================") for event in events: if event[0] == 'event': self.handle_event(event)
The native implementation must use this call to
entailment
def handle_event(self, event): """ When we get an 'event' type from the bridge handle it by invoking the handler and if needed sending back the result. """ result_id, ptr, method, args = event[1] obj = None result = None try: obj, handler = bridge.get_handler(ptr, method) result = handler(*[v for t, v in args]) except bridge.BridgeReferenceError as e: #: Log the event, don't blow up here msg = "Error processing event: {} - {}".format( event, e).encode("utf-8") print(msg) self.show_error(msg) except: #: Log the event, blow up in user's face msg = "Error processing event: {} - {}".format( event, traceback.format_exc()).encode("utf-8") print(msg) self.show_error(msg) raise finally: if result_id: if hasattr(obj, '__nativeclass__'): sig = getattr(type(obj), method).__returns__ else: sig = type(result).__name__ self.send_event( bridge.Command.RESULT, #: method result_id, bridge.msgpack_encoder(sig, result) #: args )
When we get an 'event' type from the bridge handle it by invoking the handler and if needed sending back the result.
entailment
def handle_error(self, callback): """ Called when an error occurs in an event loop callback. By default, sets the error view. """ self.loop.log_error(callback) msg = "\n".join([ "Exception in callback %r"%callback, traceback.format_exc() ]) self.show_error(msg.encode('utf-8'))
Called when an error occurs in an event loop callback. By default, sets the error view.
entailment
def start_dev_session(self): """ Start a client that attempts to connect to the dev server running on the host `app.dev` """ try: from .dev import DevServerSession session = DevServerSession.initialize(host=self.dev) session.start() #: Save a reference self._dev_session = session except: self.show_error(traceback.format_exc())
Start a client that attempts to connect to the dev server running on the host `app.dev`
entailment
def load_plugin_widgets(self): """ Pull widgets added via plugins using the `enaml_native_widgets` entry point. The entry point function must return a dictionary of Widget declarations to add to the core api. def install(): from charts.widgets.chart_view import BarChart, LineChart return { 'BarChart': BarChart, 'LineCart': LineChart, } """ from enamlnative.widgets import api for plugin in self.get_plugins(group='enaml_native_widgets'): get_widgets = plugin.load() for name, widget in iter(get_widgets()): #: Update the core api with these widgets setattr(api, name, widget)
Pull widgets added via plugins using the `enaml_native_widgets` entry point. The entry point function must return a dictionary of Widget declarations to add to the core api. def install(): from charts.widgets.chart_view import BarChart, LineChart return { 'BarChart': BarChart, 'LineCart': LineChart, }
entailment
def for_action(cls, action, callback, single_shot=True): """ Create a BroadcastReceiver that is invoked when the given action is received. Parameters ---------- action: String Action to receive callback: Callable Callback to invoke when the action is received single_shot: Bool Cleanup after one callback Returns ------- receiver: BroadcastReceiver The receiver that was created. You must hold on to this or the GC will clean it up. """ receiver = cls() activity = receiver.__app__.widget receiver.setReceiver(receiver.getId()) def on_receive(ctx, intent): callback(intent) receiver.onReceive.connect(on_receive) activity.registerReceiver(receiver, IntentFilter(action)) return receiver
Create a BroadcastReceiver that is invoked when the given action is received. Parameters ---------- action: String Action to receive callback: Callable Callback to invoke when the action is received single_shot: Bool Cleanup after one callback Returns ------- receiver: BroadcastReceiver The receiver that was created. You must hold on to this or the GC will clean it up.
entailment
def get(self, measurement_class): """Return the latest measurement for the given class or None if nothing has been received from the vehicle. """ name = Measurement.name_from_class(measurement_class) return self._construct_measurement(name)
Return the latest measurement for the given class or None if nothing has been received from the vehicle.
entailment
def add_source(self, source): """Add a vehicle data source to the instance. The Vehicle instance will be set as the callback of the source, and the source will be started if it is startable. (i.e. it has a ``start()`` method). """ if source is not None: self.sources.add(source) source.callback = self._receive if hasattr(source, 'start'): source.start()
Add a vehicle data source to the instance. The Vehicle instance will be set as the callback of the source, and the source will be started if it is startable. (i.e. it has a ``start()`` method).
entailment
def add_sink(self, sink): """Add a vehicle data sink to the instance. ``sink`` should be a sub-class of ``DataSink`` or at least have a ``receive(message, **kwargs)`` method. The sink will be started if it is startable. (i.e. it has a ``start()`` method). """ if sink is not None: self.sinks.add(sink) if hasattr(sink, 'start'): sink.start()
Add a vehicle data sink to the instance. ``sink`` should be a sub-class of ``DataSink`` or at least have a ``receive(message, **kwargs)`` method. The sink will be started if it is startable. (i.e. it has a ``start()`` method).
entailment
def register(self, measurement_class, callback): """Call the ``callback`` with any new values of ``measurement_class`` received. """ self.callbacks[Measurement.name_from_class(measurement_class) ].add(callback)
Call the ``callback`` with any new values of ``measurement_class`` received.
entailment
def unregister(self, measurement_class, callback): """Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect. """ self.callbacks[Measurement.name_from_class(measurement_class) ].remove(callback)
Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect.
entailment
def _send_complex_request(self, request): """Send a request via the USB control request endpoint, rather than as a bulk transfer. """ self.device.ctrl_transfer(0x40, self.COMPLEX_CONTROL_COMMAND, 0, 0, self.streamer.serialize_for_stream(request))
Send a request via the USB control request endpoint, rather than as a bulk transfer.
entailment
def out_endpoint(self): """Open a reference to the USB device's only OUT endpoint. This method assumes that the USB device configuration has already been set. """ if getattr(self, '_out_endpoint', None) is None: config = self.device.get_active_configuration() interface_number = config[(0, 0)].bInterfaceNumber interface = usb.util.find_descriptor(config, bInterfaceNumber=interface_number) self._out_endpoint = usb.util.find_descriptor(interface, custom_match = \ lambda e: \ usb.util.endpoint_direction(e.bEndpointAddress) == \ usb.util.ENDPOINT_OUT) if not self._out_endpoint: raise ControllerError( "Couldn't find OUT endpoint on the USB device") return self._out_endpoint
Open a reference to the USB device's only OUT endpoint. This method assumes that the USB device configuration has already been set.
entailment
def wait_for_responses(self): """Block the thread and wait for the response to the given request to arrive from the VI. If no matching response is received in COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway. """ self.thread.join(self.COMMAND_RESPONSE_TIMEOUT_S) self.running = False return self.responses
Block the thread and wait for the response to the given request to arrive from the VI. If no matching response is received in COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway.
entailment
def handle_responses(self): """Block and wait for responses to this object's original request, or until a timeout (self.COMMAND_RESPONSE_TIMEOUT_S). This function is handy to use as the target function for a thread. The responses received (or None if none was received before the timeout) is stored in a list at self.responses. """ while self.running: try: response = self.queue.get( timeout=self.COMMAND_RESPONSE_TIMEOUT_S) if self._response_matches_request(response): self.responses.append(response) if self.quit_after_first: self.running = False self.queue.task_done() except Empty: break
Block and wait for responses to this object's original request, or until a timeout (self.COMMAND_RESPONSE_TIMEOUT_S). This function is handy to use as the target function for a thread. The responses received (or None if none was received before the timeout) is stored in a list at self.responses.
entailment
def _response_matches_request(self, response): """Return true if the response is to a diagnostic request, and the bus, id, mode match. If the request was successful, the PID echo is also checked. """ # Accept success/failure command responses if super(DiagnosticResponseReceiver, self)._response_matches_request(response): return True if ('bus' in self.diagnostic_request and response.get('bus', None) != self.diagnostic_request['bus']): return False if (self.diagnostic_request['id'] != 0x7df and response.get('id', None) != self.diagnostic_request['id']): return False if (response.get('success', True) and response.get('pid', None) != self.diagnostic_request.get('pid', None)): return False return response.get('mode', None) == self.diagnostic_request['mode']
Return true if the response is to a diagnostic request, and the bus, id, mode match. If the request was successful, the PID echo is also checked.
entailment
def complex_request(self, request, wait_for_first_response=True): """Send a compound command request to the interface over the normal data channel. request - A dict storing the request to send to the VI. It will be serialized to the currently selected output format. wait_for_first_response - If true, this function will block waiting for a response from the VI and return it to the caller. Otherwise, it will send the command and return immediately and any response will be lost. """ receiver = self._prepare_response_receiver(request, receiver_class=CommandResponseReceiver) self._send_complex_request(request) responses = [] if wait_for_first_response: responses = receiver.wait_for_responses() return responses
Send a compound command request to the interface over the normal data channel. request - A dict storing the request to send to the VI. It will be serialized to the currently selected output format. wait_for_first_response - If true, this function will block waiting for a response from the VI and return it to the caller. Otherwise, it will send the command and return immediately and any response will be lost.
entailment
def create_diagnostic_request(self, message_id, mode, bus=None, pid=None, frequency=None, payload=None, wait_for_ack=True, wait_for_first_response=False, decoded_type=None): """Send a new diagnostic message request to the VI Required: message_id - The message ID (arbitration ID) for the request. mode - the diagnostic mode (or service). Optional: bus - The address of the CAN bus controller to send the request, either 1 or 2 for current VI hardware. pid - The parameter ID, or PID, for the request (e.g. for a mode 1 request). frequency - The frequency in hertz to add this as a recurring diagnostic requests. Must be greater than 0, or None if it is a one-time request. payload - A bytearray to send as the request's optional payload. Only single frame diagnostic requests are supported by the VI firmware in the current version, so the payload has a maximum length of 6. wait_for_ack - If True, will wait for an ACK of the command message. wait_for_first_response - If True, this function will block waiting for a diagnostic response to be received for the request. It will return either after timing out or after 1 matching response is received - there may be more responses to functional broadcast requests that arrive after returning. Returns a tuple of ([list of ACK responses to create request], [list of diagnostic responses received]) """ request = self._build_diagnostic_request(message_id, mode, bus, pid, frequency, payload, decoded_type) diag_response_receiver = None if wait_for_first_response: diag_response_receiver = self._prepare_response_receiver( request, DiagnosticResponseReceiver) request['action'] = 'add' ack_responses = self.complex_request(request, wait_for_ack) diag_responses = None if diag_response_receiver is not None: diag_responses = diag_response_receiver.wait_for_responses() return ack_responses, diag_responses
Send a new diagnostic message request to the VI Required: message_id - The message ID (arbitration ID) for the request. mode - the diagnostic mode (or service). Optional: bus - The address of the CAN bus controller to send the request, either 1 or 2 for current VI hardware. pid - The parameter ID, or PID, for the request (e.g. for a mode 1 request). frequency - The frequency in hertz to add this as a recurring diagnostic requests. Must be greater than 0, or None if it is a one-time request. payload - A bytearray to send as the request's optional payload. Only single frame diagnostic requests are supported by the VI firmware in the current version, so the payload has a maximum length of 6. wait_for_ack - If True, will wait for an ACK of the command message. wait_for_first_response - If True, this function will block waiting for a diagnostic response to be received for the request. It will return either after timing out or after 1 matching response is received - there may be more responses to functional broadcast requests that arrive after returning. Returns a tuple of ([list of ACK responses to create request], [list of diagnostic responses received])
entailment
def set_passthrough(self, bus, enabled): """Control the status of CAN message passthrough for a bus. Returns True if the command was successful. """ request = { "command": "passthrough", "bus": bus, "enabled": enabled } return self._check_command_response_status(request)
Control the status of CAN message passthrough for a bus. Returns True if the command was successful.
entailment
def set_payload_format(self, payload_format): """Set the payload format for messages sent to and from the VI. Returns True if the command was successful. """ request = { "command": "payload_format", "format": payload_format } status = self._check_command_response_status(request) # Always change the format regardless because if it was already in the # right format, the command will have failed. self.format = payload_format return status
Set the payload format for messages sent to and from the VI. Returns True if the command was successful.
entailment
def rtc_configuration(self, unix_time): """Set the Unix time if RTC is supported on the device. Returns True if the command was successful. """ request = { "command": "rtc_configuration", "unix_time": unix_time } status = self._check_command_response_status(request) return status
Set the Unix time if RTC is supported on the device. Returns True if the command was successful.
entailment
def modem_configuration(self, host, port): """Set the host:port for the Cellular device to send data to. Returns True if the command was successful. """ request = { "command": "modem_configuration", "host": host, "port": port } status = self._check_command_response_status(request) return status
Set the host:port for the Cellular device to send data to. Returns True if the command was successful.
entailment
def set_acceptance_filter_bypass(self, bus, bypass): """Control the status of CAN acceptance filter for a bus. Returns True if the command was successful. """ request = { "command": "af_bypass", "bus": bus, "bypass": bypass } return self._check_command_response_status(request)
Control the status of CAN acceptance filter for a bus. Returns True if the command was successful.
entailment
def sd_mount_status(self): """Request for SD Mount status if available. """ request = { "command": "sd_mount_status" } responses = self.complex_request(request) result = None if len(responses) > 0: result = responses[0].get('status') return result
Request for SD Mount status if available.
entailment
def write(self, **kwargs): """Serialize a raw or translated write request and send it to the VI, following the OpenXC message format. """ if 'id' in kwargs and 'data' in kwargs: result = self.write_raw(kwargs['id'], kwargs['data'], bus=kwargs.get('bus', None), frame_format=kwargs.get('frame_format', None)) else: result = self.write_translated(kwargs['name'], kwargs['value'], event=kwargs.get('event', None)) return result
Serialize a raw or translated write request and send it to the VI, following the OpenXC message format.
entailment
def write_translated(self, name, value, event=None): """Send a translated write request to the VI. """ data = {'name': name} if value is not None: data['value'] = self._massage_write_value(value) if event is not None: data['event'] = self._massage_write_value(event); message = self.streamer.serialize_for_stream(data) bytes_written = self.write_bytes(message) assert bytes_written == len(message) return bytes_written
Send a translated write request to the VI.
entailment
def write_raw(self, message_id, data, bus=None, frame_format=None): """Send a raw write request to the VI. """ if not isinstance(message_id, numbers.Number): try: message_id = int(message_id, 0) except ValueError: raise ValueError("ID must be numerical") data = {'id': message_id, 'data': data} if bus is not None: data['bus'] = bus if frame_format is not None: data['frame_format'] = frame_format message = self.streamer.serialize_for_stream(data) bytes_written = self.write_bytes(message) assert bytes_written == len(message) return bytes_written
Send a raw write request to the VI.
entailment
def _massage_write_value(cls, value): """Convert string values from command-line arguments into first-order Python boolean and float objects, if applicable. """ if not isinstance(value, numbers.Number): if value == "true": value = True elif value == "false": value = False elif value[0] == '"' and value[-1] == '"': value = value[1:-1] else: try: value = float(value) except ValueError: pass return value
Convert string values from command-line arguments into first-order Python boolean and float objects, if applicable.
entailment
def _validate(cls, message): """Confirm the validitiy of a given dict as an OpenXC message. Returns: ``True`` if the message contains at least a ``name`` and ``value``. """ valid = False if(('name' in message and 'value' in message) or ('id' in message and 'data' in message)): valid = True return valid
Confirm the validitiy of a given dict as an OpenXC message. Returns: ``True`` if the message contains at least a ``name`` and ``value``.
entailment
def value(self, new_value): """Set the value of this measurement. Raises: AttributeError: if the new value isn't of the correct units. """ if self.unit != units.Undefined and new_value.unit != self.unit: raise AttributeError("%s must be in %s" % ( self.__class__, self.unit)) self._value = new_value
Set the value of this measurement. Raises: AttributeError: if the new value isn't of the correct units.
entailment
def from_dict(cls, data): """Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value. """ args = [] if 'id' in data and 'data' in data: measurement_class = CanMessage args.append("Bus %s: 0x%x" % (data.get('bus', '?'), data['id'])) args.append(data['data']) # TODO grab bus else: measurement_class = cls._class_from_name(data['name']) if measurement_class == Measurement: args.append(data['name']) args.append(data['value']) return measurement_class(*args, event=data.get('event', None), override_unit=True)
Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value.
entailment
def name_from_class(cls, measurement_class): """For a given measurement class, return its generic name. The given class is expected to have a ``name`` attribute, otherwise this function will raise an execption. The point of using this method instead of just trying to grab that attribute in the application is to cache measurement name to class mappings for future use. Returns: the generic OpenXC name for a measurement class. Raise: UnrecognizedMeasurementError: if the class does not have a valid generic name """ if not getattr(cls, '_measurements_initialized', False): cls._measurement_map = dict((m.name, m) for m in all_measurements()) cls._measurements_initialized = True try: name = getattr(measurement_class, 'name') except AttributeError: raise UnrecognizedMeasurementError("No 'name' attribute in %s" % measurement_class) else: cls._measurement_map[name] = measurement_class return name
For a given measurement class, return its generic name. The given class is expected to have a ``name`` attribute, otherwise this function will raise an execption. The point of using this method instead of just trying to grab that attribute in the application is to cache measurement name to class mappings for future use. Returns: the generic OpenXC name for a measurement class. Raise: UnrecognizedMeasurementError: if the class does not have a valid generic name
entailment
def _store_timestamp(self, timestamp): """If not already saved, cache the first timestamp in the active trace file on the instance. """ if getattr(self, 'first_timestamp', None) is None: self.first_timestamp = timestamp LOG.debug("Storing %d as the first timestamp of the trace file %s", self.first_timestamp, self.filename)
If not already saved, cache the first timestamp in the active trace file on the instance.
entailment
def read(self): """Read a line of data from the input source at a time.""" line = self.trace_file.readline() if line == '': if self.loop: self._reopen_file() else: self.trace_file.close() self.trace_file = None raise DataSourceError() message = JsonFormatter.deserialize(line) timestamp = message.get('timestamp', None) if self.realtime and timestamp is not None: self._store_timestamp(timestamp) self._wait(self.starting_time, self.first_timestamp, timestamp) return line + "\x00"
Read a line of data from the input source at a time.
entailment
def _open_file(filename): """Attempt to open the the file at ``filename`` for reading. Raises: DataSourceError, if the file cannot be opened. """ if filename is None: raise DataSourceError("Trace filename is not defined") try: trace_file = open(filename, "r") except IOError as e: raise DataSourceError("Unable to open trace file %s" % filename, e) else: LOG.debug("Opened trace file %s", filename) return trace_file
Attempt to open the the file at ``filename`` for reading. Raises: DataSourceError, if the file cannot be opened.
entailment
def _wait(starting_time, first_timestamp, timestamp): """Given that the first timestamp in the trace file is ``first_timestamp`` and we started playing back the file at ``starting_time``, block until the current ``timestamp`` should occur. """ target_time = starting_time + (timestamp - first_timestamp) time.sleep(max(target_time - time.time(), 0))
Given that the first timestamp in the trace file is ``first_timestamp`` and we started playing back the file at ``starting_time``, block until the current ``timestamp`` should occur.
entailment
def merge(a, b): """Merge two deep dicts non-destructively Uses a stack to avoid maximum recursion depth exceptions >>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6} >>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}} >>> c = merge(a, b) >>> from pprint import pprint; pprint(c) {'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}} """ assert quacks_like_dict(a), quacks_like_dict(b) dst = a.copy() stack = [(dst, b)] while stack: current_dst, current_src = stack.pop() for key in current_src: if key not in current_dst: current_dst[key] = current_src[key] else: if (quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key])): stack.append((current_dst[key], current_src[key])) elif (quacks_like_list(current_src[key]) and quacks_like_list(current_dst[key])): current_dst[key].extend(current_src[key]) else: current_dst[key] = current_src[key] return dst
Merge two deep dicts non-destructively Uses a stack to avoid maximum recursion depth exceptions >>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6} >>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}} >>> c = merge(a, b) >>> from pprint import pprint; pprint(c) {'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
entailment
def from_xml_node(cls, node): """Construct a Signal instance from an XML node exported from a Vector CANoe .dbc file.""" return cls(name=node.find("Name").text, bit_position=int(node.find("Bitposition").text), bit_size=int(node.find("Bitsize").text), factor=float(node.find("Factor").text), offset=float(node.find("Offset").text), min_value=float(node.find("Minimum").text), max_value=float(node.find("Maximum").text))
Construct a Signal instance from an XML node exported from a Vector CANoe .dbc file.
entailment
def run(self): """Continuously read data from the source and attempt to parse a valid message from the buffer of bytes. When a message is parsed, passes it off to the callback if one is set. """ message_buffer = b"" while self.running: try: message_buffer += self.source.read_logs() except DataSourceError as e: if self.running: LOG.warn("Can't read logs from data source -- stopping: %s", e) break except NotImplementedError as e: LOG.info("%s doesn't support logging" % self) break while True: if "\x00" not in message_buffer: break record, _, remainder = message_buffer.partition(b"\x00") self.record(record) message_buffer = remainder
Continuously read data from the source and attempt to parse a valid message from the buffer of bytes. When a message is parsed, passes it off to the callback if one is set.
entailment
def compare_versions(x, y): """ Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are integers. It will compare the items which will organize things properly by their major, minor and bugfix version. :: >>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1'] >>> sorted(my_list, cmp=compare_versions) ['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2'] """ def version_to_tuple(version): # Trim off the leading v version_list = version[1:].split('.', 2) if len(version_list) <= 3: [version_list.append(0) for _ in range(3 - len(version_list))] try: return tuple((int(version) for version in version_list)) except ValueError: # not an integer, so it goes to the bottom return (0, 0, 0) x_major, x_minor, x_bugfix = version_to_tuple(x) y_major, y_minor, y_bugfix = version_to_tuple(y) return (cmp(x_major, y_major) or cmp(x_minor, y_minor) or cmp(x_bugfix, y_bugfix))
Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are integers. It will compare the items which will organize things properly by their major, minor and bugfix version. :: >>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1'] >>> sorted(my_list, cmp=compare_versions) ['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2']
entailment
def compute(self, t, yerr=1.123e-12, check_sorted=True, A=None, U=None, V=None): """ Compute the extended form of the covariance matrix and factorize Args: x (array[n]): The independent coordinates of the data points. This array must be _sorted_ in ascending order. yerr (Optional[float or array[n]]): The measurement uncertainties for the data points at coordinates ``x``. These values will be added in quadrature to the diagonal of the covariance matrix. (default: ``1.123e-12``) check_sorted (bool): If ``True``, ``x`` will be checked to make sure that it is properly sorted. If ``False``, the coordinates will be assumed to be in the correct order. Raises: ValueError: For un-sorted data or mismatched dimensions. solver.LinAlgError: For non-positive definite matrices. """ t = np.atleast_1d(t) if check_sorted and np.any(np.diff(t) < 0.0): raise ValueError("the input coordinates must be sorted") if check_sorted and len(t.shape) > 1: raise ValueError("dimension mismatch") self._t = t self._yerr = np.empty_like(self._t) self._yerr[:] = yerr (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.kernel.coefficients self._A = np.empty(0) if A is None else A self._U = np.empty((0, 0)) if U is None else U self._V = np.empty((0, 0)) if V is None else V self.solver.compute( self.kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, self._A, self._U, self._V, t, self._yerr**2 ) self.dirty = False
Compute the extended form of the covariance matrix and factorize Args: x (array[n]): The independent coordinates of the data points. This array must be _sorted_ in ascending order. yerr (Optional[float or array[n]]): The measurement uncertainties for the data points at coordinates ``x``. These values will be added in quadrature to the diagonal of the covariance matrix. (default: ``1.123e-12``) check_sorted (bool): If ``True``, ``x`` will be checked to make sure that it is properly sorted. If ``False``, the coordinates will be assumed to be in the correct order. Raises: ValueError: For un-sorted data or mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
entailment
def log_likelihood(self, y, _const=math.log(2.0*math.pi), quiet=False): """ Compute the marginalized likelihood of the GP model The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` for non-positive definite matrices instead of throwing an error. Returns: float: The marginalized likelihood of the GP model. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices. """ y = self._process_input(y) resid = y - self.mean.get_value(self._t) try: self._recompute() except solver.LinAlgError: if quiet: return -np.inf raise if len(y.shape) > 1: raise ValueError("dimension mismatch") logdet = self.solver.log_determinant() if not np.isfinite(logdet): return -np.inf loglike = -0.5*(self.solver.dot_solve(resid)+logdet+len(y)*_const) if not np.isfinite(loglike): return -np.inf return loglike
Compute the marginalized likelihood of the GP model The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` for non-positive definite matrices instead of throwing an error. Returns: float: The marginalized likelihood of the GP model. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
entailment
def grad_log_likelihood(self, y, quiet=False): """ Compute the gradient of the marginalized likelihood The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. The gradient is taken with respect to the parameters returned by :func:`GP.get_parameter_vector`. This function requires the `autograd <https://github.com/HIPS/autograd>`_ package. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` and a gradient vector of zeros for non-positive definite matrices instead of throwing an error. Returns: The gradient of marginalized likelihood with respect to the parameter vector. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices. """ if not solver.has_autodiff(): raise RuntimeError("celerite must be compiled with autodiff " "support to use the gradient methods") if not self.kernel.vector_size: return self.log_likelihood(y, quiet=quiet), np.empty(0) y = self._process_input(y) if len(y.shape) > 1: raise ValueError("dimension mismatch") resid = y - self.mean.get_value(self._t) (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.kernel.coefficients try: val, grad = self.solver.grad_log_likelihood( self.kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, self._A, self._U, self._V, self._t, resid, self._yerr**2 ) except solver.LinAlgError: if quiet: return -np.inf, np.zeros(self.vector_size) raise if self.kernel._has_coeffs: coeffs_jac = self.kernel.get_coeffs_jacobian() full_grad = np.dot(coeffs_jac, grad[1:]) else: full_grad = np.zeros(self.kernel.vector_size) if self.kernel._has_jitter: jitter_jac = self.kernel.get_jitter_jacobian() full_grad += jitter_jac * grad[0] if self.mean.vector_size: self._recompute() alpha = self.solver.solve(resid) g = self.mean.get_gradient(self._t) 
full_grad = np.append(full_grad, np.dot(g, alpha)) return val, full_grad
Compute the gradient of the marginalized likelihood The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. The gradient is taken with respect to the parameters returned by :func:`GP.get_parameter_vector`. This function requires the `autograd <https://github.com/HIPS/autograd>`_ package. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` and a gradient vector of zeros for non-positive definite matrices instead of throwing an error. Returns: The gradient of marginalized likelihood with respect to the parameter vector. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
entailment
def apply_inverse(self, y): """ Apply the inverse of the covariance matrix to a vector or matrix Solve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of the GP with the white noise and ``yerr`` components included on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. Returns: array[n] or array[n, nrhs]: The solution to the linear system. This will have the same shape as ``y``. Raises: ValueError: For mismatched dimensions. """ self._recompute() return self.solver.solve(self._process_input(y))
Apply the inverse of the covariance matrix to a vector or matrix Solve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of the GP with the white noise and ``yerr`` components included on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. Returns: array[n] or array[n, nrhs]: The solution to the linear system. This will have the same shape as ``y``. Raises: ValueError: For mismatched dimensions.
entailment
def dot(self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True): """ Dot the covariance matrix into a vector or matrix Compute ``K.y`` where ``K`` is the covariance matrix of the GP without the white noise or ``yerr`` values on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. kernel (Optional[terms.Term]): A different kernel can optionally be provided to compute the matrix ``K`` from a different kernel than the ``kernel`` property on this object. Returns: array[n] or array[n, nrhs]: The dot product ``K.y`` as described above. This will have the same shape as ``y``. Raises: ValueError: For mismatched dimensions. """ if kernel is None: kernel = self.kernel if t is not None: t = np.atleast_1d(t) if check_sorted and np.any(np.diff(t) < 0.0): raise ValueError("the input coordinates must be sorted") if check_sorted and len(t.shape) > 1: raise ValueError("dimension mismatch") A = np.empty(0) if A is None else A U = np.empty((0, 0)) if U is None else U V = np.empty((0, 0)) if V is None else V else: if not self.computed: raise RuntimeError("you must call 'compute' first") t = self._t A = self._A U = self._U V = self._V (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = kernel.coefficients return self.solver.dot( kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, A, U, V, t, np.ascontiguousarray(y, dtype=float) )
Dot the covariance matrix into a vector or matrix Compute ``K.y`` where ``K`` is the covariance matrix of the GP without the white noise or ``yerr`` values on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. kernel (Optional[terms.Term]): A different kernel can optionally be provided to compute the matrix ``K`` from a different kernel than the ``kernel`` property on this object. Returns: array[n] or array[n, nrhs]: The dot product ``K.y`` as described above. This will have the same shape as ``y``. Raises: ValueError: For mismatched dimensions.
entailment
def predict(self, y, t=None, return_cov=True, return_var=False): """ Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions. """ y = self._process_input(y) if len(y.shape) > 1: raise ValueError("dimension mismatch") if t is None: xs = self._t else: xs = np.ascontiguousarray(t, dtype=float) if len(xs.shape) > 1: raise ValueError("dimension mismatch") # Make sure that the model is computed self._recompute() # Compute the predictive mean. 
resid = y - self.mean.get_value(self._t) if t is None: alpha = self.solver.solve(resid).flatten() alpha = resid - (self._yerr**2 + self.kernel.jitter) * alpha elif not len(self._A): alpha = self.solver.predict(resid, xs) else: Kxs = self.get_matrix(xs, self._t) alpha = np.dot(Kxs, alpha) mu = self.mean.get_value(xs) + alpha if not (return_var or return_cov): return mu # Predictive variance. Kxs = self.get_matrix(xs, self._t) KxsT = np.ascontiguousarray(Kxs.T, dtype=np.float64) if return_var: var = -np.sum(KxsT*self.apply_inverse(KxsT), axis=0) var += self.kernel.get_value(0.0) return mu, var # Predictive covariance cov = self.kernel.get_value(xs[:, None] - xs[None, :]) cov -= np.dot(Kxs, self.apply_inverse(KxsT)) return mu, cov
Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions.
entailment
def get_matrix(self, x1=None, x2=None, include_diagonal=None, include_general=None): """ Get the covariance matrix at given independent coordinates Args: x1 (Optional[array[n1]]): The first set of independent coordinates. If this is omitted, ``x1`` will be assumed to be equal to ``x`` from a previous call to :func:`GP.compute`. x2 (Optional[array[n2]]): The second set of independent coordinates. If this is omitted, ``x2`` will be assumed to be ``x1``. include_diagonal (Optional[bool]): Should the white noise and ``yerr`` terms be included on the diagonal? (default: ``False``) """ if x1 is None and x2 is None: if self._t is None or not self.computed: raise RuntimeError("you must call 'compute' first") K = self.kernel.get_value(self._t[:, None] - self._t[None, :]) if include_diagonal is None or include_diagonal: K[np.diag_indices_from(K)] += ( self._yerr**2 + self.kernel.jitter ) if (include_general is None or include_general) and len(self._A): K[np.diag_indices_from(K)] += self._A K += np.tril(np.dot(self._U.T, self._V), -1) K += np.triu(np.dot(self._V.T, self._U), 1) return K incl = False x1 = np.ascontiguousarray(x1, dtype=float) if x2 is None: x2 = x1 incl = include_diagonal is not None and include_diagonal K = self.kernel.get_value(x1[:, None] - x2[None, :]) if incl: K[np.diag_indices_from(K)] += self.kernel.jitter return K
Get the covariance matrix at given independent coordinates Args: x1 (Optional[array[n1]]): The first set of independent coordinates. If this is omitted, ``x1`` will be assumed to be equal to ``x`` from a previous call to :func:`GP.compute`. x2 (Optional[array[n2]]): The second set of independent coordinates. If this is omitted, ``x2`` will be assumed to be ``x1``. include_diagonal (Optional[bool]): Should the white noise and ``yerr`` terms be included on the diagonal? (default: ``False``)
entailment
def sample(self, size=None): """ Sample from the prior distribution over datasets Args: size (Optional[int]): The number of samples to draw. Returns: array[n] or array[size, n]: The samples from the prior distribution over datasets. """ self._recompute() if size is None: n = np.random.randn(len(self._t)) else: n = np.random.randn(len(self._t), size) n = self.solver.dot_L(n) if size is None: return self.mean.get_value(self._t) + n[:, 0] return self.mean.get_value(self._t)[None, :] + n.T
Sample from the prior distribution over datasets Args: size (Optional[int]): The number of samples to draw. Returns: array[n] or array[size, n]: The samples from the prior distribution over datasets.
entailment
def sample_conditional(self, y, t=None, size=None): """ Sample from the conditional (predictive) distribution Note: this method scales as ``O(M^3)`` for large ``M``, where ``M == len(t)``. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. size (Optional[int]): The number of samples to draw. Returns: array[n] or array[size, n]: The samples from the conditional distribution over datasets. """ mu, cov = self.predict(y, t, return_cov=True) return np.random.multivariate_normal(mu, cov, size=size)
Sample from the conditional (predictive) distribution Note: this method scales as ``O(M^3)`` for large ``M``, where ``M == len(t)``. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. size (Optional[int]): The number of samples to draw. Returns: array[n] or array[size, n]: The samples from the conditional distribution over datasets.
entailment
def get_value(self, tau): """ Compute the value of the term for an array of lags Args: tau (array[...]): An array of lags where the term should be evaluated. Returns: The value of the term for each ``tau``. This will have the same shape as ``tau``. """ tau = np.asarray(tau) (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients k = get_kernel_value( alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, tau.flatten(), ) return np.asarray(k).reshape(tau.shape)
Compute the value of the term for an array of lags Args: tau (array[...]): An array of lags where the term should be evaluated. Returns: The value of the term for each ``tau``. This will have the same shape as ``tau``.
entailment
def get_psd(self, omega): """ Compute the PSD of the term for an array of angular frequencies Args: omega (array[...]): An array of frequencies where the PSD should be evaluated. Returns: The value of the PSD for each ``omega``. This will have the same shape as ``omega``. """ w = np.asarray(omega) (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients p = get_psd_value( alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, w.flatten(), ) return p.reshape(w.shape)
Compute the PSD of the term for an array of angular frequencies Args: omega (array[...]): An array of frequencies where the PSD should be evaluated. Returns: The value of the PSD for each ``omega``. This will have the same shape as ``omega``.
entailment
def get_complex_coefficients(self, params): """ Get the arrays ``alpha_complex_*`` and ``beta_complex_*`` This method should be overloaded by subclasses to return the arrays ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` given the current parameter settings. By default, this term is empty. Returns: (array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. ``alpha_complex_imag`` can be omitted and it will be assumed to be zero. """ return np.empty(0), np.empty(0), np.empty(0), np.empty(0)
Get the arrays ``alpha_complex_*`` and ``beta_complex_*`` This method should be overloaded by subclasses to return the arrays ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` given the current parameter settings. By default, this term is empty. Returns: (array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. ``alpha_complex_imag`` can be omitted and it will be assumed to be zero.
entailment
def coefficients(self): """ All of the coefficient arrays This property is the concatenation of the results from :func:`terms.Term.get_real_coefficients` and :func:`terms.Term.get_complex_coefficients` but it will always return a tuple of length 6, even if ``alpha_complex_imag`` was omitted from ``get_complex_coefficients``. Returns: (array[j_real], array[j_real], array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``, ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. Raises: ValueError: For invalid dimensions for the coefficients. """ vector = self.get_parameter_vector(include_frozen=True) pars = self.get_all_coefficients(vector) if len(pars) != 6: raise ValueError("there must be 6 coefficient blocks") if any(len(p.shape) != 1 for p in pars): raise ValueError("coefficient blocks must be 1D") if len(pars[0]) != len(pars[1]): raise ValueError("coefficient blocks must have the same shape") if any(len(pars[2]) != len(p) for p in pars[3:]): raise ValueError("coefficient blocks must have the same shape") return pars
All of the coefficient arrays This property is the concatenation of the results from :func:`terms.Term.get_real_coefficients` and :func:`terms.Term.get_complex_coefficients` but it will always return a tuple of length 6, even if ``alpha_complex_imag`` was omitted from ``get_complex_coefficients``. Returns: (array[j_real], array[j_real], array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``, ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. Raises: ValueError: For invalid dimensions for the coefficients.
entailment
def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key))
Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].
entailment
def _determine_current_dimension_size(self, dim_name, max_size): """ Helper method to determine the current size of a dimension. """ # Limited dimension. if self.dimensions[dim_name] is not None: return max_size def _find_dim(h5group, dim): if dim not in h5group: return _find_dim(h5group.parent, dim) return h5group[dim] dim_variable = _find_dim(self._h5group, dim_name) if "REFERENCE_LIST" not in dim_variable.attrs: return max_size root = self._h5group["/"] for ref, _ in dim_variable.attrs["REFERENCE_LIST"]: var = root[ref] for i, var_d in enumerate(var.dims): name = _name_from_dimension(var_d) if name == dim_name: max_size = max(var.shape[i], max_size) return max_size
Helper method to determine the current size of a dimension.
entailment
def _create_dim_scales(self): """Create all necessary HDF5 dimension scale.""" dim_order = self._dim_order.maps[0] for dim in sorted(dim_order, key=lambda d: dim_order[d]): if dim not in self._h5group: size = self._current_dim_sizes[dim] kwargs = {} if self._dim_sizes[dim] is None: kwargs["maxshape"] = (None,) self._h5group.create_dataset( name=dim, shape=(size,), dtype='S1', **kwargs) h5ds = self._h5group[dim] h5ds.attrs['_Netcdf4Dimid'] = dim_order[dim] if len(h5ds.shape) > 1: dims = self._variables[dim].dimensions coord_ids = np.array([dim_order[d] for d in dims], 'int32') h5ds.attrs['_Netcdf4Coordinates'] = coord_ids scale_name = dim if dim in self.variables else NOT_A_VARIABLE h5ds.dims.create_scale(h5ds, scale_name) for subgroup in self.groups.values(): subgroup._create_dim_scales()
Create all necessary HDF5 dimension scale.
entailment
def _attach_dim_scales(self): """Attach dimension scales to all variables.""" for name, var in self.variables.items(): if name not in self.dimensions: for n, dim in enumerate(var.dimensions): var._h5ds.dims[n].attach_scale(self._all_h5groups[dim]) for subgroup in self.groups.values(): subgroup._attach_dim_scales()
Attach dimension scales to all variables.
entailment
def _detach_dim_scale(self, name): """Detach the dimension scale corresponding to a dimension name.""" for var in self.variables.values(): for n, dim in enumerate(var.dimensions): if dim == name: var._h5ds.dims[n].detach_scale(self._all_h5groups[dim]) for subgroup in self.groups.values(): if dim not in subgroup._h5group: subgroup._detach_dim_scale(name)
Detach the dimension scale corresponding to a dimension name.
entailment
def resize_dimension(self, dimension, size): """ Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary. """ if self.dimensions[dimension] is not None: raise ValueError("Dimension '%s' is not unlimited and thus " "cannot be resized." % dimension) # Resize the dimension. self._current_dim_sizes[dimension] = size for var in self.variables.values(): new_shape = list(var.shape) for i, d in enumerate(var.dimensions): if d == dimension: new_shape[i] = size new_shape = tuple(new_shape) if new_shape != var.shape: var._h5ds.resize(new_shape) # Recurse as dimensions are visible to this group and all child groups. for i in self.groups.values(): i.resize_dimension(dimension, size)
Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary.
entailment
def multiplyQuats(q1, q2): """q1, q2 must be [scalar, x, y, z] but those may be arrays or scalars""" return np.array([ q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3], q1[2]*q2[3] - q2[2]*q1[3] + q1[0]*q2[1] + q2[0]*q1[1], q1[3]*q2[1] - q2[3]*q1[1] + q1[0]*q2[2] + q2[0]*q1[2], q1[1]*q2[2] - q2[1]*q1[2] + q1[0]*q2[3] + q2[0]*q1[3]])
q1, q2 must be [scalar, x, y, z] but those may be arrays or scalars
entailment
def quatInv(q): """Returns QBar such that Q*QBar = 1""" qConj = -q qConj[0] = -qConj[0] normSqr = multiplyQuats(q, qConj)[0] return qConj/normSqr
Returns QBar such that Q*QBar = 1
entailment
def alignVec_quat(vec): """Returns a unit quaternion that will align vec with the z-axis""" alpha = np.arctan2(vec[1], vec[0]) beta = np.arccos(vec[2]) gamma = -alpha*vec[2] cb = np.cos(0.5*beta) sb = np.sin(0.5*beta) return np.array([cb*np.cos(0.5*(alpha + gamma)), sb*np.sin(0.5*(gamma - alpha)), sb*np.cos(0.5*(gamma - alpha)), cb*np.sin(0.5*(alpha + gamma))])
Returns a unit quaternion that will align vec with the z-axis
entailment
def transformTimeDependentVector(quat, vec, inverse=0): """Given (for example) a minimal rotation frame quat, transforms vec from the minimal rotation frame to the inertial frame. With inverse=1, transforms from the inertial frame to the minimal rotation frame.""" qInv = quatInv(quat) if inverse: return transformTimeDependentVector(qInv, vec, inverse=0) return multiplyQuats(quat, multiplyQuats(np.append(np.array([ np.zeros(len(vec[0]))]), vec, 0), qInv))[1:]
Given (for example) a minimal rotation frame quat, transforms vec from the minimal rotation frame to the inertial frame. With inverse=1, transforms from the inertial frame to the minimal rotation frame.
entailment
def rotate_in_plane(chi, phase): """For transforming spins between the coprecessing and coorbital frames""" v = chi.T sp = np.sin(phase) cp = np.cos(phase) res = 1.*v res[0] = v[0]*cp + v[1]*sp res[1] = v[1]*cp - v[0]*sp return res.T
For transforming spins between the coprecessing and coorbital frames
entailment
def transform_vector_coorb_to_inertial(vec_coorb, orbPhase, quat_copr): """Given a vector (of size 3) in coorbital frame, orbital phase in coprecessing frame and a minimal rotation frame quat, transforms the vector from the coorbital to the inertial frame. """ # Transform to coprecessing frame vec_copr = rotate_in_plane(vec_coorb, -orbPhase) # Transform to inertial frame vec = transformTimeDependentVector(np.array([quat_copr]).T, np.array([vec_copr]).T).T[0] return np.array(vec)
Given a vector (of size 3) in coorbital frame, orbital phase in coprecessing frame and a minimal rotation frame quat, transforms the vector from the coorbital to the inertial frame.
entailment
def transform_error_coorb_to_inertial(vec_coorb, vec_err_coorb, orbPhase, quat_copr): """ Transform error in a vector from the coorbital frame to the inertial frame. Generates distributions in the coorbital frame, transforms them to inertial frame and returns 1-simga widths in the inertial frame. """ # for reproducibility np.random.seed(0) # Get distribution in coorbital frame dist_coorb = np.array([np.random.normal(m, s, 1000) for m,s in zip(vec_coorb, vec_err_coorb)]).T # Transform distribution to coprecessing frame dist_copr = rotate_in_plane(dist_coorb, -orbPhase) # Transform distribution to inertial frame dist_inertial = transformTimeDependentVector( np.array([quat_copr for _ in dist_copr]).T, dist_copr.T).T # Get 1sigma width in inertial frame vec_err_inertial = np.std(dist_inertial, axis=0) return vec_err_inertial
Transform error in a vector from the coorbital frame to the inertial frame. Generates distributions in the coorbital frame, transforms them to inertial frame and returns 1-simga widths in the inertial frame.
entailment
def _load_fits(self, h5file): """ Loads fits from h5file and returns a dictionary of fits. """ fits = {} for key in ['mf']: fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file) for key in ['chif', 'vf']: fits[key] = self._load_vector_fit(key, h5file) return fits
Loads fits from h5file and returns a dictionary of fits.
entailment
def _extra_regression_kwargs(self): """ List of additional kwargs to use in regression tests. """ # larger than default sometimes needed when extrapolating omega_switch_test = 0.019 extra_args = [] extra_args.append({ 'omega0': 5e-3, 'PN_approximant': 'SpinTaylorT4', 'PN_dt': 0.1, 'PN_spin_order': 7, 'PN_phase_order': 7, 'omega_switch': omega_switch_test, }) extra_args.append({ 'omega0': 6e-3, 'PN_approximant': 'SpinTaylorT1', 'PN_dt': 0.5, 'PN_spin_order': 5, 'PN_phase_order': 7, 'omega_switch': omega_switch_test, }) extra_args.append({ 'omega0': 7e-3, 'PN_approximant': 'SpinTaylorT2', 'PN_dt': 1, 'PN_spin_order': 7, 'PN_phase_order': 5, 'omega_switch': omega_switch_test, }) # These should be pure NRSur7dq2 extra_args.append({'omega0': 3e-2}) extra_args.append({'omega0': 5e-2}) return extra_args
List of additional kwargs to use in regression tests.
entailment
def _get_fit_params(self, x, fit_key): """ Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2. """ q, chiAz, chiBz = x[0], x[3], x[6] eta = q/(1.+q)**2 chi_wtAvg = (q*chiAz+chiBz)/(1.+q) chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.) chi_a = (chiAz - chiBz)/2. fit_params = x fit_params[0] = np.log(q) fit_params[3] = chiHat fit_params[6] = chi_a return fit_params
Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2.
entailment
def _evolve_spins(self, q, chiA0, chiB0, omega0, PN_approximant, PN_dt, PN_spin0, PN_phase0, omega0_nrsur):
    """ Evolves spins of the component BHs from an initial orbital
    frequency = omega0 until t=-100 M from the peak of the waveform.

    If omega0 < omega0_nrsur, use PN to evolve the spins until orbital
    frequency = omega0_nrsur. Then evolves further with the NRSur7dq2
    waveform model until t=-100M from the peak.

    Assumes chiA0 and chiB0 are defined in the inertial frame defined at
    orbital frequency = omega0 as:
        The z-axis is along the Newtonian orbital angular momentum when the
            PN orbital frequency = omega0.
        The x-axis is along the line of separation from the smaller BH to
            the larger BH at this frequency.
        The y-axis completes the triad.

    Returns spins in the coorbital frame at t=-100M, as well as the
    coprecessing frame quaternion and orbital phase in the coprecessing
    frame at this time.
    """
    if omega0 < omega0_nrsur:
        # If omega0 is below the NRSur7dq2 start frequency, we use PN
        # to evolve the spins until orbital frequency = omega0_nrsur.
        # Note that we update omega0_nrsur here with the PN
        # frequency that was closest to the input omega0_nrsur.
        chiA0_nrsur_copr, chiB0_nrsur_copr, quat0_nrsur_copr, \
            phi0_nrsur, omega0_nrsur \
            = evolve_pn_spins(q, chiA0, chiB0, omega0, omega0_nrsur,
                approximant=PN_approximant, dt=PN_dt, spinO=PN_spin0,
                phaseO=PN_phase0)
    else:
        # If omega0 >= omega0_nrsur, we evolve spins directly with NRSur7dq2
        # waveform model. We set the coprecessing frame quaternion to
        # identity and orbital phase to 0 at omega=omega0, hence the
        # coprecessing frame is the same as the inertial frame here.
        # Note that we update omega0_nrsur here and set it to omega0
        chiA0_nrsur_copr, chiB0_nrsur_copr, quat0_nrsur_copr, \
            phi0_nrsur, omega0_nrsur \
            = chiA0, chiB0, [1,0,0,0], 0, omega0

    # Load NRSur7dq2 if needed (lazy, done once per process)
    if self.nrsur is None:
        self._load_NRSur7dq2()

    # evaluate NRSur7dq2 dynamics
    # We set allow_extrapolation=True always since we test param limits
    # independently
    quat, orbphase, chiA_copr, chiB_copr = self.nrsur.get_dynamics(q,
        chiA0_nrsur_copr, chiB0_nrsur_copr, init_quat=quat0_nrsur_copr,
        init_phase=phi0_nrsur, omega_ref=omega0_nrsur,
        allow_extrapolation=True)

    # get data at time node where remnant fits are done (t=-100M from peak)
    fitnode_time = -100
    nodeIdx = np.argmin(np.abs(self.nrsur.tds - fitnode_time))
    quat_fitnode = quat.T[nodeIdx]
    orbphase_fitnode = orbphase[nodeIdx]

    # get coorbital frame spins at the time node (rotate the coprecessing
    # frame spins in the orbital plane by the orbital phase)
    chiA_coorb_fitnode = utils.rotate_in_plane(chiA_copr[nodeIdx],
        orbphase_fitnode)
    chiB_coorb_fitnode = utils.rotate_in_plane(chiB_copr[nodeIdx],
        orbphase_fitnode)

    return chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \
        orbphase_fitnode
Evolves spins of the component BHs from an initial orbital frequency = omega0 until t=-100 M from the peak of the waveform. If omega0 < omega0_nrsur, use PN to evolve the spins until orbital frequency = omega0. Then evolves further with the NRSur7dq2 waveform model until t=-100M from the peak. Assumes chiA0 and chiB0 are defined in the inertial frame defined at orbital frequency = omega0 as: The z-axis is along the Newtonian orbital angular momentum when the PN orbital frequency = omega0. The x-axis is along the line of separation from the smaller BH to the larger BH at this frequency. The y-axis completes the triad. Returns spins in the coorbital frame at t=-100M, as well as the coprecessing frame quaternion and orbital phase in the coprecessing frame at this time.
entailment
def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
    """ Evaluates the surfinBH7dq2 model.

    If kwargs['omega0'] is None (default), chiA and chiB are interpreted
    as the coorbital-frame spins at t=-100M from the waveform peak.
    Otherwise they are the inertial-frame spins at orbital frequency
    omega0 and are first evolved (PN and/or NRSur7dq2) to t=-100M;
    vector outputs (chif, vf) and their errors are then transformed back
    to that inertial frame.
    """
    chiA = np.array(chiA)
    chiB = np.array(chiB)

    # Warn/Exit if extrapolating
    allow_extrap = kwargs.pop('allow_extrap', False)
    self._check_param_limits(q, chiA, chiB, allow_extrap)

    omega0 = kwargs.pop('omega0', None)
    PN_approximant = kwargs.pop('PN_approximant', 'SpinTaylorT4')
    PN_dt = kwargs.pop('PN_dt', 0.1)
    PN_spin_order = kwargs.pop('PN_spin_order', 7)
    PN_phase_order = kwargs.pop('PN_phase_order', 7)
    # Orbital frequency at which to hand over from PN to NRSur7dq2.
    omega_switch = kwargs.pop('omega_switch', 0.018)
    # All recognized kwargs have been popped; anything left is an error.
    self._check_unused_kwargs(kwargs)

    if omega0 is None:
        # If omega0 is None, assume chiA, chiB are the coorbital frame
        # spins at t=-100 M.
        x = np.concatenate(([q], chiA, chiB))
    else:
        # If omega0 is given, evolve the spins from omega0
        # to t = -100 M from the peak.
        chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \
            orbphase_fitnode \
            = self._evolve_spins(q, chiA, chiB, omega0,
                PN_approximant, PN_dt, PN_spin_order, PN_phase_order,
                omega_switch)
        # x should contain coorbital frame spins at t=-100M
        x = np.concatenate(([q], chiA_coorb_fitnode, chiB_coorb_fitnode))

    def eval_vector_fit(x, fit_key):
        # Vector fits return a (value, error) pair per component.
        res = self._evaluate_fits(x, fit_key)
        fit_val = res.T[0]
        fit_err = res.T[1]
        if omega0 is not None:
            # If spins were given in inertial frame at omega0,
            # transform vectors and errors back to the same frame.
            fit_val = utils.transform_vector_coorb_to_inertial(fit_val,
                orbphase_fitnode, quat_fitnode)
            fit_err = utils.transform_error_coorb_to_inertial(fit_val,
                fit_err, orbphase_fitnode, quat_fitnode)
        return fit_val, fit_err

    # Evaluate only the requested fits; for fit_key == 'all' each branch
    # accumulates its result and control falls through to the final return.
    if fit_key == 'mf' or fit_key == 'all':
        mf, mf_err = self._evaluate_fits(x, 'mf')
        if fit_key == 'mf':
            return mf, mf_err
    if fit_key == 'chif' or fit_key == 'all':
        chif, chif_err = eval_vector_fit(x, 'chif')
        if fit_key == 'chif':
            return chif, chif_err
    if fit_key == 'vf' or fit_key == 'all':
        vf, vf_err = eval_vector_fit(x, 'vf')
        if fit_key == 'vf':
            return vf, vf_err
    if fit_key == 'all':
        return mf, chif, vf, mf_err, chif_err, vf_err
Evaluates the surfinBH7dq2 model.
entailment
def LoadFits(name):
    """ Load the data for a fit and return the fit object.

    If the data file is not available on disk it is downloaded first.
    """
    if name not in fits_collection:
        raise Exception('Invalid fit name : %s'%name)

    # Path where the data file for this fit is expected to live.
    data_filename = fits_collection[name].data_url.split('/')[-1]
    local_path = DataPath() + '/' + data_filename
    if not os.path.isfile(local_path):
        DownloadData(name)

    # The fit class is keyed on the suffix of the name, e.g. '7dq2'.
    suffix = name.split('surfinBH')[-1]
    fit = fits_collection[name].fit_class(suffix)
    print('Loaded %s fit.'%name)
    return fit
Loads data for a fit. If data is not available, downloads it before loading.
entailment
def DownloadData(name='all', data_dir=DataPath()):
    """ Downloads fit data to the DataPath() directory.

    If name='all', downloads data for every fit in fits_collection.

    Parameters
    ----------
    name : str
        Name of the fit, or 'all'. Default: 'all'.
    data_dir : str
        Directory to download into. NOTE: the default is evaluated once
        at import time (when this def runs), so it is fixed to the
        DataPath() of the installed package.
    """
    if name == 'all':
        for tmp_name in fits_collection.keys():
            DownloadData(name=tmp_name, data_dir=data_dir)
        return

    if name not in fits_collection.keys():
        raise Exception('Invalid fit name : %s'%name)

    print('Downloading %s data'%name)
    data_url = fits_collection[name].data_url
    filename = data_url.split('/')[-1]
    # exist_ok=True: no error if the directory already exists, but still
    # raises if the path exists and is not a directory — same behavior as
    # the old errno.EEXIST check, without the boilerplate.
    os.makedirs(data_dir, exist_ok=True)
    urlretrieve(data_url, data_dir + '/' + filename)
Downloads fit data to the DataPath() directory. If name='all', gets all fit data.
entailment
def _load_fits(self, h5file):
    """ Read all scalar fits from h5file and return them keyed by name.

    The 3dq8 model uses only scalar fits: final mass, the z-component of
    the final spin, and the in-plane kick components.
    """
    scalar_keys = ('mf', 'chifz', 'vfx', 'vfy')
    return {key: self._load_scalar_fit(fit_key=key, h5file=h5file)
            for key in scalar_keys}
Loads fits from h5file and returns a dictionary of fits.
entailment
def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
    """ Evaluates the surfinBH3dq8 model.

    fit_key selects 'mf', 'chif', 'vf' or 'all'. Only the z-components
    of the (aligned) spins enter the fits.
    """
    chiA = np.array(chiA)
    chiB = np.array(chiB)

    # Warn/Exit if extrapolating
    allow_extrap = kwargs.pop('allow_extrap', False)
    self._check_param_limits(q, chiA, chiB, allow_extrap)
    self._check_unused_kwargs(kwargs)

    # Fit inputs: mass ratio and the aligned spin components.
    x = [q, chiA[2], chiB[2]]

    def wanted(key):
        return fit_key == key or fit_key == 'all'

    if wanted('mf'):
        mf, mf_err = self._evaluate_fits(x, 'mf')
        if fit_key == 'mf':
            return mf, mf_err

    if wanted('chif'):
        chifz, chifz_err = self._evaluate_fits(x, 'chifz')
        # Remnant spin is along z for aligned-spin binaries.
        chif = np.array([0, 0, chifz])
        chif_err = np.array([0, 0, chifz_err])
        if fit_key == 'chif':
            return chif, chif_err

    if wanted('vf'):
        vfx, vfx_err = self._evaluate_fits(x, 'vfx')
        vfy, vfy_err = self._evaluate_fits(x, 'vfy')
        # Kick lies in the orbital plane for aligned-spin binaries.
        vf = np.array([vfx, vfy, 0])
        vf_err = np.array([vfx_err, vfy_err, 0])
        if fit_key == 'vf':
            return vf, vf_err

    if fit_key == 'all':
        return mf, chif, vf, mf_err, chif_err, vf_err
Evaluates the surfinBH3dq8 model.
entailment
def lal_spin_evloution_wrapper(approximant, q, omega0, chiA0, chiB0, dt, spinO, phaseO):
    """ Wrapper around lalsimulation.SimInspiralSpinTaylorPNEvolveOrbit.

    NOTE: the misspelling 'evloution' in the function name is kept as-is
    for backward compatibility with existing callers.

    Inputs:
        approximant: 'SpinTaylorT1/T2/T4'
        q:           Mass ratio (q>=1)
        omega0:      Initial orbital frequency in dimless units.
        chiA0:       Dimless spin of BhA at initial freq.
        chiB0:       Dimless spin of BhB at initial freq.
        dt:          Dimless step time for evolution.
        spinO:       Twice PN order of spin effects.
        phaseO:      Twice PN order in phase.

    Outputs (all are time series):
        Omega:  Dimensionless orbital frequency.
        Phi:    Orbital phase (radians)
        ChiA:   Dimensionless spin of BhA
        ChiB:   Dimensionless spin of BhB
        LNhat:  Orbital angular momentum direction
        E1:     Orbital plane basis vector

    The frame is defined at the initial frequency, as follows:
        z-axis is set by the orbital angular momentum direction.
        x-axis is the separation vector from BhB to BhA.
        y-axis completes the triad by right-hand rule.
    All quantities are defined in this fixed frame, including initial spins,
    returned spins, other vectors like LNhat, etc.
    """
    approxTag = lalsim.GetApproximantFromString(approximant)

    # Total mass in solar masses. This does not affect the returned values
    # as they are dimensionless; lal just needs some mass scale.
    M = 100

    # time step and initial GW freq in SI units
    MT = M*MTSUN_SI
    deltaT = dt*MT
    fStart = omega0/np.pi/MT

    # component masses of the binary
    m1_SI = M*MSUN_SI*q/(1.+q)
    m2_SI = M*MSUN_SI/(1.+q)

    # spins at fStart
    s1x, s1y, s1z = chiA0
    s2x, s2y, s2z = chiB0

    # fEnd=0 means integrate as far forward as possible
    fEnd = 0

    # initial value of orbital angular momentum unit vector, i.e at fStart
    lnhatx, lnhaty, lnhatz = 0,0,1

    # initial value of orbital plane basis vector, i.e at fStart
    e1x, e1y, e1z = 1, 0, 0

    # tidal deformability parameters (zero/unity: both objects are BHs)
    lambda1, lambda2 = 0, 0
    quadparam1, quadparam2 = 1, 1

    # twice PN order of tidal effects (disabled)
    tideO = 0

    # Evolve the orbital dynamics of the precessing binary with the chosen
    # TaylorT1/T2/T4 approximant (see arXiv:0907.0700 for a review of the
    # PN approximants). Returns time series of: the PN expansion parameter
    # V, the orbital phase, the components of both spin vectors, LNhat
    # (Newtonian orbital angular momentum direction) and E1 (a basis vector
    # in the instantaneous orbital plane). LNhat and E1 together completely
    # specify the instantaneous orbital plane. All vectors are given in the
    # frame defined at the reference frequency (see docstring above).
    V, Phi, S1x, S1y, S1z, S2x, S2y, S2z, LNhatx, LNhaty, LNhatz, \
        E1x, E1y, E1z = lalsim.SimInspiralSpinTaylorPNEvolveOrbit(deltaT, \
        m1_SI, m2_SI, fStart, fEnd, s1x, s1y, s1z, s2x, s2y, s2z, \
        lnhatx, lnhaty, lnhatz, e1x, e1y, e1z, lambda1, lambda2, \
        quadparam1, quadparam2, spinO, tideO, phaseO, approxTag)

    # Unpack the lal REAL8TimeSeries objects into plain numpy arrays.
    V = np.array(V.data.data)
    Phi = np.array(Phi.data.data)
    ChiA = np.array([S1x.data.data, S1y.data.data, S1z.data.data]).T
    ChiB = np.array([S2x.data.data, S2y.data.data, S2z.data.data]).T
    LNhat = np.array([LNhatx.data.data, LNhaty.data.data, LNhatz.data.data]).T
    E1 = np.array([E1x.data.data, E1y.data.data, E1z.data.data]).T

    # lalsimulation returns spins scaled by the squared mass fractions
    # (S_i/M^2); divide those factors back out to recover the
    # dimensionless chi_i.
    ChiA = ChiA/(q/(1.+q))**2
    ChiB = ChiB/(1./(1.+q))**2

    # Orbital frequency from the PN expansion parameter, Omega = V^3.
    Omega = V**3

    return Omega, Phi, ChiA, ChiB, LNhat, E1
Inputs: approximant: 'SpinTaylorT1/T2/T4' q: Mass ratio (q>=1) omega0: Initial orbital frequency in dimless units. chiA0: Dimless spin of BhA at initial freq. chiB0: Dimless spin of BhB at initial freq. dt: Dimless step time for evolution. spinO: Twice PN order of spin effects. phaseO: Twice PN order in phase. Outputs (all are time series): Omega: Dimensionless orbital frequency. Phi: Orbital phase (radians) ChiA: Dimensionless spin of BhA ChiB: Dimensionless spin of BhB LNhat: Orbital angular momentum direction E1: Orbital plane basis vector The frame is defined at the initial frequency, as follows: z-axis is set by the orbital angular momentum direction. x-axis is the separation vector from BhB to BhA. y-axis completes the triad by right-hand rule. All quantities are defined in this fixed frame, including initial spins, returned spins, other vectors like LNhat, etc.
entailment
def evolve_pn_spins(q, chiA0, chiB0, omega0, omegaTimesM_final, approximant='SpinTaylorT4', dt=0.1, spinO=7, phaseO=7):
    """ Evolves PN spins from a starting orbital frequency and spins to a
    final frequency.

    Inputs:
        q:                 Mass ratio (q>=1)
        chiA0:             Dimless spin of BhA at initial freq.
        chiB0:             Dimless spin of BhB at initial freq.
        omega0:            Initial orbital frequency in dimless units.
        omegaTimesM_final: Final orbital frequency in dimless units.
        approximant:       'SpinTaylorT1/T2/T4'. Default: 'SpinTaylorT4'.
        dt:                Dimless step time for evolution. Default: 0.1 .
        spinO:             Twice PN order of spin effects. Default: 7 .
        phaseO:            Twice PN order in phase. Default: 7 .

    Outputs:
        chiA_end_copr:     Spin of BhA at final frequency, in coprecessing
                           frame.
        chiB_end_copr:     Spin of BhB at final frequency, in coprecessing
                           frame.
        q_copr_end:        Coprecessing frame quaternion at final frequency.
        phi_end:           Orbital phase in the coprecessing frame at final
                           frequency.
        omegaTimesM_end:   Dimensionless final frequency. Should agree with
                           omegaTimesM_final.

    The inertial frame is assumed to be aligned to the coorbital frame at
    orbital frequency = omega0. chiA0 and chiB0 are the inertial/coorbital
    frame spins at omega0.
    """
    omega, phi, chiA, chiB, lNhat, e1 = lal_spin_evloution_wrapper(approximant, q, omega0, chiA0, chiB0, dt, spinO, phaseO)

    # Compute omega, inertial spins, angular momentum direction and orbital
    # phase when omega = omegaTimesM_final (closest sample to it).
    end_idx = np.argmin(np.abs(omega - omegaTimesM_final))
    omegaTimesM_end = omega[end_idx]
    chiA_end = chiA[end_idx]
    chiB_end = chiB[end_idx]
    lNhat_end = lNhat[end_idx]
    phi_end = phi[end_idx]

    # Align the z-direction along orbital angular momentum direction
    # at end_idx. This moves us in to the coprecessing frame.
    q_copr_end = _utils.alignVec_quat(lNhat_end)
    # Rotate the final spins into the coprecessing frame (inverse=1 since
    # the quaternion maps coprecessing -> inertial).
    chiA_end_copr = _utils.transformTimeDependentVector(
        np.array([q_copr_end]).T,
        np.array([chiA_end]).T, inverse=1).T[0]
    chiB_end_copr = _utils.transformTimeDependentVector(
        np.array([q_copr_end]).T,
        np.array([chiB_end]).T, inverse=1).T[0]

    return chiA_end_copr, chiB_end_copr, q_copr_end, phi_end, omegaTimesM_end
Evolves PN spins from a starting orbital frequency and spins to a final frequency. Inputs: q: Mass ratio (q>=1) chiA0: Dimless spin of BhA at initial freq. chiB0: Dimless spin of BhB at initial freq. omega0: Initial orbital frequency in dimless units. omegaTimesM_final: Final orbital frequency in dimless units. approximant: 'SpinTaylorT1/T2/T4'. Default: 'SpinTaylorT4'. dt: Dimless step time for evolution. Default: 0.1 . spinO: Twice PN order of spin effects. Default: 7 . phaseO: Twice PN order in phase. Default: 7 . Outputs: chiA_end_copr: Spin of BhA at final frequency, in coprecessing frame. chiB_end_copr: Spin of BhB at final frequency, in coprecessing frame. q_copr_end: Coprecessing frame quaternion at final frequency. phi_end: Orbital phase in the coprecessing frame at final frequency. omegaTimesM_end Dimensionless final frequency. Should agree with omegaTimesM_final. The inertial frame is assumed to be aligned to the coorbital frame at orbital frequency = omega0. chiA0 and chiB0 are the inertial/coorbital frame spins at omega0.
entailment
def is_null(value):
    """ Check if the scalar value or tuple/list value is NULL.

    :param value: Value to check.
    :type value: a scalar or tuple or list

    :return: Returns ``True`` if and only if the value is NULL (scalar value
        is None or _any_ tuple/list elements are None).
    :rtype: bool
    """
    # Exact type check (not isinstance) on purpose: subclasses such as
    # namedtuples are treated as scalars, matching the original contract.
    if type(value) not in (tuple, list):
        return value is None
    return any(element is None for element in value)
Check if the scalar value or tuple/list value is NULL. :param value: Value to check. :type value: a scalar or tuple or list :return: Returns ``True`` if and only if the value is NULL (scalar value is None or _any_ tuple/list elements are None). :rtype: bool
entailment
def qual(obj):
    """ Return fully qualified name of a class. """
    cls = obj.__class__
    return u'.'.join([cls.__module__, cls.__name__])
Return fully qualified name of a class.
entailment
def select(self, txn, from_key=None, to_key=None, return_keys=True, return_values=True, reverse=False, limit=None):
    """ Select all records (key-value pairs) in table, optionally within a
    given key range.

    :param txn: The transaction in which to run.
    :type txn: :class:`zlmdb.Transaction`

    :param from_key: Return records starting from (and including) this key.
    :type from_key: object

    :param to_key: Return records up to (but not including) this key.
    :type to_key: object

    :param return_keys: If ``True`` (default), return keys of records.
    :type return_keys: bool

    :param return_values: If ``True`` (default), return values of records.
    :type return_values: bool

    :param reverse: If ``True``, iterate in reverse key order.
    :type reverse: bool

    :param limit: Limit number of records returned.
    :type limit: int

    :return: An iterator over the selected records.
    """
    # Exact-type checks (not isinstance) are deliberate here.
    assert type(return_keys) == bool
    assert type(return_values) == bool
    assert type(reverse) == bool
    assert limit is None or (type(limit) == int and 0 < limit < 10000000)

    return PersistentMapIterator(txn,
                                 self,
                                 from_key=from_key,
                                 to_key=to_key,
                                 return_keys=return_keys,
                                 return_values=return_values,
                                 reverse=reverse,
                                 limit=limit)
Select all records (key-value pairs) in table, optionally within a given key range. :param txn: The transaction in which to run. :type txn: :class:`zlmdb.Transaction` :param from_key: Return records starting from (and including) this key. :type from_key: object :param to_key: Return records up to (but not including) this key. :type to_key: object :param return_keys: If ``True`` (default), return keys of records. :type return_keys: bool :param return_values: If ``True`` (default), return values of records. :type return_values: bool :param limit: Limit number of records returned. :type limit: int :return:
entailment
def count(self, txn, prefix=None):
    """ Count number of records in the persistent map.

    When no prefix is given, the total number of records is returned.
    When a prefix is given, only the number of records with keys that
    have this prefix are counted.

    :param txn: The transaction in which to run.
    :type txn: :class:`zlmdb.Transaction`

    :param prefix: The key prefix of records to count.
    :type prefix: object

    :returns: The number of records.
    :rtype: int
    """
    # All keys of this table share a 2-byte big-endian slot prefix.
    key_from = struct.pack('>H', self._slot)
    if prefix:
        key_from += self._serialize_key(prefix)
    kfl = len(key_from)
    cnt = 0
    cursor = txn._txn.cursor()
    # Position the cursor at the first key >= key_from, then walk forward
    # counting entries as long as the key still carries the wanted prefix.
    has_more = cursor.set_range(key_from)
    while has_more:
        _key = cursor.key()
        _prefix = _key[:kfl]
        if _prefix != key_from:
            break
        cnt += 1
        has_more = cursor.next()
    return cnt
Count number of records in the persistent map. When no prefix is given, the total number of records is returned. When a prefix is given, only the number of records with keys that have this prefix are counted. :param txn: The transaction in which to run. :type txn: :class:`zlmdb.Transaction` :param prefix: The key prefix of records to count. :type prefix: object :returns: The number of records. :rtype: int
entailment
def count_range(self, txn, from_key, to_key):
    """ Count number of records in the persistent map with keys within the
    given range.

    :param txn: The transaction in which to run.
    :type txn: :class:`zlmdb.Transaction`

    :param from_key: Count records starting and including from this key.
    :type from_key: object

    :param to_key: End counting records before this key.
    :type to_key: object

    :returns: The number of records.
    :rtype: int
    """
    key_from = struct.pack('>H', self._slot) + self._serialize_key(from_key)
    # NOTE(review): this rebinds the to_key parameter to its serialized,
    # slot-prefixed byte form; from here on to_key is raw bytes.
    to_key = struct.pack('>H', self._slot) + self._serialize_key(to_key)
    cnt = 0
    cursor = txn._txn.cursor()
    # Seek to the first key >= key_from, then count until reaching
    # to_key (exclusive) or the end of the database.
    has_more = cursor.set_range(key_from)
    while has_more:
        if cursor.key() >= to_key:
            break
        cnt += 1
        has_more = cursor.next()
    return cnt
Count number of records in the persistent map with keys within the given range. :param txn: The transaction in which to run. :type txn: :class:`zlmdb.Transaction` :param from_key: Count records starting and including from this key. :type from_key: object :param to_key: End counting records before this key. :type to_key: object :returns: The number of records. :rtype: int
entailment
def _random_string():
    """ Generate a globally unique serial / product code of the form
    ``u'YRAC-EL4X-FQQE-AW4T-WNUV-VN6T'``.

    The generated value is cryptographically strong and has (at least)
    114 bits of entropy.

    :return: new random string key
    """
    rng = random.SystemRandom()
    total_chars = CHAR_GROUPS * CHARS_PER_GROUP
    raw = u''.join(rng.choice(CHARSET) for _ in range(total_chars))
    if CHARS_PER_GROUP <= 1:
        return raw
    # Slice the raw token into fixed-size groups and join with the
    # group separator.
    groups = [raw[i:i + CHARS_PER_GROUP]
              for i in range(0, total_chars, CHARS_PER_GROUP)]
    return GROUP_SEP.join(groups)
Generate a globally unique serial / product code of the form ``u'YRAC-EL4X-FQQE-AW4T-WNUV-VN6T'``. The generated value is cryptographically strong and has (at least) 114 bits of entropy. :return: new random string key
entailment
def _read_dict(self, f):
    """ Converts h5 groups to dictionaries.

    Recursively walks an h5py group/file and rebuilds the nested
    dict/list structure that was flattened when the file was written:
      - plain datasets become values, with the string sentinels "NONE"
        and "EMPTYARR" mapped back to None and an empty array;
      - groups named "DICT_<key>" become nested dicts under <key>;
      - groups named "LIST_<key>" become lists under <key> (their members
        are stored under stringified indices "0", "1", ...).
    """
    d = {}
    for k, item in f.items():
        if type(item) == h5py._hl.dataset.Dataset:
            # NOTE(review): item.value is deprecated in recent h5py
            # (use item[()]) — confirm against the pinned h5py version.
            v = item.value
            if type(v) == np.string_:
                v = str(v)
            if type(v) == str and v == "NONE":
                d[k] = None
            elif type(v) == str and v == "EMPTYARR":
                d[k] = np.array([])
            elif isinstance(v, bytes):
                # h5py returns byte strings for stored text; decode.
                d[k] = v.decode('utf-8')
            else:
                d[k] = v
        elif k[:5] == "DICT_":
            d[k[5:]] = self._read_dict(item)
        elif k[:5] == "LIST_":
            tmpD = self._read_dict(item)
            d[k[5:]] = [tmpD[str(i)] for i in range(len(tmpD))]
    return d
Converts h5 groups to dictionaries
entailment
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
    """ Load and return a single scalar fit.

    Either pass both fit_key and h5file (the fit data is then read from
    the h5 group), or pass fit_data directly — never a mix of the two.
    """
    # fit_key and h5file must be given together.
    if (fit_key is None) != (h5file is None):
        raise ValueError("Either specify both fit_key and h5file, or"
            " neither")
    # Exactly one data source must be specified.
    if (fit_key is None) == (fit_data is None):
        raise ValueError("Specify exactly one of fit_key and fit_data.")

    if fit_data is None:
        fit_data = self._read_dict(h5file[fit_key])

    # GPR fits carry their own error estimate; other fit types do not.
    if fit_data.get('fitType') == 'GPR':
        return _eval_pysur.evaluate_fit.getGPRFitAndErrorEvaluator(fit_data)
    return _eval_pysur.evaluate_fit.getFitEvaluator(fit_data)
Loads a single fit
entailment
def _load_vector_fit(self, fit_key, h5file):
    """ Load a vector fit as a list of per-component scalar fits.

    The components are stored in the h5 group as 'comp_0', 'comp_1', ...
    """
    n_comps = len(h5file[fit_key].keys())
    return [
        self._load_scalar_fit(
            fit_data=self._read_dict(h5file[fit_key]['comp_%d'%i]))
        for i in range(n_comps)
    ]
Loads a vector of fits
entailment
def _evaluate_fits(self, x, fit_key): """ Evaluates a particular fit by passing fit_key to self.fits. Assumes self._get_fit_params() has been overriden. """ fit = self.fits[fit_key] fit_params = self._get_fit_params(np.copy(x), fit_key) if type(fit) == list: res = [] for i in range(len(fit)): res.append(fit[i](fit_params)) return np.array(res) else: return fit(fit_params)
Evaluates a particular fit by passing fit_key to self.fits. Assumes self._get_fit_params() has been overriden.
entailment
def _check_unused_kwargs(self, kwargs): """ Call this at the end of call module to check if all the kwargs have been used. Assumes kwargs were extracted using pop. """ if len(kwargs.keys()) != 0: unused = "" for k in kwargs.keys(): unused += "'%s', "%k if unused[-2:] == ", ": # get rid of trailing comma unused = unused[:-2] raise Exception('Unused keys in kwargs: %s'%unused)
Call this at the end of call module to check if all the kwargs have been used. Assumes kwargs were extracted using pop.
entailment
def _check_param_limits(self, q, chiA, chiB, allow_extrap): """ Checks that params are within allowed range of paramters. Raises a warning if outside self.soft_param_lims limits and raises an error if outside self.hard_param_lims. If allow_extrap=True, skips these checks. """ if q < 1: raise ValueError('Mass ratio should be >= 1.') chiAmag = np.sqrt(np.sum(chiA**2)) chiBmag = np.sqrt(np.sum(chiB**2)) if chiAmag > 1 + 1e-14: raise ValueError('Spin magnitude of BhA > 1.') if chiBmag > 1 + 1e-14: raise ValueError('Spin magnitude of BhB > 1.') if self.aligned_spin_only: if np.sqrt(np.sum(chiA[:2]**2)) > 1e-14: raise ValueError('The x & y components of chiA should be zero.') if np.sqrt(np.sum(chiB[:2]**2)) > 1e-14: raise ValueError('The x & y components of chiB should be zero.') # Do not check param limits if allow_extrap=True if allow_extrap: return if q > self.hard_param_lims['q']+ 1e-14: raise ValueError('Mass ratio outside allowed range.') elif q > self.soft_param_lims['q']: warnings.warn('Mass ratio outside training range.') if chiAmag > self.hard_param_lims['chiAmag']+ 1e-14: raise ValueError('Spin magnitude of BhA outside allowed range.') elif chiAmag > self.soft_param_lims['chiAmag']: warnings.warn('Spin magnitude of BhA outside training range.') if chiBmag > self.hard_param_lims['chiBmag']+ 1e-14: raise ValueError('Spin magnitude of BhB outside allowed range.') elif chiBmag > self.soft_param_lims['chiBmag']: warnings.warn('Spin magnitude of BhB outside training range.')
Checks that params are within the allowed range of parameters. Raises a warning if outside self.soft_param_lims limits and raises an error if outside self.hard_param_lims. If allow_extrap=True, skips these checks.
entailment
def slot(self, slot_index, marshal=None, unmarshal=None, build=None, cast=None, compress=False):
    """ Decorator for use on classes derived from zlmdb.PersistentMap.

    The decorator defines slots in a LMDB database schema based on
    persistent maps, and slot configuration.

    :param slot_index: Numeric index of the slot; must be unique within
        this schema.
    :param marshal: Optional marshaller applied to values before
        serialization.
    :param unmarshal: Optional unmarshaller applied after deserialization.
    :param build: Optional builder for values.
    :param cast: Optional cast applied to values.
    :param compress: If true, compress serialized values.
    :return: The class decorator.
    """
    def decorate(o):
        # The decorated object must be a PersistentMap instance (tables
        # are declared as instantiated map objects).
        assert isinstance(o, PersistentMap)
        name = o.__class__.__name__
        # Slot indexes and table names must be unique within the schema.
        assert slot_index not in self._index_to_slot
        assert name not in self._name_to_slot
        # Attach the slot configuration to the map object itself ...
        o._zlmdb_slot = slot_index
        o._zlmdb_marshal = marshal
        o._zlmdb_unmarshal = unmarshal
        o._zlmdb_build = build
        o._zlmdb_cast = cast
        o._zlmdb_compress = compress
        # ... and register it in the schema's slot lookup tables.
        _slot = Slot(slot_index, name, o)
        self._index_to_slot[slot_index] = _slot
        self._name_to_slot[name] = _slot
        return o
    return decorate
Decorator for use on classes derived from zlmdb.PersistentMap. The decorator define slots in a LMDB database schema based on persistent maps, and slot configuration. :param slot_index: :param marshal: :param unmarshal: :param build: :param cast: :param compress: :return:
entailment
def DataPath():
    """ Return the default path for fit data h5 files. """
    package_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.abspath(os.path.join(package_dir, 'data'))
Return the default path for fit data h5 files
entailment
def request_object_encryption(msg, service_context, **kwargs):
    """ Create an encrypted JSON Web Token with *msg* as the payload.

    The encryption algorithm/method are taken from kwargs if present,
    otherwise from the client's registered behaviour. If no encryption
    algorithm is configured at all, *msg* is returned unencrypted.

    :param msg: The message (request object) to encrypt.
    :param service_context: The service context holding client behaviour
        and the key jar.
    :param kwargs: Recognized keys: 'request_object_encryption_alg',
        'request_object_encryption_enc', 'enc_kid' and 'target' (the
        intended receiver; required once encryption is configured).
    :return: An encrypted JWT, or *msg* unchanged when no encryption
        algorithm is configured.
    """
    # Resolve the key-management algorithm: explicit kwarg first, then
    # client behaviour; absent/falsy means "do not encrypt".
    try:
        encalg = kwargs["request_object_encryption_alg"]
    except KeyError:
        try:
            encalg = service_context.behaviour["request_object_encryption_alg"]
        except KeyError:
            return msg

    if not encalg:
        return msg

    # Resolve the content-encryption method; once an alg is configured,
    # a missing/falsy enc is a hard error.
    try:
        encenc = kwargs["request_object_encryption_enc"]
    except KeyError:
        try:
            encenc = service_context.behaviour["request_object_encryption_enc"]
        except KeyError:
            raise MissingRequiredAttribute(
                "No request_object_encryption_enc specified")

    if not encenc:
        raise MissingRequiredAttribute(
            "No request_object_encryption_enc specified")

    _jwe = JWE(msg, alg=encalg, enc=encenc)
    # Key type needed to pick suitable encryption keys from the key jar.
    _kty = alg2keytype(encalg)

    try:
        _kid = kwargs["enc_kid"]
    except KeyError:
        _kid = ""

    # The receiver of the encrypted JWT must be known to select its keys.
    if "target" not in kwargs:
        raise MissingRequiredAttribute("No target specified")

    if _kid:
        _keys = service_context.keyjar.get_encrypt_key(_kty, owner=kwargs["target"], kid=_kid)
        _jwe["kid"] = _kid
    else:
        _keys = service_context.keyjar.get_encrypt_key(_kty, owner=kwargs["target"])

    return _jwe.encrypt(_keys)
Create an encrypted JSON Web token with *msg* as body. :param msg: The message :param service_context: :param kwargs: :return:
entailment