def iterate(self, max_iter=150):
    r"""Iterate

    This method calls update until either the convergence criterion is met
    or the maximum number of iterations is reached

    Parameters
    ----------
    max_iter : int, optional
        Maximum number of iterations (default is ``150``)

    """
    self._run_alg(max_iter)

    # retrieve metrics results
    self.retrieve_outputs()
    self.x_final = self._x_new
def build_masked_loss(loss_function, mask_value):
    """Builds a loss function that masks based on targets

    Args:
        loss_function: The loss function to mask
        mask_value: The value to mask in the targets

    Returns:
        function: a loss function that acts like loss_function with masked inputs
    """

    def masked_loss_function(y_true, y_pred):
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * mask, y_pred * mask)

    return masked_loss_function
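# Usage sketch (not from the source): assuming a Keras environment where `K`
# above is `keras.backend`, the wrapper drops straight into `model.compile`.
# The model and the mask value of -1 are illustrative.
from keras import layers, losses, models

model = models.Sequential([layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam",
              loss=build_masked_loss(losses.mean_squared_error, mask_value=-1))
# Targets equal to -1 now contribute zero to the loss.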
async def identity_of(client: Client, search: str) -> dict:
    """
    GET Identity data written in the blockchain

    :param client: Client to connect to the api
    :param search: UID or public key
    :return:
    """
    return await client.get(MODULE + '/identity-of/%s' % search, schema=IDENTITY_OF_SCHEMA)
def decipher(self, string, keep_punct=False):
    """Decipher string using Simple Substitution cipher according to initialised key.

    Example::

        plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext)

    :param string: The string to decipher.
    :param keep_punct: if true, punctuation and spacing are retained. If false,
        it is all removed. Default is False.
    :returns: The deciphered string.
    """
    # if we have not yet calculated the inverse key, calculate it now
    if self.invkey == '':
        for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            self.invkey += self.i2a(self.key.index(i))
    if not keep_punct:
        string = self.remove_punctuation(string)
    ret = ''
    for c in string.upper():
        if c.isalpha():
            ret += self.invkey[self.a2i(c)]
        else:
            ret += c
    return ret
def get_meta_type_by_dir(dir_name):
    parent_data = get_default_metadata_data()
    child_data = get_child_metadata_data()
    data = parent_data['metadataObjects'] + child_data
    for item in data:
        if 'directoryName' in item and item['directoryName'].lower() == dir_name.lower():
            return item
        elif 'tagName' in item and item['tagName'].lower() == dir_name.lower():
            return item
    # Quick and dirty fix for users experiencing issues with "newer" metadata
    # types not properly tested by mm: if the project has a cached .describe,
    # use that to detect metadata types.
    try:
        if config.describe_data is not None:
            project_org_describe = config.describe_data
        if config.project is not None and os.path.isfile(os.path.join(config.project.location, 'config', '.describe')):
            project_org_describe = parse_json_from_file(os.path.join(config.project.location, 'config', '.describe'))
        if project_org_describe is not None and 'metadataObjects' in project_org_describe:
            for item in project_org_describe["metadataObjects"]:
                if 'directoryName' in item and item['directoryName'].lower() == dir_name.lower():
                    return item
                elif 'tagName' in item and item['tagName'].lower() == dir_name.lower():
                    return item
    except Exception:
        pass
def image_shift(xshift=0, yshift=0, axes="gca"):
    """
    This will shift an image to a new location on x and y.
    """
    if axes == "gca":
        axes = _pylab.gca()

    e = axes.images[0].get_extent()

    e[0] = e[0] + xshift
    e[1] = e[1] + xshift
    e[2] = e[2] + yshift
    e[3] = e[3] + yshift

    axes.images[0].set_extent(e)
    _pylab.draw()
def drop(self):
    """Drop this database.

    See
    https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
    """
    api = self._instance._client.database_admin_api
    metadata = _metadata_with_prefix(self.name)
    api.drop_database(self.name, metadata=metadata)
def reset(self):
    """Reset all upstream Steps to the default training parameters and
    clean the cache for all upstream Steps, including this Step.

    Defaults are:
        'mode': 'train',
        'is_fittable': True,
        'force_fitting': True,
        'persist_output': False,
        'cache_output': False,
        'load_persisted_output': False
    """
    self.clean_cache_upstream()
    self.set_mode_train()
    for step_obj in self.all_upstream_steps.values():
        step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']
        step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']
        step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']
        step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']
        step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']
    logger.info('Step {}, reset all upstream Steps to default training parameters, '
                'including this Step'.format(self.name))
    return self
def _set_allowed_ouis(self, v, load=False):
    """
    Setter method for allowed_ouis, mapped from YANG variable
    /interface/port_channel/switchport/port_security/allowed_ouis (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_allowed_ouis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_allowed_ouis() directly.

    YANG Description: List of allowed OUIs
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType(
                "oui", allowed_ouis.allowed_ouis, yang_name="allowed-ouis",
                rest_name="allowed-ouis", parent=self, is_container='list',
                user_ordered=False, path_helper=self._path_helper,
                yang_keys='oui',
                extensions={u'tailf-common': {u'info': u'List of allowed OUIs', u'cli-drop-node-name': None, u'cli-suppress-mode': None, u'callpoint': u'interface_po_portsecurity_oui', u'cli-suppress-list-no': None}}),
            is_container='list', yang_name="allowed-ouis",
            rest_name="allowed-ouis", parent=self,
            path_helper=self._path_helper, extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'List of allowed OUIs', u'cli-drop-node-name': None, u'cli-suppress-mode': None, u'callpoint': u'interface_po_portsecurity_oui', u'cli-suppress-list-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-interface',
            defining_module='brocade-interface', yang_type='list',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """allowed_ouis must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("oui",allowed_ouis.allowed_ouis, yang_name="allowed-ouis", rest_name="allowed-ouis", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='oui', extensions={u'tailf-common': {u'info': u'List of allowed OUIs', u'cli-drop-node-name': None, u'cli-suppress-mode': None, u'callpoint': u'interface_po_portsecurity_oui', u'cli-suppress-list-no': None}}), is_container='list', yang_name="allowed-ouis", rest_name="allowed-ouis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of allowed OUIs', u'cli-drop-node-name': None, u'cli-suppress-mode': None, u'callpoint': u'interface_po_portsecurity_oui', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
        })

    self.__allowed_ouis = t
    if hasattr(self, '_set'):
        self._set()
def add_timing(self, label, secs, is_tool=False):
    """Aggregate timings by label.

    secs - a double, so fractional seconds are allowed.
    is_tool - whether this label represents a tool invocation.
    """
    self._timings_by_path[label] += secs
    if is_tool:
        self._tool_labels.add(label)
    # Check existence in case we're a clean-all. We don't want to write anything in that case.
    if self._path and os.path.exists(os.path.dirname(self._path)):
        with open(self._path, 'w') as f:
            for x in self.get_all():
                f.write('{label}: {timing}\n'.format(**x))
def nvmlDeviceGetComputeMode(handle):
    r"""
    /**
     * Retrieves the current compute mode for the device.
     *
     * For all products.
     *
     * See \ref nvmlComputeMode_t for details on allowed compute modes.
     *
     * @param device                               The identifier of the target device
     * @param mode                                 Reference in which to return the current compute mode
     *
     * @return
     *         - \ref NVML_SUCCESS                 if \a mode has been set
     *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
     *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a mode is NULL
     *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
     *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
     *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
     *
     * @see nvmlDeviceSetComputeMode()
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode
    """
    c_mode = _nvmlComputeMode_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeMode")
    ret = fn(handle, byref(c_mode))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_mode.value)
def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None,
          retry_log_level=logging.INFO,
          retry_log_message="Connection broken in '{f}' (error: '{e}'); "
                            "retrying with new connection.",
          max_failures=None, interval=0,
          max_failure_log_level=logging.ERROR,
          max_failure_log_message="Max retries reached for '{f}'. Aborting."):
    """
    Decorator to automatically reexecute a function if the connection is
    broken for any reason.
    """
    exc_classes = tuple(exc_classes)

    @wraps(f)
    def deco(*args, **kwargs):
        failures = 0
        while True:
            try:
                return f(*args, **kwargs)
            except exc_classes as e:
                if logger is not None:
                    # f.__name__ works on both Python 2 and 3 (func_name is Python 2 only).
                    logger.log(retry_log_level,
                               retry_log_message.format(f=f.__name__, e=e))
                gevent.sleep(interval)
                failures += 1
                if max_failures is not None and failures > max_failures:
                    if logger is not None:
                        logger.log(max_failure_log_level,
                                   max_failure_log_message.format(f=f.__name__, e=e))
                    raise
    return deco
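# Usage sketch (assumed caller code): wrap a connection-bound call so it is
# retried on socket errors. `fetch_status` and `conn` are hypothetical.
import logging
import socket

log = logging.getLogger(__name__)

def fetch_status(conn):
    return conn.get('/status')

fetch_status = retry(fetch_status, exc_classes=(socket.error,), logger=log,
                     max_failures=3, interval=2)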
def league(page):
    """ Return the league name """
    soup = BeautifulSoup(page)
    try:
        return soup.find('title').text.split(' | ')[0].split(' - ')[0]
    except Exception:
        return None
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
    """
    Registers a callback function that will generate the :class:`Task` of the :class:`Work`.

    Args:
        cbk_name: Name of the callback function (must be a bound method of self)
        cbk_data: Additional data passed to the callback function.
        deps: Dictionary mapping nodes to file extensions, used to build the
            :class:`Dependency` objects of the work.
        work_class: :class:`Work` class to instantiate.
        manager: The :class:`TaskManager` responsible for the submission of the task.
            If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`.

    Returns:
        The :class:`Work` that will be finalized by the callback.
    """
    # TODO: pass a Work factory instead of a class

    # Directory of the Work.
    work_workdir = os.path.join(self.workdir, "w" + str(len(self)))

    # Create an empty work and register the callback
    work = work_class(workdir=work_workdir, manager=manager)
    self._works.append(work)

    deps = [Dependency(node, exts) for node, exts in deps.items()]
    if not deps:
        raise ValueError("A callback must have deps!")

    work.add_deps(deps)

    # Wrap the callable in a Callback object and save
    # useful info such as the index of the work and the callback data.
    cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
    self._callbacks.append(cbk)

    return work
def update(self, value, tys=None, override=True):
    """
    Update this context. If value is another context,
    the names from that context are added into this one.
    Otherwise, a new name is assigned and returned.

    TODO tys for inputs.
    """
    if isinstance(value, WeldObject):
        self.context.update(value.context)
    else:
        # Ensure that the same inputs always have same names
        value_str = str(value)
        if value_str in WeldObject._registry:
            name = WeldObject._registry[value_str]
        else:
            name = WeldObject.generate_input_name(value_str)
        self.context[name] = value
        if tys is not None and not override:
            self.argtypes[name] = tys
        return name
def set_log_level(logger, level):
    # type: (logging.Logger, int) -> None
    """Dynamic reconfiguration of the log level"""
    if level > 2:
        level = 2
    if level < -1:
        level = -1
    levels = {
        -1: logging.ERROR,
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG
    }
    logger.setLevel(levels[level])
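# Usage sketch: clamp a verbosity count coming from a hypothetical CLI.
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="count", default=0)
args = parser.parse_args(["-vv"])

set_log_level(logging.getLogger("app"), args.verbose)  # 2 -> DEBUG; out-of-range counts are clamped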
def connect_edges_pd(graph):
    """
    Given a Graph element containing abstract edges compute edge
    segments directly connecting the source and target nodes. This
    operation depends on pandas and is a lot faster than the pure
    NumPy equivalent.
    """
    edges = graph.dframe()
    edges.index.name = 'graph_edge_index'
    edges = edges.reset_index()
    nodes = graph.nodes.dframe()
    src, tgt = graph.kdims
    x, y, idx = graph.nodes.kdims[:3]

    df = pd.merge(edges, nodes, left_on=[src.name], right_on=[idx.name])
    df = df.rename(columns={x.name: 'src_x', y.name: 'src_y'})

    df = pd.merge(df, nodes, left_on=[tgt.name], right_on=[idx.name])
    df = df.rename(columns={x.name: 'dst_x', y.name: 'dst_y'})
    df = df.sort_values('graph_edge_index').drop(['graph_edge_index'], axis=1)

    edge_segments = []
    for i, edge in df.iterrows():
        start = edge['src_x'], edge['src_y']
        end = edge['dst_x'], edge['dst_y']
        edge_segments.append(np.array([start, end]))
    return edge_segments
def getThirdpartyLibs(self, libs, configuration='Development', includePlatformDefaults=True):
    """
    Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries
    """
    if includePlatformDefaults == True:
        libs = self._defaultThirdpartyLibs() + libs
    interrogator = self._getUE4BuildInterrogator()
    return interrogator.interrogate(self.getPlatformIdentifier(), configuration, libs, self._getLibraryOverrides())
def _init_taxid2asscs(self):
    """Create dict with taxid keys and annotation namedtuple list."""
    taxid2asscs = cx.defaultdict(list)
    for ntanno in self.associations:
        taxid2asscs[ntanno.tax_id].append(ntanno)
    assert len(taxid2asscs) != 0, "**FATAL: NO TAXIDS: {F}".format(F=self.filename)
    # Print the number of taxids stored.
    prt = sys.stdout
    num_taxids = len(taxid2asscs)
    prt.write('{N} taxids stored'.format(N=num_taxids))
    if num_taxids < 5:
        prt.write(': {Ts}'.format(Ts=' '.join(sorted(str(t) for t in taxid2asscs))))
    prt.write('\n')
    return dict(taxid2asscs)
def list():
    """
    List blink(1) devices connected, by serial number
    :return: List of blink(1) device serial numbers
    """
    try:
        devs = hid.enumerate(VENDOR_ID, PRODUCT_ID)
        # A comprehension avoids relying on the builtin `list`,
        # which this function's name shadows.
        return [d.get('serial_number') for d in devs]
    except IOError:
        return []
def _process(self, segments):
    """sort segments in read order - left to right, up to down"""
    # sort_f = lambda r: max_line_width * (r[1] / max_line_height) + r[0]
    # segments = sorted(segments, key=sort_f)
    # segments = segments_to_numpy(segments)
    # return segments
    mlh, mlw = self.max_line_height, self.max_line_width
    s = segments.astype(numpy.uint32)  # prevent overflows
    order = mlw * (s[:, 1] // mlh) + s[:, 0]
    sort_order = numpy.argsort(order)
    return segments[sort_order]
def messages_from_response(response):
    """Returns a list of the messages from the django MessageMiddleware
    package contained within the given response.  This is to be used during
    unit testing when trying to see if a message was set properly in a view.

    :param response: HttpResponse object, likely obtained through a
        test client.get() or client.post() call

    :returns: a list of tuples (message_string, message_level), one for each
        message in the response context
    """
    messages = []
    if hasattr(response, 'context') and response.context and \
            'messages' in response.context:
        messages = response.context['messages']
    elif hasattr(response, 'cookies'):
        # no "context" set-up or no messages item, check for message info in
        # the cookies
        morsel = response.cookies.get('messages')
        if not morsel:
            return []

        # use the decoder in the CookieStore to process and get a list of
        # messages
        from django.contrib.messages.storage.cookie import CookieStorage
        store = CookieStorage(FakeRequest())
        messages = store._decode(morsel.value)
    else:
        return []

    return [(m.message, m.level) for m in messages]
def plot_freq(self, x, y, title='', ylabel=None, scale='semilogy'):
    """Plot mean frequency spectrum and display in dialog.

    Parameters
    ----------
    x : list
        vector with frequencies
    y : ndarray
        vector with amplitudes
    title : str
        plot title
    ylabel : str
        plot y label
    scale : str
        semilogy, loglog or linear
    """
    freq = self.frequency
    scaling = freq['scaling'].get_value()

    if ylabel is None:
        if freq['complex'].get_value():
            ylabel = 'Amplitude (uV)'
        elif 'power' == scaling:
            ylabel = 'Power spectral density (uV ** 2 / Hz)'
        elif 'energy' == scaling:
            ylabel = 'Energy spectral density (uV ** 2)'

    self.parent.plot_dialog = PlotDialog(self.parent)
    self.parent.plot_dialog.canvas.plot(x, y, title, ylabel, scale=scale)
    self.parent.show_plot_dialog()
def _closed_cb(self, final_frame=None):
    '''
    "Private" callback from the ChannelClass when a channel is closed. Only
    called after broker initiated close, or we receive a close_ok. Caller has
    the option to send a final frame, to be used to bypass any synchronous or
    otherwise-pending frames so that the channel can be cleanly closed.
    '''
    # delete all pending data and send final frame if there is one. note
    # that it bypasses send_frame so that even if the closed state is set,
    # the frame is published.
    if final_frame:
        self._connection.send_frame(final_frame)

    try:
        self._notify_close_listeners()
    finally:
        self._pending_events = deque()
        self._frame_buffer = deque()

        # clear out other references for faster cleanup
        for protocol_class in self._class_map.values():
            protocol_class._cleanup()
            delattr(self, protocol_class.name)
        self._connection = None
        self._class_map = None
        self._close_listeners = set()
def _scheduled_check_for_summaries(self):
    """Present the results if they have become available or timed out."""
    if self._analysis_process is None:
        return
    # handle time out
    timed_out = time.time() - self._analyze_start_time > self.time_limit
    if timed_out:
        self._handle_results('Analysis timed out but managed\n'
                             ' to get lower turn results.',
                             'Analysis timed out with no results.')
        return
    # handle standard completion
    try:
        self._analysis_process.join(0.001)
    except AssertionError:
        pass  # if some timing issue with closed process, just continue
    if not self._analysis_process.is_alive():
        self._handle_results('Completed analysis.',
                             'Unable to find the game on screen.')
        return
    # finally, if it's still alive, then come back later
    self._base.after(self._POLL_PERIOD_MILLISECONDS,
                     self._scheduled_check_for_summaries)
def mouseReleaseEvent(self, event):
    """
    Overloads the mouse release event to ignore the event when the \
    scene is in view mode, and release the selection block signal.

    :param event <QMouseReleaseEvent>
    """
    event.setAccepted(False)

    if self._hotspotPressed:
        event.accept()
        self._hotspotPressed = False
        return

    # ignore events when the scene is in view mode
    scene = self.scene()
    if (self.isLocked() or self._ignoreMouseEvents or
            (scene and (scene.inViewMode() or scene.isConnecting()))):
        event.ignore()
        self._ignoreMouseEvents = False
        return

    super(XNode, self).mouseReleaseEvent(event)

    # emit the geometry changed signal
    self.emitGeometryChanged()

    # unblock the selection signals
    if scene:
        scene.blockSelectionSignals(False)

        delta = datetime.datetime.now() - self._pressTime
        if not scene.signalsBlocked() and delta.seconds < 1:
            scene.nodeClicked.emit(self)
def me(self):
    """Similar to :attr:`Client.user` except an instance of :class:`Member`.
    This is essentially used to get the member version of yourself.
    """
    self_id = self._state.user.id
    return self.get_member(self_id)
def quantile(self, prob=None, combine_method="interpolate", weights_column=None):
    """
    Compute quantiles.

    :param List[float] prob: list of probabilities for which quantiles should be computed.
    :param str combine_method: for even samples this setting determines how to combine quantiles. This
        can be one of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``.
    :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal
        importance. This parameter can be either the name of column containing the observation weights in
        this frame, or a single-column separate H2OFrame of observation weights.

    :returns: a new H2OFrame containing the quantiles and probabilities.
    """
    if len(self) == 0:
        return self
    if prob is None:
        prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]
    if weights_column is None:
        weights_column = "_"
    else:
        assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))
        if isinstance(weights_column, H2OFrame):
            merged = self.cbind(weights_column)
            weights_column = merged.names[-1]
            return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column))
    return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column))
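# Usage sketch, assuming a running H2O cluster; the frame contents are illustrative.
import h2o

h2o.init()
df = h2o.H2OFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
print(df.quantile(prob=[0.25, 0.5, 0.75]))  # one row per probability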
def on_sighup(self, signal_unused, frame_unused):
    """Reload the configuration

    :param int signal_unused: Unused signal number
    :param frame frame_unused: Unused frame the signal was caught in
    """
    # Update HTTP configuration
    for setting in self.http_config:
        if getattr(self.http_server, setting) != self.http_config[setting]:
            LOGGER.debug('Changing HTTPServer %s setting', setting)
            setattr(self.http_server, setting, self.http_config[setting])

    # Update Application Settings
    for setting in self.settings:
        if self.app.settings[setting] != self.settings[setting]:
            LOGGER.debug('Changing Application %s setting', setting)
            self.app.settings[setting] = self.settings[setting]

    # Update the routes
    self.app.handlers = []
    self.app.named_handlers = {}
    routes = self.namespace.config.get(config.ROUTES)
    self.app.add_handlers(".*$", self.app.prepare_routes(routes))

    LOGGER.info('Configuration reloaded')
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Tensor, required.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
    and the original tensor are the same.
    """
    binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device)
    # Scale mask by 1/ keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
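# Usage sketch: the mask is applied manually, as the docstring notes.
import torch

activations = torch.randn(2, 4)
mask = get_dropout_mask(0.5, activations)
dropped = activations * mask  # surviving elements are scaled by 1 / (1 - 0.5) = 2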
def extract_features(self, text):
    """Extracts features from a body of text.

    :rtype: dictionary of features
    """
    # Feature extractor may take one or two arguments
    try:
        return self.feature_extractor(text, self.train_set)
    except (TypeError, AttributeError):
        return self.feature_extractor(text)
def pos_tag_sents(
    sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid"
) -> List[List[Tuple[str, str]]]:
    """
    Part of Speech tagging Sentence function.

    :param list sentences: a list of lists of tokenized words
    :param str engine:
        * unigram - unigram tagger
        * perceptron - perceptron tagger (default)
        * artagger - RDR POS tagger
    :param str corpus:
        * orchid - annotated Thai academic articles (default)
        * orchid_ud - annotated Thai academic articles using Universal Dependencies Tags
        * pud - Parallel Universal Dependencies (PUD) treebanks
    :return: returns a list of labels regarding which part of speech it is
    """
    if not sentences:
        return []

    return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]
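# Usage sketch; the import path assumes this is PyThaiNLP's tagger (assumption).
from pythainlp.tag import pos_tag_sents

sents = [["ผม", "กิน", "ข้าว"], ["เธอ", "อ่าน", "หนังสือ"]]
print(pos_tag_sents(sents))  # one list of (word, tag) tuples per sentence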
def _import_bibdoc():
    """Import BibDocFile."""
    try:
        from invenio.bibdocfile import BibRecDocs, BibDoc
    except ImportError:
        from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc
    return BibRecDocs, BibDoc
def _do_broker_main(self):
    """
    Broker thread main function. Dispatches IO events until
    :meth:`shutdown` is called.
    """
    # For Python 2.4, no way to retrieve ident except on thread.
    self._waker.broker_ident = thread.get_ident()
    try:
        while self._alive:
            self._loop_once()

        fire(self, 'shutdown')
        self._broker_shutdown()
    except Exception:
        LOG.exception('_broker_main() crashed')

    self._alive = False  # Ensure _alive is consistent on crash.
    self._exitted = True
    self._broker_exit()
def _auth(
    self,
    username,
    password,
    pkey,
    key_filenames,
    allow_agent,
    look_for_keys,
    gss_auth,
    gss_kex,
    gss_deleg_creds,
    gss_host,
    passphrase,
):
    """
    Try, in order:

        - The key(s) passed in, if one was passed in.
        - Any key we can find through an SSH agent (if allowed).
        - Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in ~/.ssh/
          (if allowed).
        - Plain username/password auth, if a password was given.

    (The password might be needed to unlock a private key [if 'passphrase'
    isn't also given], or for two-factor authentication [for which it is
    required].)
    """
    saved_exception = None
    two_factor = False
    allowed_types = set()
    two_factor_types = {"keyboard-interactive", "password"}
    if passphrase is None and password is not None:
        passphrase = password

    # If GSS-API support and GSS-PI Key Exchange was performed, we attempt
    # authentication with gssapi-keyex.
    if gss_kex and self._transport.gss_kex_used:
        try:
            self._transport.auth_gssapi_keyex(username)
            return
        except Exception as e:
            saved_exception = e

    # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
    # Exchange is not performed, because if we use GSS-API for the key
    # exchange, there is already a fully established GSS-API context, so
    # why should we do that again?
    if gss_auth:
        try:
            return self._transport.auth_gssapi_with_mic(
                username, gss_host, gss_deleg_creds
            )
        except Exception as e:
            saved_exception = e

    if pkey is not None:
        try:
            self._log(
                DEBUG,
                "Trying SSH key {}".format(
                    hexlify(pkey.get_fingerprint())
                ),
            )
            allowed_types = set(
                self._transport.auth_publickey(username, pkey)
            )
            two_factor = allowed_types & two_factor_types
            if not two_factor:
                return
        except SSHException as e:
            saved_exception = e

    if not two_factor:
        for key_filename in key_filenames:
            for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
                try:
                    key = self._key_from_filepath(
                        key_filename, pkey_class, passphrase
                    )
                    allowed_types = set(
                        self._transport.auth_publickey(username, key)
                    )
                    two_factor = allowed_types & two_factor_types
                    if not two_factor:
                        return
                    break
                except SSHException as e:
                    saved_exception = e

    if not two_factor and allow_agent:
        if self._agent is None:
            self._agent = Agent()

        for key in self._agent.get_keys():
            try:
                id_ = hexlify(key.get_fingerprint())
                self._log(DEBUG, "Trying SSH agent key {}".format(id_))
                # for 2-factor auth a successfully auth'd key password
                # will return an allowed 2fac auth method
                allowed_types = set(
                    self._transport.auth_publickey(username, key)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
                break
            except SSHException as e:
                saved_exception = e

    if not two_factor:
        keyfiles = []

        for keytype, name in [
            (RSAKey, "rsa"),
            (DSSKey, "dsa"),
            (ECDSAKey, "ecdsa"),
            (Ed25519Key, "ed25519"),
        ]:
            # ~/ssh/ is for windows
            for directory in [".ssh", "ssh"]:
                full_path = os.path.expanduser(
                    "~/{}/id_{}".format(directory, name)
                )
                if os.path.isfile(full_path):
                    # TODO: only do this append if below did not run
                    keyfiles.append((keytype, full_path))
                    if os.path.isfile(full_path + "-cert.pub"):
                        keyfiles.append((keytype, full_path + "-cert.pub"))

        if not look_for_keys:
            keyfiles = []

        for pkey_class, filename in keyfiles:
            try:
                key = self._key_from_filepath(
                    filename, pkey_class, passphrase
                )
                # for 2-factor auth a successfully auth'd key will result
                # in ['password']
                allowed_types = set(
                    self._transport.auth_publickey(username, key)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
                break
            except (SSHException, IOError) as e:
                saved_exception = e

    if password is not None:
        try:
            self._transport.auth_password(username, password)
            return
        except SSHException as e:
            saved_exception = e
    elif two_factor:
        try:
            self._transport.auth_interactive_dumb(username)
            return
        except SSHException as e:
            saved_exception = e

    # if we got an auth-failed exception earlier, re-raise it
    if saved_exception is not None:
        raise saved_exception
    raise SSHException("No authentication methods available")
def me(self):
    """ Returns :class:`Person <pypump.models.person.Person>` instance of the
    logged in user.

    Example:
        >>> pump.me
        <Person: bob@example.org>
    """
    if self._me is not None:
        return self._me

    self._me = self.Person("{username}@{server}".format(
        username=self.client.nickname,
        server=self.client.server,
    ))
    return self._me
def generate_type(self):
    """
    Validation of type. Can be one type or list of types.

    Since draft 06 a float without fractional part is an integer.

    .. code-block:: python

        {'type': 'string'}
        {'type': ['string', 'number']}
    """
    types = enforce_list(self._definition['type'])
    try:
        python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types)
    except KeyError as exc:
        raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))

    extra = ''
    if 'integer' in types:
        extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format(
            variable=self._variable,
        )

    if ('number' in types or 'integer' in types) and 'boolean' not in types:
        extra += ' or isinstance({variable}, bool)'.format(variable=self._variable)

    with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):
        self.l('raise JsonSchemaException("{name} must be {}")', ' or '.join(types))
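# Usage sketch: this generator method appears to come from a fastjsonschema-style
# compiler (assumption); a compiled validator then enforces the generated check.
import fastjsonschema

validate = fastjsonschema.compile({'type': 'integer'})
validate(3)    # ok
validate(3.0)  # ok - since draft 06 a float without fractional part is an integer
validate(True) # raises JsonSchemaException: bool is excluded from integer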
def reserve_time_slot(self, calendar_event, participant_id=None, **kwargs):
    """
    Return single Calendar Event by id

    :calls: `POST /api/v1/calendar_events/:id/reservations \
    <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.reserve>`_

    :param calendar_event: The object or ID of the calendar event.
    :type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int

    :param participant_id: The ID of the participant, if given.
    :type participant_id: str

    :rtype: :class:`canvasapi.calendar_event.CalendarEvent`
    """
    from canvasapi.calendar_event import CalendarEvent

    calendar_event_id = obj_or_id(calendar_event, "calendar_event", (CalendarEvent,))

    if participant_id:
        uri = 'calendar_events/{}/reservations/{}'.format(
            calendar_event_id, participant_id
        )
    else:
        uri = 'calendar_events/{}/reservations'.format(calendar_event_id)

    response = self.__requester.request(
        'POST',
        uri,
        _kwargs=combine_kwargs(**kwargs)
    )
    return CalendarEvent(self.__requester, response.json())
def upload_list(book_id_list, rdf_library=None):
    """ Uses the fetch, make, push subcommands to add a list of pg books
    """
    with open(book_id_list, 'r') as f:
        cache = {}
        for book_id in f:
            book_id = book_id.strip()
            try:
                if int(book_id) in missing_pgid:
                    print(u'missing\t{}'.format(book_id))
                    continue
                upload_book(book_id, rdf_library=rdf_library, cache=cache)
            except Exception as e:
                print(u'error\t{}'.format(book_id))
                logger.error(u"Error processing: {}\r{}".format(book_id, e))
def allstats(self, approximate=False):
    """
    Compute some basic raster statistics

    Parameters
    ----------
    approximate: bool
        approximate statistics from overviews or a subset of all tiles?

    Returns
    -------
    list of dicts
        a list with a dictionary of statistics for each band. Keys:
        `min`, `max`, `mean`, `sdev`.
        See :osgeo:meth:`gdal.Band.ComputeStatistics`.
    """
    statcollect = []
    for x in self.layers():
        try:
            stats = x.ComputeStatistics(approximate)
        except RuntimeError:
            stats = None
        # zip() on None would raise; keep None entries for failed bands.
        if stats is not None:
            stats = dict(zip(['min', 'max', 'mean', 'sdev'], stats))
        statcollect.append(stats)
    return statcollect
def get_by_name(account_name, bucket, region='us-west-2',
                json_path='accounts.json', alias=None):
    """Given an account name, attempts to retrieve associated account info."""
    for account in get_all_accounts(bucket, region, json_path)['accounts']:
        if 'aws' in account['type']:
            if account['name'] == account_name:
                return account
            elif alias:
                for a in account['alias']:
                    if a == account_name:
                        return account
def _create_dim_scales(self):
    """Create all necessary HDF5 dimension scales."""
    dim_order = self._dim_order.maps[0]
    for dim in sorted(dim_order, key=lambda d: dim_order[d]):
        if dim not in self._h5group:
            size = self._current_dim_sizes[dim]
            kwargs = {}
            if self._dim_sizes[dim] is None:
                kwargs["maxshape"] = (None,)
            self._h5group.create_dataset(
                name=dim, shape=(size,), dtype='S1', **kwargs)

        h5ds = self._h5group[dim]
        h5ds.attrs['_Netcdf4Dimid'] = dim_order[dim]

        if len(h5ds.shape) > 1:
            dims = self._variables[dim].dimensions
            coord_ids = np.array([dim_order[d] for d in dims], 'int32')
            h5ds.attrs['_Netcdf4Coordinates'] = coord_ids

        scale_name = dim if dim in self.variables else NOT_A_VARIABLE
        h5ds.dims.create_scale(h5ds, scale_name)

    for subgroup in self.groups.values():
        subgroup._create_dim_scales()
def future_timeout_manager(timeout=None, ioloop=None):
    """Create helper function for yielding with a cumulative timeout if required

    Keeps track of time over multiple timeout calls so that a single timeout
    can be placed over multiple operations.

    Parameters
    ----------
    timeout : int or None
        Timeout, or None for no timeout
    ioloop : IOLoop instance or None
        tornado IOloop instance to use, or None for IOLoop.current()

    Return value
    ------------
    maybe_timeout : func
        Accepts a future, and wraps it in :func:`tornado.gen.with_timeout`.
        maybe_timeout raises :class:`tornado.gen.TimeoutError` if the timeout expires.
        Has a function attribute `remaining()` that returns the remaining
        timeout, or None if timeout == None.

    Example
    -------
    ::

        @tornado.gen.coroutine
        def multi_op(timeout):
            maybe_timeout = future_timeout_manager(timeout)
            result1 = yield maybe_timeout(op1())
            result2 = yield maybe_timeout(op2())
            # If the cumulative time of op1 and op2 exceeds timeout,
            # :class:`tornado.gen.TimeoutError` is raised

    """
    ioloop = ioloop or tornado.ioloop.IOLoop.current()
    t0 = ioloop.time()

    def _remaining():
        return timeout - (ioloop.time() - t0) if timeout else None

    def maybe_timeout(f):
        """Applies timeout if timeout is not None"""
        if not timeout:
            return f
        else:
            remaining = _remaining()
            deadline = ioloop.time() + remaining
            return with_timeout(deadline, f, ioloop)

    maybe_timeout.remaining = _remaining

    return maybe_timeout
def modify_subscription_status(netid, subscription_code, status):
    """
    Post a subscription 'modify' action for the given netid
    and subscription_code
    """
    url = _netid_subscription_url(netid, subscription_code)
    body = {
        'action': 'modify',
        'value': str(status)
    }

    response = post_resource(url, json.dumps(body))
    return _json_to_subscriptions(response)
def _get_package(auth, owner, package_name):
    """
    Helper for looking up a package and checking permissions.
    Only useful for *_list functions; all others should use more efficient queries.
    """
    package = (
        Package.query
        .filter_by(owner=owner, name=package_name)
        .join(Package.access)
        .filter(_access_filter(auth))
        .one_or_none()
    )
    if package is None:
        raise PackageNotFoundException(owner, package_name, auth.is_logged_in)
    return package
def get_student_current_grade(self, username, course_id):
    """
    Returns a CurrentGrade object for the user in a course

    Args:
        username (str): an edx user's username
        course_id (str): an edX course id.

    Returns:
        CurrentGrade: object representing the student current grade for a course
    """
    # the request is done on behalf of the current logged in user
    resp = self.requester.get(
        urljoin(
            self.base_url,
            '/api/grades/v1/courses/{course_key}/?username={username}'.format(
                username=username,
                course_key=course_id
            )
        )
    )

    resp.raise_for_status()

    return CurrentGrade(resp.json()[0])
async def genTempCoreProxy(mods=None):
    '''Get a temporary cortex proxy.'''
    with s_common.getTempDir() as dirn:
        async with await s_cortex.Cortex.anit(dirn) as core:
            if mods:
                for mod in mods:
                    await core.loadCoreModule(mod)
            async with core.getLocalProxy() as prox:
                # Use object.__setattr__ to hulk smash and avoid proxy getattr magick
                object.__setattr__(prox, '_core', core)
                yield prox
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    if callable(fndoc):
        fndoc = general_identifier(fndoc)

    has_comment = bool(trailing_comment)

    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)

    kwargdocs = [
        # Propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]

    if not (argdocs or kwargdocs):
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])

    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )

    allarg_docs = [*argdocs, *kwargdocs]

    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))

    parts = []

    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1

        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None

        part = concat([doc, NIL if last else COMMA])

        if comment_str:
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )

        if not last:
            part = concat([part, HARDLINE if has_comment else LINE])

        parts.append(part)

    outer = (
        always_break
        if has_comment
        else group
    )

    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
def _get_default_parameter_values(sam_template):
    """
    Method to read default values for template parameters and return them

    Example:
    If the template contains the following parameters defined

    Parameters:
        Param1:
            Type: String
            Default: default_value1
        Param2:
            Type: String
            Default: default_value2

    then, this method will grab default values for both parameters and return
    the following result:
    {
        Param1: "default_value1",
        Param2: "default_value2"
    }

    :param dict sam_template: SAM template
    :return dict: Default values for parameters
    """
    default_values = {}

    parameter_definition = sam_template.get("Parameters", None)
    if not parameter_definition or not isinstance(parameter_definition, dict):
        LOG.debug("No Parameters detected in the template")
        return default_values

    for param_name, value in parameter_definition.items():
        if isinstance(value, dict) and "Default" in value:
            default_values[param_name] = value["Default"]

    LOG.debug("Collected default values for parameters: %s", default_values)
    return default_values
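# Usage sketch with an in-memory template dict (contents illustrative):
template = {
    "Parameters": {
        "Stage": {"Type": "String", "Default": "dev"},
        "MemorySize": {"Type": "Number"},  # no Default, so it is skipped
    }
}
print(_get_default_parameter_values(template))  # {'Stage': 'dev'}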
def create(self, language, tagged_text, source_channel=values.unset):
    """
    Create a new SampleInstance

    :param unicode language: The ISO language-country string that specifies the language used for the new sample
    :param unicode tagged_text: The text example of how end users might express the task
    :param unicode source_channel: The communication channel from which the new sample was captured

    :returns: Newly created SampleInstance
    :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
    """
    data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return SampleInstance(
        self._version,
        payload,
        assistant_sid=self._solution['assistant_sid'],
        task_sid=self._solution['task_sid'],
    )
def subcommand(self, *args):
    """Get subcommand acting on a service.

    Subcommand will run in service directory and with the environment
    variables used to run the service itself.

    Args:
        *args: Arguments to run command (e.g. "redis-cli", "-n", "1")

    Returns:
        Subcommand object.
    """
    return Subcommand(*args, directory=self.directory, env_vars=self.env_vars)
def arange(start, stop, step=1, **kwargs):
    """
    >>> a=arange(1,10) # 1:10
    >>> size(a)
    matlabarray([[ 1, 10]])
    """
    expand_value = 1 if step > 0 else -1
    return matlabarray(np.arange(start,
                                 stop + expand_value,
                                 step,
                                 **kwargs).reshape(1, -1),
                       **kwargs)
def static(self, uri, file_or_directory, *args, **kwargs):
    """Create a blueprint static route from a decorated function.

    :param uri: endpoint at which the route will be accessible.
    :param file_or_directory: Static asset.
    """
    name = kwargs.pop("name", "static")
    if not name.startswith(self.name + "."):
        name = "{}.{}".format(self.name, name)
    kwargs.update(name=name)

    strict_slashes = kwargs.get("strict_slashes")
    if strict_slashes is None and self.strict_slashes is not None:
        kwargs.update(strict_slashes=self.strict_slashes)

    static = FutureStatic(uri, file_or_directory, args, kwargs)
    self.statics.append(static)
def start_monitoring(self):
    """
    Pass IP lists to monitor sub-plugins and get results from them.

    Override the common definition of this function, since in the multi
    plugin it's a little different: Instead of monitoring ourselves, we
    just use a number of other plugins to gather results. The multi
    plugin just serves as a proxy and (de)multiplexer for those other
    plugins.

    Note that we don't have to push any updates about failed IPs if
    nothing new was detected. Therefore, our own updates can be entirely
    driven by updates from the sub-plugin, which keeps our architecture
    simple.
    """
    logging.info("Multi-plugin health monitor: Started in thread.")

    try:
        while True:
            # Get new IP addresses and pass them on to the sub-plugins
            new_ips = self.get_new_working_set()
            if new_ips:
                logging.debug("Sending list of %d IPs to %d plugins." %
                              (len(new_ips), len(self.plugins)))
                for q in self.monitor_ip_queues.values():
                    q.put(new_ips)

            # Get any notifications about failed or questionable IPs from
            # the plugins.
            all_failed_ips = self._accumulate_ips_from_plugins(
                "failed", self.failed_queue_lookup,
                self.report_failed_acc)
            if all_failed_ips:
                self.q_failed_ips.put(all_failed_ips)

            all_questionable_ips = self._accumulate_ips_from_plugins(
                "questionable", self.questionable_queue_lookup,
                self.report_questionable_acc)
            if all_questionable_ips:
                self.q_questionable_ips.put(all_questionable_ips)

            time.sleep(self.get_monitor_interval())

    except common.StopReceived:
        # Received the stop signal, just exiting the thread function
        return
def merge_elisions(elided: List[str]) -> str:
    """
    Given a list of strings with different space swapping elisions applied, merge the elisions,
    taking the most without compounding the omissions.

    :param elided:
    :return:

    >>> merge_elisions([
    ... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"])
    'ignav agua mult hiatus'
    """
    results = list(elided[0])
    for line in elided:
        for idx, car in enumerate(line):
            if car == " ":
                results[idx] = " "
    return "".join(results)
def partial_fit(self, X, y):
    """
    :X: {array-like, sparse matrix}, shape [n_samples, n_features]
        The data used to compute the mean and standard deviation
        used for later scaling along the features axis.
    :y: Healthy 'h' or 'sick_name'
    """
    X, y = filter_by_label(X, y, self.reference_label)
    super().partial_fit(X, y)
    return self
def gen_add(src1, src2, dst): """Return an ADD instruction. """ assert src1.size == src2.size return ReilBuilder.build(ReilMnemonic.ADD, src1, src2, dst)
Return an ADD instruction.
def seek(self, offset, whence=None, partition=None): """ Alter the current offset in the consumer, similar to fseek Arguments: offset: how much to modify the offset whence: where to modify it from, default is None * None is an absolute offset * 0 is relative to the earliest available offset (head) * 1 is relative to the current offset * 2 is relative to the latest known offset (tail) partition: modify which partition, default is None. If partition is None, would modify all partitions. """ if whence is None: # set an absolute offset if partition is None: for tmp_partition in self.offsets: self.offsets[tmp_partition] = offset else: self.offsets[partition] = offset elif whence == 1: # relative to current position if partition is None: for tmp_partition, _offset in self.offsets.items(): self.offsets[tmp_partition] = _offset + offset else: self.offsets[partition] += offset elif whence in (0, 2): # relative to beginning or end reqs = [] deltas = {} if partition is None: # divide the request offset by number of partitions, # distribute the remained evenly (delta, rem) = divmod(offset, len(self.offsets)) for tmp_partition, r in izip_longest(self.offsets.keys(), repeat(1, rem), fillvalue=0): deltas[tmp_partition] = delta + r for tmp_partition in self.offsets.keys(): if whence == 0: reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -2, 1)) elif whence == 2: reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -1, 1)) else: pass else: deltas[partition] = offset if whence == 0: reqs.append(OffsetRequestPayload(self.topic, partition, -2, 1)) elif whence == 2: reqs.append(OffsetRequestPayload(self.topic, partition, -1, 1)) else: pass resps = self.client.send_offset_request(reqs) for resp in resps: self.offsets[resp.partition] = \ resp.offsets[0] + deltas[resp.partition] else: raise ValueError('Unexpected value for `whence`, %d' % (whence,)) # Reset queue and fetch offsets since they are invalid self.fetch_offsets = self.offsets.copy() self.count_since_commit += 1 if self.auto_commit: self.commit() self.queue = queue.Queue()
Alter the current offset in the consumer, similar to fseek Arguments: offset: how much to modify the offset whence: where to modify it from, default is None * None is an absolute offset * 0 is relative to the earliest available offset (head) * 1 is relative to the current offset * 2 is relative to the latest known offset (tail) partition: modify which partition, default is None. If partition is None, would modify all partitions.
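A hedged usage sketch, assuming `consumer` is an initialised kafka-python SimpleConsumer for a topic with several partitions (the offsets are illustrative):

# Rewind every partition to its earliest available offset.
consumer.seek(0, 0)
# Start 10 messages before the tail, distributed across partitions.
consumer.seek(-10, 2)
# Skip 5 messages forward on partition 3 only.
consumer.seek(5, 1, partition=3)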
def _message_to_payload(cls, message): '''Returns a Python object or a ProtocolError.''' try: return json.loads(message.decode()) except UnicodeDecodeError: message = 'messages must be encoded in UTF-8' except json.JSONDecodeError: message = 'invalid JSON' raise cls._error(cls.PARSE_ERROR, message, True, None)
Returns a Python object or a ProtocolError.
def picknthweekday(year, month, dayofweek, hour, minute, whichweek): """dayofweek == 0 means Sunday, whichweek 5 means last instance""" first = datetime.datetime(year, month, 1, hour, minute) # Works for both ISO (1-7) and Windows-style (0-6) weekdays, since 7 % 7 == 0. weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) # weekdayone is the first matching weekday of the month; step forward to the # requested occurrence and fall back one week if that overshoots the month # (which is how whichweek == 5 resolves to the last instance). wd = weekdayone + (whichweek - 1) * ONEWEEK if wd.month != month: wd -= ONEWEEK return wd
dayofweek == 0 means Sunday, whichweek 5 means last instance
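A quick worked example of the corrected logic, assuming the module-level ONEWEEK = datetime.timedelta(days=7) used above:

import datetime

# Second Sunday of March 2024 at 02:00 -- the US DST transition instant.
assert picknthweekday(2024, 3, 0, 2, 0, 2) == datetime.datetime(2024, 3, 10, 2, 0)
# whichweek=5 clamps to the last instance: the last Sunday of February 2024.
assert picknthweekday(2024, 2, 0, 0, 0, 5) == datetime.datetime(2024, 2, 25)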
def cornice_enable_openapi_explorer( config, api_explorer_path='/api-explorer', permission=NO_PERMISSION_REQUIRED, route_factory=None, **kwargs): """ :param config: Pyramid configurator object :param api_explorer_path: where to expose Swagger UI interface view :param permission: pyramid permission for those views :param route_factory: factory for context object for those routes This registers and configures the view that serves api explorer """ config.add_route('cornice_swagger.api_explorer_path', api_explorer_path, factory=route_factory) config.add_view('cornice_swagger.views.swagger_ui_template_view', permission=permission, route_name='cornice_swagger.api_explorer_path')
:param config: Pyramid configurator object :param api_explorer_path: where to expose Swagger UI interface view :param permission: pyramid permission for those views :param route_factory: factory for context object for those routes This registers and configures the view that serves api explorer
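A hedged wiring sketch for a Pyramid app factory; the 'cornice' and 'cornice_swagger' includes follow those packages' documented setup, and the explorer path is illustrative:

from pyramid.config import Configurator

def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.include('cornice')
    config.include('cornice_swagger')
    cornice_enable_openapi_explorer(config, api_explorer_path='/api-explorer')
    return config.make_wsgi_app()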
def check_species_object(species_name_or_object): """ Helper for validating user supplied species names or objects. """ if isinstance(species_name_or_object, Species): return species_name_or_object elif isinstance(species_name_or_object, str): return find_species_by_name(species_name_or_object) else: raise ValueError("Unexpected type for species: %s : %s" % ( species_name_or_object, type(species_name_or_object)))
Helper for validating user supplied species names or objects.
def ensemble(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400, start=0.5, **problem): """ **Ensemble MCMC** via `emcee <http://dan.iel.fm/emcee/>`_ """ import emcee import progressbar if 'seed' in problem: numpy.random.seed(problem['seed']) n_params = len(parameter_names) nwalkers = 50 + n_params * 2 if nwalkers > 200: nwalkers = 200 p0 = [numpy.random.rand(n_params) for i in range(nwalkers)] start = start + numpy.zeros(n_params) p0[0] = start def like(cube): cube = numpy.array(cube) if (cube <= 1e-10).any() or (cube >= 1-1e-10).any(): return -1e100 params = transform(cube) return loglikelihood(params) sampler = emcee.EnsembleSampler(nwalkers, n_params, like, live_dangerously=True) print('burn-in...') pos, prob, state = sampler.run_mcmc(p0, nburn // nwalkers) # Reset the chain to remove the burn-in samples. sampler.reset() print('running ...') # Starting from the final position in the burn-in chain, sample pbar = progressbar.ProgressBar( widgets=[progressbar.Percentage(), progressbar.Counter('%5d'), progressbar.Bar(), progressbar.ETA()], maxval=nsteps).start() for results in sampler.sample(pos, iterations=nsteps // nwalkers, rstate0=state): pbar.update(pbar.currval + 1) pbar.finish() print("Mean acceptance fraction:", numpy.mean(sampler.acceptance_fraction)) chain = sampler.flatchain final = chain[-1] print('postprocessing...') chain_post = numpy.array([transform(params) for params in chain]) chain_prob = sampler.flatlnprobability return dict(start=final, chain=chain_post, chain_prior=chain, chain_prob=chain_prob, method='Ensemble MCMC')
**Ensemble MCMC** via `emcee <http://dan.iel.fm/emcee/>`_
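A hedged usage sketch: the sampler above expects parameters on the unit cube, with `transform` mapping cube coordinates to physical values; the toy Gaussian likelihood below is illustrative only:

import numpy
import scipy.stats

def transform(cube):
    # mu ~ Normal(0, 1), sigma log-uniform over [1e-2, 1e2]
    return [scipy.stats.norm.ppf(cube[0]), 10 ** (4 * cube[1] - 2)]

def loglikelihood(params):
    mu, sigma = params
    return -0.5 * ((1.0 - mu) / sigma) ** 2 - numpy.log(sigma)

result = ensemble(transform, loglikelihood, parameter_names=['mu', 'sigma'],
                  nsteps=4000, nburn=400)
print(result['chain'].shape)   # posterior samples in physical units, one row per kept step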
def get_next_line(self): """If we reach the end of the file, we simply open the next, until we run out of archives to process""" line = self.freq_file.readline().strip().split() if len(line) < 1: self.load_genotypes() line = self.freq_file.readline().strip().split() info_line = self.info_file.readline().strip().split() info = float(info_line[4]) exp_freq = float(info_line[3]) return line, info, exp_freq
If we reach the end of the file, we simply open the next, until we run out of archives to process
def complete_token_filtered_with_next(aliases, prefix, expanded, commands): """Find all starting matches in dictionary *aliases* that start with *prefix*, but filter out any matches already in *expanded*.""" complete_ary = list(aliases.keys()) expanded_ary = list(expanded.keys()) result = [] for cmd in complete_ary: if cmd.startswith(prefix): if cmd in aliases and ( 0 == len(set(expanded_ary) - set([aliases[cmd]]))): result.append([cmd, aliases[cmd]]) return sorted(result, key=lambda pair: pair[0])
Find all starting matches in dictionary *aliases* that start with *prefix*, but filter out any matches already in *expanded*.
def validate_scopes(value_list): """Validate if each element in a list is a registered scope. :param value_list: The list of scopes. :raises invenio_oauth2server.errors.ScopeDoesNotExists: The exception is raised if a scope is not registered. :returns: ``True`` if it's successfully validated. """ for value in value_list: if value not in current_oauth2server.scopes: raise ScopeDoesNotExists(value) return True
Validate if each element in a list is a registered scope. :param value_list: The list of scopes. :raises invenio_oauth2server.errors.ScopeDoesNotExists: The exception is raised if a scope is not registered. :returns: ``True`` if it's successfully validated.
def to_keypoints(self): """ Convert this polygon's `exterior` to ``Keypoint`` instances. Returns ------- list of imgaug.Keypoint Exterior vertices as ``Keypoint`` instances. """ # TODO get rid of this deferred import from imgaug.augmentables.kps import Keypoint return [Keypoint(x=point[0], y=point[1]) for point in self.exterior]
Convert this polygon's `exterior` to ``Keypoint`` instances. Returns ------- list of imgaug.Keypoint Exterior vertices as ``Keypoint`` instances.
def mouse_move_event(self, event): """ Forward mouse cursor position events to the example """ self.example.mouse_position_event(event.x(), event.y())
Forward mouse cursor position events to the example
def to_transfac(self): """Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. """ m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown") for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())): m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons) m += "XX" return m
Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format.
def _is_primitive(thing): """Determine if the value is a primitive""" primitive = (int, str, bool, float) return isinstance(thing, primitive)
Determine if the value is a primitive
def hasModule(self, mod): """Returns True if mod is among the loaded modules. @param mod: Module name. @return: Boolean """ if self._modules is None: self._initModuleList() return mod in self._modules
Returns True if mod is among the loaded modules. @param mod: Module name. @return: Boolean
def validate_address(address: Any) -> None: """ Raise a ValidationError if an address is not canonicalized. """ if not is_address(address): raise ValidationError(f"Expected an address, got: {address}") if not is_canonical_address(address): raise ValidationError( "Py-EthPM library only accepts canonicalized addresses. " f"{address} is not in the accepted format." )
Raise a ValidationError if an address is not canonicalized.
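A hedged usage sketch; `to_canonical_address` is the eth_utils helper that converts a checksummed hex string to the 20-byte canonical form this validator requires (the address itself is illustrative):

from eth_utils import to_canonical_address

hex_addr = '0xd3CdA913deB6f67967B99D67aCDFa1712C293601'
validate_address(to_canonical_address(hex_addr))   # passes silently
validate_address(hex_addr)                         # raises ValidationError: not canonicalized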
def deleteSelected(self): 'Delete all selected rows.' ndeleted = self.deleteBy(self.isSelected) nselected = len(self._selectedRows) self._selectedRows.clear() if ndeleted != nselected: error('deleted %s rows, expected %s' % (ndeleted, nselected))
Delete all selected rows.
def program_supports_compression (program, compression): """Decide if the given program supports the compression natively. @return: True iff the program supports the given compression format natively, else False. """ if program in ('tar', ): return compression in ('gzip', 'bzip2', 'xz', 'lzip', 'compress', 'lzma') + py_lzma elif program in ('star', 'bsdtar', 'py_tarfile'): return compression in ('gzip', 'bzip2') + py_lzma return False
Decide if the given program supports the compression natively. @return: True iff the program supports the given compression format natively, else False.
def read_int32(self, little_endian=True): """ Read 4 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: """ if little_endian: endian = "<" else: endian = ">" return self.unpack('%si' % endian, 4)
Read 4 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
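A hedged sketch of the equivalent raw struct call, useful for checking the endianness semantics; io.BytesIO stands in for whatever file-like object the stream class wraps:

import io
import struct

buf = io.BytesIO(struct.pack('<i', -1234))
raw = buf.read(4)
assert struct.unpack('<i', raw)[0] == -1234   # little-endian signed 32-bit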
def create_objective_bank_hierarchy(self, alias, desc, genus): """ Create a bank hierarchy with the given alias :param alias: display name; also lowercased and stripped of spaces to form the id :param desc: description text for the hierarchy :param genus: genus type, converted to its string id :return: the POST response for the new hierarchy """ url_path = self._urls.hierarchy() data = { 'id': re.sub(r'[ ]', '', alias.lower()), 'displayName': { 'text': alias }, 'description': { 'text': desc }, 'genusTypeId': str(genus) } return self._post_request(url_path, data)
Create a bank hierarchy with the given alias :param alias: display name; also lowercased and stripped of spaces to form the id :param desc: description text for the hierarchy :param genus: genus type, converted to its string id :return: the POST response for the new hierarchy
def MGMT_COMM_SET(self, Addr='ff02::1', xCommissionerSessionID=None, xSteeringData=None, xBorderRouterLocator=None, xChannelTlv=None, ExceedMaxPayload=False): """send MGMT_COMM_SET command Returns: True: successful to send MGMT_COMM_SET False: fail to send MGMT_COMM_SET """ print('%s call MGMT_COMM_SET' % self.port) try: cmd = WPANCTL_CMD + 'commissioner mgmt-set ' print("-------------------------------") print(xCommissionerSessionID) print(xSteeringData) print(str(xSteeringData) + ' ' + str(hex(xSteeringData)[2:])) print(xBorderRouterLocator) print(xChannelTlv) print(ExceedMaxPayload) print("-------------------------------") if xCommissionerSessionID is not None: # use assigned session id cmd += '0b02' + str(xCommissionerSessionID) else: # use original session id if self.isActiveCommissioner is True: cmd += '0b02' + self.__getCommissionerSessionId().lstrip('0x') if xSteeringData is not None: cmd += '08' + str(len(hex(xSteeringData)[2:])) + str(hex(xSteeringData)[2:]) if xBorderRouterLocator is not None: cmd += '0902' + str(hex(xBorderRouterLocator)) if xChannelTlv is not None: cmd += '000300' + hex(xChannelTlv).lstrip('0x').zfill(4) print(cmd) return self.__sendCommand(cmd)[0] != 'Fail' except Exception as e: ModuleHelper.WriteIntoDebugLogger('MGMT_COMM_SET() Error: ' + str(e))
send MGMT_COMM_SET command Returns: True: successful to send MGMT_COMM_SET False: fail to send MGMT_COMM_SET
def add_announcement_view(request): """Add an announcement.""" if request.method == "POST": form = AnnouncementForm(request.POST) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.content = safe_html(obj.content) obj.save() announcement_posted_hook(request, obj) messages.success(request, "Successfully added announcement.") return redirect("index") else: messages.error(request, "Error adding announcement") else: form = AnnouncementForm() return render(request, "announcements/add_modify.html", {"form": form, "action": "add"})
Add an announcement.
async def identify(self, request): """ Extract the authenticated identity from the request """ if hasattr(request, '_session_identity'): return request._session_identity token = request.cookies.get(self._cookie_name) if token is None: token = getAuthorizationTokenFromHeader(request) if token is None: raise Unauthorized('no authentication credentials') identity = await self.decode_jwt(token) setattr(request, '_session_identity', identity) return identity
Extract the authenticated identity from the request
def update_allowed(self): """Determines whether update of given cell is allowed. Calls allowed action of defined UpdateAction of the Column. """ return self.update_action.allowed(self.column.table.request, self.datum, self)
Determines whether update of given cell is allowed. Calls allowed action of defined UpdateAction of the Column.
def plugin_is_enabled(name, runas=None): ''' Return whether the plugin is enabled. CLI Example: .. code-block:: bash salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name ''' if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.user.get_user() return name in list_enabled_plugins(runas)
Return whether the plugin is enabled. CLI Example: .. code-block:: bash salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
def _get_line_type(line): ''' Decide the line type based on its contents ''' stripped = line.strip() if not stripped: return 'empty' remainder = re.sub(r"\s+", " ", re.sub(CHORD_RE, "", stripped)) if len(remainder) * 2 < len(re.sub(r"\s+", " ", stripped)): return 'chord' return 'lyric'
Decide the line type based on its contents
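A hedged sketch of the heuristic in action, assuming the module-level CHORD_RE matches chord symbols such as "Am", "F" or "C"; a line counts as chords when stripping them removes more than half of it:

print(_get_line_type("Am        F          C"))            # 'chord'
print(_get_line_type("I heard there was a secret chord"))  # 'lyric'
print(_get_line_type("   "))                               # 'empty'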
def _load_config(initial_namespace=None, defaults=None): # type: (Optional[str], Optional[Any]) -> ConfigLoader """ Kwargs: initial_namespace: namespace prefix used to filter config keys; may be overridden by a `CONFIG_NAMESPACE` attribute on the defaults object defaults: object (or import path) providing the default settings """ config = ConfigLoader() # load defaults (config must exist even when no defaults are given) if defaults: config.update_from_object(defaults) namespace = getattr(config, 'CONFIG_NAMESPACE', initial_namespace) app_config = getattr(config, 'APP_CONFIG', None) # load customised config if app_config: if namespace is None: config.update_from_object(app_config) else: _temp = ConfigLoader() _temp.update_from_object(app_config, lambda key: key.startswith(namespace)) config.update(_temp.namespace(namespace)) return config
Kwargs: initial_namespace: namespace prefix used to filter config keys; may be overridden by a `CONFIG_NAMESPACE` attribute on the defaults object defaults: object (or import path) providing the default settings
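A hedged usage sketch; the attribute names follow the conventions in the code above, and the settings object and dotted path are purely illustrative:

class Defaults:
    CONFIG_NAMESPACE = 'MYAPP'
    APP_CONFIG = 'myapp.settings'   # hypothetical dotted path to user settings
    MYAPP_DEBUG = False

config = _load_config(defaults=Defaults)
# keys in APP_CONFIG prefixed with MYAPP are merged in
# (assuming ConfigLoader.namespace() strips the prefix)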
def GetPossibleGroup(self): """Returns a possible group from the end of the call queue or None if no other methods are on the stack. """ # Remove this method from the tail of the queue so we can add it to a group. this_method = self._call_queue.pop() assert this_method == self # Determine if the tail of the queue is a group, or just a regular ordered # mock method. group = None try: group = self._call_queue[-1] except IndexError: pass return group
Returns a possible group from the end of the call queue or None if no other methods are on the stack.
def url_builder(self, endpoint, params=None, url_params=None): """Add authentication URL parameter.""" if url_params is None: url_params = OrderedDict() url_params[self.AUTH_PARAM] = self.api_token return super().url_builder( endpoint, params=params, url_params=url_params, )
Add authentication URL parameter.
def populate_timestamps(self,update_header=False): """ Populate the time axis. If update_header is True, return only the first timestamp (tstart). """ # Check to see how many integrations were requested ii_start, ii_stop = 0, self.n_ints_in_file if self.t_start: ii_start = self.t_start if self.t_stop: ii_stop = self.t_stop ## Setup time axis t0 = self.header[b'tstart'] t_delt = self.header[b'tsamp'] if update_header: timestamps = ii_start * t_delt / 24./60./60. + t0 else: timestamps = np.arange(ii_start, ii_stop) * t_delt / 24./60./60. + t0 return timestamps
Populate the time axis. If update_header is True, return only the first timestamp (tstart).
def persist_revision_once(tokens, revision): """ This function makes sure that each token is marked as persisting in a revision only once. This is important since some diff algorithms allow tokens to be copied more than once in a revision. id(token) is unique to the in-memory representation of any object, so we use it as the unique token instance identifier. """ token_map = {id(token):token for token in tokens} for token in token_map.values(): token.persist(revision)
This function makes sure that each token is marked as persisting in a revision only once. This is important since some diff algorithms allow tokens to be copied more than once in a revision. id(token) is unique to the in-memory representation of any object, so we use it as the unique token instance identifier.
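A tiny illustration of the de-duplication, with a hypothetical Token class standing in for whatever token type the diff produces:

class Token:
    def __init__(self, text):
        self.text, self.revisions = text, []
    def persist(self, revision):
        self.revisions.append(revision)

t = Token("foo")
persist_revision_once([t, t, Token("bar")], revision=42)
assert t.revisions == [42]   # persisted once despite appearing twice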
async def start(self): """Start all adapters managed by this device adapter. If there is an error starting one or more adapters, this method will stop any adapters that we successfully started and raise an exception. """ successful = 0 try: for adapter in self.adapters: await adapter.start() successful += 1 self._started = True except: for adapter in self.adapters[:successful]: await adapter.stop() raise
Start all adapters managed by this device adapter. If there is an error starting one or more adapters, this method will stop any adapters that we successfully started and raise an exception.
def clear(self): """Completely clear a Node of all its cached state (so that it can be re-evaluated by interfaces that do continuous integration builds). """ # The del_binfo() call here isn't necessary for normal execution, # but is for interactive mode, where we might rebuild the same # target and need to start from scratch. self.del_binfo() self.clear_memoized_values() self.ninfo = self.new_ninfo() self.executor_cleanup() try: delattr(self, '_calculated_sig') except AttributeError: pass self.includes = None
Completely clear a Node of all its cached state (so that it can be re-evaluated by interfaces that do continuous integration builds).
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0): """ Filters variants down to those which overlap a gene whose expression value in the gene_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_any_above_threshold( multi_key_fn=lambda effect: effect.gene_ids, value_dict=gene_expression_dict, threshold=min_expression_value)
Filters variants down to those which overlap a gene whose expression value in the gene_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
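A hedged usage sketch: `effects` is assumed to be a varcode effect collection exposing this method, with expression estimates keyed by Ensembl gene id (the TPM values are illustrative):

expression = {'ENSG00000141510': 12.7, 'ENSG00000012048': 0.4}
expressed = effects.filter_by_gene_expression(expression, min_expression_value=1.0)
# keeps only effects on genes expressed above 1.0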
def get_coordination_symmetry_measures(self, only_minimum=True, all_csms=True, optimization=None): """ Returns the continuous symmetry measures of the current local geometry in a dictionary. :return: the continuous symmetry measures of the current local geometry in a dictionary. """ test_geometries = self.allcg.get_implemented_geometries( len(self.local_geometry.coords)) if len(self.local_geometry.coords) == 1: if len(test_geometries) == 0: return {} result_dict = {'S:1': {'csm': 0.0, 'indices': [0], 'algo': 'EXPLICIT', 'local2perfect_map': {0: 0}, 'perfect2local_map': {0: 0}, 'scaling_factor': None, 'rotation_matrix': None, 'translation_vector': None}} if all_csms: for csmtype in ['wocs_ctwocc', 'wocs_ctwcc', 'wocs_csc', 'wcs_ctwocc', 'wcs_ctwcc', 'wcs_csc']: result_dict['S:1']['csm_{}'.format(csmtype)] = 0.0 result_dict['S:1']['scaling_factor_{}'.format(csmtype)] = None result_dict['S:1']['rotation_matrix_{}'.format(csmtype)] = None result_dict['S:1']['translation_vector_{}'.format(csmtype)] = None return result_dict result_dict = {} for geometry in test_geometries: self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry, centering_type=self.centering_type, include_central_site_in_centroid= self.include_central_site_in_centroid) points_perfect = self.perfect_geometry.points_wcs_ctwcc() cgsm = self.coordination_geometry_symmetry_measures(geometry, points_perfect=points_perfect, optimization=optimization) result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm if only_minimum: if len(result) > 0: imin = np.argmin([rr['symmetry_measure'] for rr in result]) if geometry.algorithms is not None: algo = algos[imin] else: algo = algos result_dict[geometry.mp_symbol] = {'csm': result[imin]['symmetry_measure'], 'indices': permutations[ imin], 'algo': algo, 'local2perfect_map': local2perfect_maps[ imin], 'perfect2local_map': perfect2local_maps[ imin], 'scaling_factor': 1.0 / result[imin]['scaling_factor'], 'rotation_matrix': np.linalg.inv(result[imin]['rotation_matrix']), 'translation_vector': result[imin]['translation_vector']} if all_csms: self._update_results_all_csms(result_dict, permutations, imin, geometry) else: result_dict[geometry.mp_symbol] = {'csm': result, 'indices': permutations, 'algo': algos, 'local2perfect_map': local2perfect_maps, 'perfect2local_map': perfect2local_maps} return result_dict
Returns the continuous symmetry measures of the current local geometry in a dictionary. :return: the continuous symmetry measures of the current local geometry in a dictionary.
def handle(request, message=None, redirect=None, ignore=False, escalate=False, log_level=None, force_log=None): """Centralized error handling for Horizon. Because Horizon consumes so many different APIs with completely different ``Exception`` types, it's necessary to have a centralized place for handling exceptions which may be raised. Exceptions are roughly divided into 3 types: #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization problems. These result in being logged out and sent to the login screen. #. ``NOT_FOUND``: Errors resulting from objects which could not be located via the API. These generally result in a user-facing error message, but are otherwise returned to the normal code flow. Optionally a redirect value may be passed to the error handler so users are returned to a different view than the one requested in addition to the error message. #. ``RECOVERABLE``: Generic API errors which generate a user-facing message but drop directly back to the regular code flow. All other exceptions bubble the stack as normal unless the ``ignore`` argument is passed in as ``True``, in which case only unrecognized errors are bubbled. If the exception is not re-raised, an appropriate wrapper exception class indicating the type of exception that was encountered will be returned. """ exc_type, exc_value, exc_traceback = sys.exc_info() log_method = getattr(LOG, log_level or "exception") force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False) force_silence = getattr(exc_value, "silence_logging", False) # Because the same exception may travel through this method more than # once (if it's re-raised) we may want to treat it differently # the second time (e.g. no user messages/logging). handled = issubclass(exc_type, HandledException) wrap = False # Restore our original exception information, but re-wrap it at the end if handled: exc_type, exc_value, exc_traceback = exc_value.wrapped wrap = True log_entry = encoding.force_text(exc_value) user_message = "" # We trust messages from our own exceptions if issubclass(exc_type, HorizonException): user_message = log_entry # If the message has a placeholder for the exception, fill it in elif message and "%(exc)s" in message: user_message = encoding.force_text(message) % {"exc": log_entry} elif message: user_message = encoding.force_text(message) for exc_handler in HANDLE_EXC_METHODS: if issubclass(exc_type, exc_handler['exc']): if exc_handler['set_wrap']: wrap = True handler = exc_handler['handler'] ret = handler(request, user_message, redirect, ignore, exc_handler.get('escalate', escalate), handled, force_silence, force_log, log_method, log_entry, log_level) if ret: return ret # return to normal code flow # If we've gotten here, time to wrap and/or raise our exception. if wrap: raise HandledException([exc_type, exc_value, exc_traceback]) # assume exceptions handled in the code that pass in a message are already # handled appropriately and treat as recoverable if message: ret = handle_recoverable(request, user_message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level) # pylint: disable=using-constant-test if ret: return ret six.reraise(exc_type, exc_value, exc_traceback)
Centralized error handling for Horizon. Because Horizon consumes so many different APIs with completely different ``Exception`` types, it's necessary to have a centralized place for handling exceptions which may be raised. Exceptions are roughly divided into 3 types: #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization problems. These result in being logged out and sent to the login screen. #. ``NOT_FOUND``: Errors resulting from objects which could not be located via the API. These generally result in a user-facing error message, but are otherwise returned to the normal code flow. Optionally a redirect value may be passed to the error handler so users are returned to a different view than the one requested in addition to the error message. #. ``RECOVERABLE``: Generic API errors which generate a user-facing message but drop directly back to the regular code flow. All other exceptions bubble the stack as normal unless the ``ignore`` argument is passed in as ``True``, in which case only unrecognized errors are bubbled. If the exception is not re-raised, an appropriate wrapper exception class indicating the type of exception that was encountered will be returned.
def ensemble_change_summary(ensemble1, ensemble2, pst, bins=10, facecolor='0.5', logger=None, filename=None, **kwargs): """helper function to plot first and second moment change histograms Parameters ---------- ensemble1 : varies str or pd.DataFrame ensemble2 : varies str or pd.DataFrame pst : pyemu.Pst pst instance facecolor : str the histogram facecolor. filename : str the name of the pdf to create. If None, return figs without saving. Default is None. """ if logger is None: logger = Logger('Default_Logger.log', echo=False) logger.log("plot ensemble change") if isinstance(ensemble1, str): ensemble1 = pd.read_csv(ensemble1, index_col=0) ensemble1.columns = ensemble1.columns.str.lower() if isinstance(ensemble2, str): ensemble2 = pd.read_csv(ensemble2, index_col=0) ensemble2.columns = ensemble2.columns.str.lower() # better to ensure this is caught by pestpp-ies ensemble csvs unnamed1 = [col for col in ensemble1.columns if "unnamed:" in col] if len(unnamed1) != 0: # unnamed col is the result of a poor csv read only (ie last col) ensemble1 = ensemble1.iloc[:, :-1] unnamed2 = [col for col in ensemble2.columns if "unnamed:" in col] if len(unnamed2) != 0: # unnamed col is the result of a poor csv read only (ie last col) ensemble2 = ensemble2.iloc[:, :-1] d = set(ensemble1.columns).symmetric_difference(set(ensemble2.columns)) if len(d) != 0: logger.lraise("ensemble1 does not have the same columns as ensemble2: {0}". format(','.join(d))) if "grouper" in kwargs: raise NotImplementedError() else: en_cols = set(ensemble1.columns) if len(en_cols.symmetric_difference(set(pst.par_names))) == 0: par = pst.parameter_data.loc[pst.adj_par_names, :] grouper = par.groupby(par.pargp).groups grouper["all"] = pst.adj_par_names li = par.loc[par.partrans == "log", "parnme"] ensemble1.loc[:, li] = ensemble1.loc[:, li].apply(np.log10) ensemble2.loc[:, li] = ensemble2.loc[:, li].apply(np.log10) elif len(en_cols.symmetric_difference(set(pst.obs_names))) == 0: obs = pst.observation_data.loc[pst.nnz_obs_names, :] grouper = obs.groupby(obs.obgnme).groups grouper["all"] = pst.nnz_obs_names else: logger.lraise("could not match ensemble cols with par or obs...") en1_mn, en1_std = ensemble1.mean(axis=0), ensemble1.std(axis=0) en2_mn, en2_std = ensemble2.mean(axis=0), ensemble2.std(axis=0) mn_diff = -1 * (en2_mn - en1_mn) std_diff = 100 * ((en1_std - en2_std) / en1_std) # set entries where en1_std == 0 to nan to avoid division artifacts std_diff[en1_std.index[en1_std == 0]] = np.nan fig = plt.figure(figsize=figsize) if "fig_title" in kwargs: plt.figtext(0.5, 0.5, kwargs["fig_title"]) else: plt.figtext(0.5, 0.5, "ensemble_change_summary()\nfrom pest control file '{0}'\n at {1}" .format(pst.filename, str(datetime.now())), ha="center") figs = [] ax_count = 0 for g, names in grouper.items(): logger.log("plotting change for {0}".format(g)) mn_g = mn_diff.loc[names] std_g = std_diff.loc[names] if mn_g.shape[0] == 0: logger.statement("no entries for group '{0}'".format(g)) logger.log("plotting change for {0}".format(g)) continue if ax_count % (nr * nc) == 0: if ax_count > 0: plt.tight_layout() figs.append(fig) fig = plt.figure(figsize=figsize) axes = get_page_axes() ax_count = 0 ax = axes[ax_count] mn_g.hist(ax=ax, facecolor=facecolor, alpha=0.5, edgecolor=None, bins=bins) ax.set_yticklabels([]) ax.set_xlabel("mean change", labelpad=0.1) ax.set_title("{0}) mean change group:{1}, {2} entries\nmax:{3:10G}, min:{4:10G}". format(abet[ax_count], g, mn_g.shape[0], mn_g.max(), mn_g.min()), loc="left") ax.grid() ax_count += 1 ax = axes[ax_count] std_g.hist(ax=ax, facecolor=facecolor, alpha=0.5, edgecolor=None, bins=bins) ax.set_yticklabels([]) ax.set_xlabel("sigma percent reduction", labelpad=0.1) ax.set_title("{0}) sigma change group:{1}, {2} entries\nmax:{3:10G}, min:{4:10G}". format(abet[ax_count], g, mn_g.shape[0], std_g.max(), std_g.min()), loc="left") ax.grid() ax_count += 1 logger.log("plotting change for {0}".format(g)) for a in range(ax_count, nr * nc): axes[a].set_axis_off() axes[a].set_yticks([]) axes[a].set_xticks([]) plt.tight_layout() figs.append(fig) if filename is not None: with PdfPages(filename) as pdf: for fig in figs: pdf.savefig(fig) plt.close(fig) logger.log("plot ensemble change") else: logger.log("plot ensemble change") return figs
helper function to plot first and second moment change histograms Parameters ---------- ensemble1 : varies str or pd.DataFrames ensemble2 : varies str or pd.DataFrame pst : pyemu.Pst pst instance facecolor : str the histogram facecolor. filename : str the name of the pdf to create. If None, return figs without saving. Default is None.
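A hedged usage sketch: the CSV names are hypothetical PESTPP-IES parameter-ensemble outputs (prior and a later iteration) belonging to the control file "model.pst":

import pyemu

pst = pyemu.Pst("model.pst")
ensemble_change_summary("model.0.par.csv", "model.3.par.csv", pst,
                        filename="par_change.pdf")
# with filename=None, the list of matplotlib figures is returned instead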
def put(text, cbname): """ Put the given string into the given clipboard. """ global _lastSel _checkTkInit() if cbname == 'CLIPBOARD': _theRoot.clipboard_clear() if text: # for clipboard_append, kwds can be -displayof, -format, or -type _theRoot.clipboard_append(text) return if cbname == 'PRIMARY': _lastSel = text _theRoot.selection_handle(ch_handler, selection='PRIMARY') # we need to claim/own it so that ch_handler is used _theRoot.selection_own(selection='PRIMARY') # could add command arg for a func to be called when we lose ownership return raise RuntimeError("Unexpected clipboard name: "+str(cbname))
Put the given string into the given clipboard.
def get_path_name(self): """Gets path and name of song :return: Name of path, name of file (or folder) """ path = fix_raw_path(os.path.dirname(os.path.abspath(self.path))) name = os.path.basename(self.path) return path, name
Gets path and name of song :return: Name of path, name of file (or folder)
def _pdf_find_urls(bytes, mimetype): """ This function finds URLs inside of PDF bytes. """ # Start with only the ASCII bytes. Limit it to 12+ character strings. try: ascii_bytes = b' '.join(re.compile(rb'[\x00\x09\x0A\x0D\x20-\x7E]{12,}').findall(bytes)) ascii_bytes = ascii_bytes.replace(b'\x00', b'') except Exception: return [] urls = [] # Find the embedded text sandwiched between [ ] embedded_text = set(re.compile(rb'(\[(\([\x20-\x27\x2A-\x7E]{1,3}\)[\-\d]*){5,}\])').findall(ascii_bytes)) # Get the text inside the parentheses. This catches URLs embedded in the text of the PDF that don't # use the normal "URI/URI()>>" method. for match in embedded_text: text = match[0] parentheses_text = b''.join(re.compile(rb'\((.*?)\)').findall(text)) urls.append(parentheses_text) # Find any URLs that use the "URI/URI()>>" method. urls += re.compile(rb'\/URI\s*\((.*?)\)\s*>>').findall(ascii_bytes) if urls: # PDF URLs escape certain characters. We want to remove any of the escapes (backslashes) # from the URLs so that we get the original URL. urls = [u.replace(b'\\', b'') for u in urls] return urls
This function finds URLs inside of PDF bytes.
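A hedged usage sketch that scans a local file's raw bytes (the path is hypothetical; the mimetype argument is not used by the function body itself):

with open('sample.pdf', 'rb') as f:
    for url in _pdf_find_urls(f.read(), 'application/pdf'):
        print(url.decode('ascii', errors='replace'))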
def strip_tags(s): """Resolve HTML entities and remove tags from a string.""" def handle_match(m): name = m.group(1) if name in html_entities: return chr(html_entities[name]) if name[:2] in ("#x", "#X"): try: return chr(int(name[2:], 16)) except ValueError: return "" elif name.startswith("#"): try: return chr(int(name[1:])) except ValueError: return "" return "" return _entity_re.sub(handle_match, _striptags_re.sub("", s))
Resolve HTML entities and remove tags from a string.
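A quick illustration, assuming the module-level _striptags_re and _entity_re patterns plus the html_entities name-to-codepoint table referenced above behave as their names suggest:

text = "<p>caf&eacute; &amp; <b>co</b> &#x2603;</p>"
print(strip_tags(text))   # 'café & co ☃'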
def migrate(gandi, resource, force, background): """ Migrate a disk to another datacenter. """ # check it's not attached source_info = gandi.disk.info(resource) if source_info['vms_id']: click.echo('Cannot start the migration: disk %s is attached. ' 'Please detach the disk before starting the migration.' % resource) return disk_datacenter = source_info['datacenter_id'] dc_choices = gandi.datacenter.list_migration_choice(disk_datacenter) if not dc_choices: click.echo('No datacenter is available for migration') return elif len(dc_choices) == 1: # use the only one available datacenter_id = dc_choices[0]['id'] else: choice_list = [dc['dc_code'] for dc in dc_choices] dc_choice = click.Choice(choice_list) dc_chosen = click.prompt('Select a datacenter [%s]' % '|'.join(choice_list), # noqa type=dc_choice, show_default=True) datacenter_id = [dc['id'] for dc in dc_choices if dc['dc_code'] == dc_chosen][0] if not force: proceed = click.confirm('Are you sure you want to migrate disk %s ?' % resource) if not proceed: return datacenters = gandi.datacenter.list() dc_from = [dc['dc_code'] for dc in datacenters if dc['id'] == disk_datacenter][0] dc_to = [dc['dc_code'] for dc in datacenters if dc['id'] == datacenter_id][0] migration_msg = ('* Starting the migration of disk %s from datacenter %s ' 'to %s' % (resource, dc_from, dc_to)) gandi.echo(migration_msg) output_keys = ['id', 'type', 'step'] oper = gandi.disk.migrate(resource, datacenter_id, background) if background: output_generic(gandi, oper, output_keys) return oper
Migrate a disk to another datacenter.
def from_directory_import( self, module, real_names, local_names, import_alias_mapping, skip_init=False ): """ Directories don't need to be packages. """ module_path = module[1] init_file_location = os.path.join(module_path, '__init__.py') init_exists = os.path.isfile(init_file_location) if init_exists and not skip_init: package_name = os.path.split(module_path)[1] return self.add_module( module=(module[0], init_file_location), module_or_package_name=package_name, local_names=local_names, import_alias_mapping=import_alias_mapping, is_init=True, from_from=True ) for real_name in real_names: full_name = os.path.join(module_path, real_name) if os.path.isdir(full_name): new_init_file_location = os.path.join(full_name, '__init__.py') if os.path.isfile(new_init_file_location): self.add_module( module=(real_name, new_init_file_location), module_or_package_name=real_name, local_names=local_names, import_alias_mapping=import_alias_mapping, is_init=True, from_from=True, from_fdid=True ) else: raise Exception('from anything import directory needs an __init__.py file in directory') else: file_module = (real_name, full_name + '.py') self.add_module( module=file_module, module_or_package_name=real_name, local_names=local_names, import_alias_mapping=import_alias_mapping, from_from=True ) return IgnoredNode()
Directories don't need to be packages.
def to_ndarray(self): """ Transfer JTensor to ndarray. As a SparseTensor may generate a very big ndarray, we don't support this function for SparseTensor. :return: a ndarray """ assert self.indices is None, "sparseTensor to ndarray is not supported" return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape)
Transfer JTensor to ndarray. As a SparseTensor may generate a very big ndarray, we don't support this function for SparseTensor. :return: a ndarray