def easybake(css_in, html_in=sys.stdin, html_out=sys.stdout, last_step=None,
             coverage_file=None, use_repeatable_ids=False):
    """Process the given HTML file stream with the css stream."""
    html_doc = etree.parse(html_in)
    oven = Oven(css_in, use_repeatable_ids)
    oven.bake(html_doc, last_step)

    # serialize out HTML
    print(etree.tostring(html_doc, method="xml").decode('utf-8'),
          file=html_out)

    # generate CSS coverage file
    if coverage_file:
        print('SF:{}'.format(css_in.name), file=coverage_file)
        print(oven.get_coverage_report(), file=coverage_file)
        print('end_of_record', file=coverage_file)

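# Usage sketch (hedged): assumes easybake is importable from its module; the
# stylesheet and page file names below are hypothetical stand-ins.
import io

with open('style.css') as css_in, open('page.html') as html_in:
    html_out = io.StringIO()
    easybake(css_in, html_in=html_in, html_out=html_out)
    baked = html_out.getvalue()   # serialized XHTML written by the oven
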
def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False,
                     target_node=None):
    """Does a failover of an instance.

    @type instance: string
    @param instance: Instance name
    @type iallocator: string
    @param iallocator: Iallocator for deciding the target node for
        shared-storage instances
    @type ignore_consistency: bool
    @param ignore_consistency: Whether to ignore disk consistency
    @type target_node: string
    @param target_node: Target node for shared-storage instances
    @rtype: string
    @return: job id

    """
    body = {
        "ignore_consistency": ignore_consistency,
    }

    if iallocator is not None:
        body["iallocator"] = iallocator
    if target_node is not None:
        body["target_node"] = target_node

    return r.request("put", "/2/instances/%s/failover" % instance,
                     content=body)

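# Call sketch (hedged): r is assumed to be a connected Ganeti RAPI client
# object exposing the request() method used above; the instance name is
# hypothetical.
job_id = FailoverInstance(r, 'instance1.example.com', ignore_consistency=True)
print('submitted failover job', job_id)   # job id comes back as a string
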
def render(self):
    """
    Returns the ``struct`` format and list of the size and value.

    The format is derived from the size primitive and the length of the
    resulting encoded value (e.g. the format for a string of 'foo' ends up
    as 'h3s').

    .. note::
        The value is expected to be string-able (wrapped in ``str()``) and
        is then encoded as UTF-8.
    """
    size_format = self.size_primitive.fmt
    if self.value is None:
        return size_format, [-1]

    value = self.render_value(self.value)
    size = len(value)
    fmt = "%s%ds" % (size_format, size)
    return fmt, [size, value]

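# Worked example of the 'h3s' case from the docstring, using only the
# standard struct module.
import struct

value = str('foo').encode('utf-8')        # b'foo'
fmt = '%s%ds' % ('h', len(value))         # 'h3s': int16 length prefix + 3 bytes
packed = struct.pack('!' + fmt, len(value), value)
assert packed == b'\x00\x03foo'           # network byte order
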
def get_parameter(self, name):
    """Get a parameter."""
    default_value = "$%!)(INVALID)(!%$"
    value = self.lib.tdGetDeviceParameter(self.id, name, default_value)
    if value == default_value:
        raise AttributeError(name)
    return value

def show_progress(name, **kwargs):
    '''
    Sets up a :class:`ProgressBarHandler` to handle progress logs for
    a given module.

    Parameters
    ----------
    name : string
        The module name of the progress logger to use. For example,
        :class:`skl_groups.divergences.KNNDivergenceEstimator`
        uses ``'skl_groups.divergences.knn.progress'``.

    * : anything
        Other keyword arguments are passed to the :class:`ProgressBarHandler`.
    '''
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(ProgressBarHandler(**kwargs))

def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    '''attempts to cleanly remove skull from ``dset``'''
    return available_method('skull_strip')(dset, suffix, prefix, unifize)

def POPFQ(cpu):
    """
    Pops stack into EFLAGS register.

    :param cpu: current CPU.
    """
    # Writable flag bits: CF | PF | AF | ZF | SF | DF | OF
    mask = 0x00000001 | 0x00000004 | 0x00000010 | 0x00000040 | 0x00000080 | 0x00000400 | 0x00000800
    cpu.EFLAGS = (cpu.EFLAGS & ~mask) | cpu.pop(64) & mask

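# Worked example of the masking arithmetic: only the seven writable flag bits
# are taken from the popped value; every other bit keeps its old state.
mask = 0x001 | 0x004 | 0x010 | 0x040 | 0x080 | 0x400 | 0x800  # CF PF AF ZF SF DF OF
old_eflags = 0x00000202          # IF set (bit 9), outside the mask
popped     = 0xFFFFFFFF          # attempt to set everything
new_eflags = (old_eflags & ~mask) | (popped & mask)
assert new_eflags == 0x00000ED7  # IF survives, the masked flags are now set
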
def get_rectangle(self):
    """Gets the coordinates of the rectangle in which the tree can be put.

    Returns:
        tuple: (x1, y1, x2, y2)
    """
    rec = [self.pos[0], self.pos[1]] * 2
    for age in self.nodes:
        for node in age:
            # Check max/min for x/y coords
            for i in range(2):
                if rec[0+i] > node.pos[i]:
                    rec[0+i] = node.pos[i]
                elif rec[2+i] < node.pos[i]:
                    rec[2+i] = node.pos[i]
    return tuple(rec)

def __handle_events(self):
    """This is the place to put all event handling."""
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.QUIT:
            self.exit()

def values_clear(self, range):
    """Lower-level method that directly calls `spreadsheets.values.clear
    <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear>`_.

    :param str range: The `A1 notation
        <https://developers.google.com/sheets/api/guides/concepts#a1_notation>`_
        of the values to clear.

    :returns: `Response body
        <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear#response-body>`_.
    :rtype: dict

    .. versionadded:: 3.0
    """
    url = SPREADSHEET_VALUES_CLEAR_URL % (self.id, quote(range))
    r = self.client.request('post', url)
    return r.json()

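# Usage sketch (hedged): this reads like a gspread-style Spreadsheet method.
# The spreadsheet title and range are hypothetical, and service-account
# credentials are assumed to be configured.
import gspread

gc = gspread.service_account()        # assumes a service-account key is set up
sh = gc.open('My Sheet')              # hypothetical spreadsheet title
sh.values_clear("'Sheet1'!A1:B2")     # clears the range, returns the response body
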
def issubclass(cls, ifaces):
    """Check if the given class is an implementation of the given ifaces."""
    ifaces = _ensure_ifaces_tuple(ifaces)
    # Each iface must have its attributes, properties, methods, and
    # classmethods all defined on cls.
    return all(
        all((
            _check_for_definition(iface, cls, '__iclassattribute__', _is_attribute),
            _check_for_definition(iface, cls, '__iproperty__', _is_property),
            _check_for_definition(iface, cls, '__imethod__', _is_method),
            _check_for_definition(iface, cls, '__iclassmethod__', _is_classmethod),
        ))
        for iface in ifaces
    )

def get(self, resource):
    """Get attributes of the data model object named by the given resource.

    Args:
        resource (intern.resource.boss.BossResource): resource.name as well
            as any parents must be identified to succeed.

    Returns:
        (intern.resource.boss.BossResource): Returns resource of type
            requested on success.

    Raises:
        requests.HTTPError on failure.
    """
    return self.service.get(
        resource, self.url_prefix, self.auth, self.session,
        self.session_send_opts)

def extract_bbox(layers):
    """
    Returns a bounding box for ``layers`` or (0, 0, 0, 0) if the layers
    have no bounding box.
    """
    if not hasattr(layers, '__iter__'):
        layers = [layers]
    bboxes = [
        layer.bbox for layer in layers
        if layer.is_visible() and not layer.bbox == (0, 0, 0, 0)
    ]
    if len(bboxes) == 0:  # Empty bounding box.
        return (0, 0, 0, 0)
    lefts, tops, rights, bottoms = zip(*bboxes)
    return (min(lefts), min(tops), max(rights), max(bottoms))

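# Self-contained check of the union logic, using a hypothetical stand-in for
# the layer objects (only .bbox and .is_visible() are needed).
class FakeLayer:
    """Hypothetical stand-in exposing just the attributes extract_bbox uses."""
    def __init__(self, bbox, visible=True):
        self.bbox = bbox
        self._visible = visible

    def is_visible(self):
        return self._visible

layers = [FakeLayer((0, 0, 10, 10)), FakeLayer((5, 5, 20, 15)),
          FakeLayer((0, 0, 0, 0))]          # the empty bbox is ignored
assert extract_bbox(layers) == (0, 0, 20, 15)
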
def populate_values(self):
    '''Add values from the underlying dash layout configuration'''
    obj = self._get_base_state()
    self.base_state = json.dumps(obj)

def qteTextChanged(self):
    """
    Search for sub-string matches.

    This method is triggered by Qt whenever the text changes, i.e.
    whenever the user has altered the input. Extract the new input, find
    all matches, and highlight them accordingly.
    """
    # Remove any previous highlighting.
    self.clearHighlighting()
    SCI = self.qteWidget

    # Compile a list of spans that contain the specified string.
    self.compileMatchList()

    # Return if the substring does not exist in the text.
    if len(self.matchList) == 0:
        return

    # ------------------------------------------------------------
    # Make a copy of the style bits of the document, overwrite
    # those parts containing a substring, and then write them
    # back all at once. This is much faster than calling the
    # styling methods repeatedly.
    # ------------------------------------------------------------

    # Make a copy of the document style bits and determine the
    # cursor position in the document.
    style = bytearray(self.styleOrig)
    cur = SCI.positionFromLineIndex(*self.cursorPosOrig)

    # Style all matches.
    self.selMatchIdx = 0
    for start, stop in self.matchList:
        if start < cur:
            self.selMatchIdx += 1
        style[start:stop] = bytes(b'\x1e') * (stop - start)

    # If the cursor is after the last possible match (e.g. always
    # the case when the cursor is at the end of the file) then
    # self.selMatchIdx will point beyond the list.
    if self.selMatchIdx == len(self.matchList):
        self.selMatchIdx = 0

    # Style the first match after the current cursor position
    # differently to indicate that it is the currently
    # selected one.
    start, stop = self.matchList[self.selMatchIdx]
    style[start:stop] = bytes(b'\x1f') * (stop - start)

    # Place the cursor at the start of the currently selected match.
    line, col = SCI.lineIndexFromPosition(start)
    SCI.setCursorPosition(line, col)
    self.selMatchIdx += 1

    # Apply the modified style array to the document.
    self.qteWidget.SCISetStylingEx(0, 0, style)

def dim_dc(self, pars):
    r"""
    :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} =
    \rho_0 \frac{-m \sin(\frac{c \pi}{2}) \ln(\omega \tau)(\omega \tau)^c
    - m (\omega \tau)^c \frac{\pi}{2} \cos(\frac{c \pi}{2})}
    {1 + 2 (\omega \tau)^c \cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
    \rho_0 \frac{\left[-m (\omega \tau)^c \cos(\frac{c \pi}{2}) \right]
    \cdot \left[ -2 \ln(\omega \tau) (\omega \tau)^c \cos(\frac{c \pi}{2})
    + 2 (\omega \tau)^c \frac{\pi}{2} \cos(\frac{c \pi}{2}) \right]
    + \left[2 \ln(\omega \tau) (\omega \tau)^{2 c}\right]}
    {\left[1 + 2 (\omega \tau)^c \cos(\frac{c \pi}{2})
    + (\omega \tau)^{2 c}\right]^2}`
    """
    self._set_parameters(pars)
    # term1
    nom1a = - self.m * np.log(self.w * self.tau) * self.otc * \
        np.sin(self.ang)
    nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
    term1 = (nom1a + nom1b) / self.denom

    # term2
    nom2 = (self.m * self.otc * np.sin(self.ang)) * \
        (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
         2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
         2 * np.log(self.w * self.tau) * self.otc2)
    term2 = nom2 / self.denom ** 2

    result = term1 + term2
    result *= self.rho0
    return result

r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
def check_status(self):
    """
    This function checks the status of the task by inspecting the output
    and the error files produced by the application and by the queue manager.
    """
    # 1) see if the job is blocked
    # 2) see if an error occurred when the job was submitted; TODO these problems can be solved
    # 3) see if there is output
    # 4) see if abinit reports problems
    # 5) see if both err files exist and are empty
    # 6) no output and no err files, the job must still be running
    # 7) try to find out what caused the problems
    # 8) there is a problem but we did not figure out what ...
    # 9) the only way of landing here is if there is an output file but no err files...

    # 1) A locked task can only be unlocked by calling set_status explicitly.
    # an errored task should not end up here, but just to be sure
    black_list = (self.S_LOCKED, self.S_ERROR)
    #if self.status in black_list: return self.status

    # 2) Check the returncode of the job script
    if self.returncode != 0:
        msg = "job.sh return code: %s\nPerhaps the job was not submitted properly?" % self.returncode
        return self.set_status(self.S_QCRITICAL, msg=msg)

    # If we have an abort file produced by Abinit
    if self.mpiabort_file.exists:
        return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")

    # Analyze the stderr file for Fortran runtime errors.
    # getsize is 0 if the file is empty or it does not exist.
    err_msg = None
    if self.stderr_file.getsize() != 0:
        err_msg = self.stderr_file.read()

    # Analyze the stderr file of the resource manager for runtime errors.
    # TODO: Why are we looking for errors in queue.qerr?
    qerr_info = None
    if self.qerr_file.getsize() != 0:
        qerr_info = self.qerr_file.read()

    # Analyze the stdout file of the resource manager (needed for PBS !)
    qout_info = None
    if self.qout_file.getsize():
        qout_info = self.qout_file.read()

    # Start to check ABINIT status if the output file has been created.
    #if self.output_file.getsize() != 0:
    if self.output_file.exists:
        try:
            report = self.get_event_report()
        except Exception as exc:
            msg = "%s exception while parsing event_report:\n%s" % (self, exc)
            return self.set_status(self.S_ABICRITICAL, msg=msg)

        if report is None:
            return self.set_status(self.S_ERROR, msg="got None report!")

        if report.run_completed:
            # Here we set the correct timing data reported by Abinit
            self.datetimes.start = report.start_datetime
            self.datetimes.end = report.end_datetime

            # Check if the calculation converged.
            not_ok = report.filter_types(self.CRITICAL_EVENTS)
            if not_ok:
                return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
            else:
                return self.set_status(self.S_OK, msg="status set to ok based on abiout")

        # Calculation still running or errors?
        if report.errors:
            # Abinit reported problems
            logger.debug('Found errors in report')
            for error in report.errors:
                logger.debug(str(error))
                try:
                    self.abi_errors.append(error)
                except AttributeError:
                    self.abi_errors = [error]
            # The job is unfixable due to ABINIT errors
            logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
            msg = "\n".join(map(repr, report.errors))
            return self.set_status(self.S_ABICRITICAL, msg=msg)

    # 5)
    if self.stderr_file.exists and not err_msg:
        if self.qerr_file.exists and not qerr_info:
            # there is output and no errors
            # The job still seems to be running
            return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')

    # 6)
    if not self.output_file.exists:
        logger.debug("output_file does not exist")
        if not self.stderr_file.exists and not self.qerr_file.exists:
            # No output at all. The job is still in the queue.
            return self.status

    # 7) Analyze the files of the resource manager and abinit and execution err (mvs)
    # MG: This section has been disabled: several portability issues
    # Need more robust logic in error_parser, perhaps logic provided by users via callbacks.
    if False and (qerr_info or qout_info):
        from pymatgen.io.abinit.scheduler_error_parsers import get_parser
        scheduler_parser = get_parser(self.manager.qadapter.QTYPE,
                                      err_file=self.qerr_file.path,
                                      out_file=self.qout_file.path,
                                      run_err_file=self.stderr_file.path)

        if scheduler_parser is None:
            return self.set_status(self.S_QCRITICAL,
                                   msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)

        scheduler_parser.parse()

        if scheduler_parser.errors:
            # Store the queue errors in the task
            self.queue_errors = scheduler_parser.errors
            # The job is killed or crashed and we know what happened
            msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
            return self.set_status(self.S_QCRITICAL, msg=msg)

        elif lennone(qerr_info) > 0:
            # if only qout_info, we are not necessarily in QCRITICAL state,
            # since there will always be info in the qout file
            self.history.info('Found unknown message in the queue qerr file: %s' % str(qerr_info))
            #try:
            #    rt = self.datetimes.get_runtime().seconds
            #except Exception:
            #    rt = -1.0
            #tl = self.manager.qadapter.timelimit
            #if rt > tl:
            #    msg += 'set to error: runtime (%s) exceeded walltime (%s)' % (rt, tl)
            #    print(msg)
            #    return self.set_status(self.S_ERROR, msg=msg)
            # The job may be killed or crashed but we don't know what happened
            # It may also be that an innocent message was written to qerr, so we wait for a while
            # it is set to QCritical, we will attempt to fix it by running on more resources

    # 8) analyzing the err files and abinit output did not identify a problem
    # but if the files are not empty we do have a problem but no way of solving it:
    # The job is killed or crashed but we don't know what happened
    # it is set to QCritical, we will attempt to fix it by running on more resources
    if err_msg:
        msg = 'Found error message:\n %s' % str(err_msg)
        self.history.warning(msg)
        #return self.set_status(self.S_QCRITICAL, msg=msg)

    # 9) if we still haven't returned there is no indication of any error and the job can only still be running
    # but we should actually never land here, or we have delays in the file system ....
    # print('the job still seems to be running maybe it is hanging without producing output... ')

    # Check time of last modification.
    if self.output_file.exists and \
       (time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
        msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
        return self.set_status(self.S_ERROR, msg=msg)

    # Handle weird case in which either run.abo or run.log has not been produced
    #if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits):
    #    msg = "Task have been submitted but cannot find the log file or the output file"
    #    return self.set_status(self.S_ERROR, msg)

    return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')

def calc_area_extent(self, key):
    """Calculate area extent for a dataset."""
    # Calculate the area extent of the swath based on start line and column
    # information, total number of segments and channel resolution
    xyres = {500: 22272, 1000: 11136, 2000: 5568}
    chkres = xyres[key.resolution]

    # Get metadata for given dataset
    measured = self.nc['/data/{}/measured'.format(key.name)]
    variable = self.nc['/data/{}/measured/effective_radiance'.format(key.name)]

    # Get start/end line and column of loaded swath.
    self.startline = int(measured['start_position_row'][...])
    self.endline = int(measured['end_position_row'][...])
    self.startcol = int(measured['start_position_column'][...])
    self.endcol = int(measured['end_position_column'][...])
    self.nlines, self.ncols = variable[:].shape

    logger.debug('Channel {} resolution: {}'.format(key.name, chkres))
    logger.debug('Row/Cols: {} / {}'.format(self.nlines, self.ncols))
    logger.debug('Start/End row: {} / {}'.format(self.startline, self.endline))
    logger.debug('Start/End col: {} / {}'.format(self.startcol, self.endcol))
    # total_segments = 70

    # Calculate full globe line extent
    max_y = 5432229.9317116784
    min_y = -5429229.5285458621
    full_y = max_y + abs(min_y)
    # Single swath line extent
    res_y = full_y / chkres  # Extent per pixel resolution
    startl = min_y + res_y * self.startline - 0.5 * (res_y)
    endl = min_y + res_y * self.endline + 0.5 * (res_y)
    logger.debug('Start / end extent: {} / {}'.format(startl, endl))

    chk_extent = (-5432229.9317116784, endl, 5429229.5285458621, startl)
    return chk_extent

def path(path_name=None, override=None, *, root=None, name=None, ext=None,
         inject=None, relpath=None, reduce=False):
    """
    Path manipulation black magic
    """
    path_name, identity, root = _initialize(path_name, override, root, inject)
    new_name = _process_name(path_name, identity, name, ext)
    new_directory = _process_directory(path_name, identity, root, inject)
    full_path = os.path.normpath(os.path.join(new_directory, new_name))
    if APPEND_SEP_TO_DIRS and not new_name and full_path[-1] != os.sep:
        full_path += os.sep
    final_path = _format_path(full_path, root, relpath, reduce)
    return final_path

def usergroup_update(usrgrpid, **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Update existing user group

    .. note::
        This function accepts all standard user group properties: keyword
        argument names differ depending on your zabbix version, see here__.

        .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/object#user_group

    :param usrgrpid: ID of the user group to update.
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: IDs of the updated user group, False on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.usergroup_update 8 name=guestsRenamed
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'usergroup.update'
            params = {"usrgrpid": usrgrpid}
            params = _params_extend(params, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['usrgrpids']
        else:
            raise KeyError
    except KeyError:
        return ret

def update(self, d):
    """Works like regular update, but only actually updates when the new
    value and the old value differ. This is necessary to prevent certain
    infinite loops.

    :arg d: a dictionary
    """
    for (k, v) in d.items():
        if k not in self or self[k] != v:
            self[k] = v

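# Minimal sketch of why the differ-check matters, assuming the update method
# above is in scope as a module-level function: a dict whose writes have side
# effects only fires for genuinely new values.
class OnlyOnChange(dict):
    """Hypothetical dict whose writes have an observable side effect."""
    def __setitem__(self, key, value):
        print('write:', key, value)        # the side effect we want to limit
        super().__setitem__(key, value)
    update = update                        # reuse the method defined above

d = OnlyOnChange(a=1)
d.update({'a': 1, 'b': 2})                 # prints only "write: b 2"
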
def get_critical_original_kink_ratio(self):
    """
    Returns a list of molar mixing ratios for each kink between ORIGINAL
    (instead of processed) reactant compositions. This is the same list as
    the mixing ratios obtained from the get_kinks method if self.norm = False.

    Returns:
        A list of floats representing molar mixing ratios between
        the original reactant compositions for each kink.
    """
    ratios = []
    if self.c1_original == self.c2_original:
        return [0, 1]
    reaction_kink = [k[3] for k in self.get_kinks()]
    for rxt in reaction_kink:
        ratios.append(abs(self._get_original_composition_ratio(rxt)))
    return ratios

def helices(self):
    """Generates new `Assembly` containing just α-helices.

    Notes
    -----
    Metadata is not currently preserved from the parent object.

    Returns
    -------
    hel_assembly : ampal.Protein
        `Assembly` containing only the α-helices of the original `Assembly`.
    """
    hel_molecules = list(itertools.chain(
        *[p.helices._molecules for p in self._molecules
          if hasattr(p, 'helices')]))
    hel_assembly = Assembly(molecules=hel_molecules, assembly_id=self.id)
    return hel_assembly

def make_sentence(self, init_state=None, **kwargs):
    """
    Attempts `tries` (default: 10) times to generate a valid sentence,
    based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
    and `max_overlap_total` to `test_sentence_output`.

    If successful, returns the sentence as a string. If not, returns None.

    If `init_state` (a tuple of `self.chain.state_size` words) is not
    specified, this method chooses a sentence-start at random, in accordance
    with the model.

    If `test_output` is set as False then the `test_sentence_output` check
    will be skipped.

    If `max_words` is specified, the word count for the sentence will be
    evaluated against the provided limit.
    """
    tries = kwargs.get('tries', DEFAULT_TRIES)
    mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
    mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
    test_output = kwargs.get('test_output', True)
    max_words = kwargs.get('max_words', None)

    if init_state is not None:
        prefix = list(init_state)
        for word in prefix:
            if word == BEGIN:
                prefix = prefix[1:]
            else:
                break
    else:
        prefix = []

    for _ in range(tries):
        words = prefix + self.chain.walk(init_state)
        if max_words is not None and len(words) > max_words:
            continue
        if test_output and hasattr(self, "rejoined_text"):
            if self.test_sentence_output(words, mor, mot):
                return self.word_join(words)
        else:
            return self.word_join(words)
    return None

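# Usage sketch (hedged): this reads like the markovify Text API; corpus.txt
# is a hypothetical plain-text corpus.
import markovify  # assumption: the method above belongs to a markovify-style model

with open('corpus.txt') as f:
    model = markovify.Text(f.read())

sentence = model.make_sentence(tries=50, max_words=20)
print(sentence)   # a generated sentence, or None if no valid one was found
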
def vol_per_rev_LS(id_number):
    """Look up the volume per revolution output by a Masterflex L/S pump
    through L/S tubing of the given ID number.

    :param id_number: Identification number of the L/S tubing. Valid numbers
        are 13-18, 24, 35, and 36.
    :type id_number: int

    :return: Volume per revolution output by a Masterflex L/S pump through
        the L/S tubing
    :rtype: float

    :Examples:

    >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS
    >>> from aguaclara.core.units import unit_registry as u
    >>> vol_per_rev_LS(13)
    <Quantity(0.06, 'milliliter / turn')>
    >>> vol_per_rev_LS(18)
    <Quantity(3.8, 'milliliter / turn')>
    """
    tubing_data_path = os.path.join(os.path.dirname(__file__), "data",
                                    "LS_tubing.txt")
    df = pd.read_csv(tubing_data_path, delimiter='\t')
    idx = df["Number"] == id_number
    return df[idx]['Flow (mL/rev)'].values[0] * u.mL/u.turn

def fit(self, dataset):
    """
    Computes the mean and variance and stores as a model to be used
    for later scaling.

    :param dataset: The data used to compute the mean and variance
                    to build the transformation model.
    :return: a StandardScalerModel
    """
    dataset = dataset.map(_convert_to_vector)
    jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd,
                           dataset)
    return StandardScalerModel(jmodel)

def get_signature_challenge(self):
    """Returns new signature challenge"""
    devices = [DeviceRegistration.wrap(device)
               for device in self.__get_u2f_devices()]

    if devices == []:
        return {
            'status': 'failed',
            'error': 'No devices have been associated with the account!'
        }

    challenge = start_authenticate(devices)
    challenge['status'] = 'ok'

    session['_u2f_challenge_'] = challenge.json
    return challenge

def fit(self, X):
    """Fit Kernel density estimation to a list of values.

    Args:
        X: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be
            estimated from.

    This function will fit a gaussian_kde model to a list of datapoints
    and store it as a class attribute.
    """
    self.constant_value = self._get_constant_value(X)

    if self.constant_value is None:
        self.model = scipy.stats.gaussian_kde(X)
    else:
        self._replace_constant_methods()

    self.fitted = True

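# In the non-constant branch, the stored model is just scipy.stats.gaussian_kde;
# a direct sketch of the underlying call.
import scipy.stats

model = scipy.stats.gaussian_kde([1.0, 1.2, 0.9, 1.1, 5.0])
density = model.evaluate([1.0, 5.0])   # estimated density at two query points
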
def activate(self, asset):
    '''Request activation of the specified asset representation.

    Asset representations are obtained from :py:meth:`get_assets`.

    :param dict asset: An asset representation from the API.
    :returns: :py:class:`planet.api.models.Body` with no response content
    :raises planet.api.exceptions.APIException: On API error.
    '''
    activate_url = asset['_links']['activate']
    return self._get(activate_url, body_type=models.Body).get_body()

def to_tokens(self, indices):
    """Converts token indices to tokens according to the vocabulary.

    Parameters
    ----------
    indices : int or list of ints
        A source token index or token indices to be converted.

    Returns
    -------
    str or list of strs
        A token or a list of tokens according to the vocabulary.
    """
    to_reduce = False
    if not isinstance(indices, (list, tuple)):
        indices = [indices]
        to_reduce = True

    max_idx = len(self._idx_to_token) - 1

    tokens = []
    for idx in indices:
        if not isinstance(idx, int) or idx > max_idx:
            raise ValueError('Token index {} in the provided `indices` is '
                             'invalid.'.format(idx))
        else:
            tokens.append(self._idx_to_token[idx])

    return tokens[0] if to_reduce else tokens

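# Self-contained check of the int-vs-list behaviour, with a hypothetical stub
# vocabulary (only _idx_to_token is required by the method).
class TinyVocab:
    """Hypothetical stub; real vocabularies build _idx_to_token from a corpus."""
    _idx_to_token = ['<unk>', 'hello', 'world']
    to_tokens = to_tokens          # reuse the method defined above

v = TinyVocab()
assert v.to_tokens(1) == 'hello'                  # single index -> single token
assert v.to_tokens([1, 2]) == ['hello', 'world']  # list in -> list out
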
def requestTimingInfo(self):
    """
    Returns the time needed to process the request by the frontend server
    in microseconds and the epoch timestamp of the request in microseconds.

    :rtype: tuple containing processing time and timestamp
    """
    try:
        return tuple(item.split('=')[1] for item in
                     self.http_response.header.get('CMS-Server-Time').split())
    except AttributeError:
        return None, None

def __is_function_action(self, action_function):
    """
    Detect if given function is really an action function.

    Args:
        action_function: Function to test.

    Note:
        We don't care if the variable refers to a function but rather if it
        is callable or not.
    """
    # test if function returns a couple of values
    is_function_action = True
    if not hasattr(action_function, '__call__'):
        return False

    # OK, callable. Do we receive the right arguments?
    try:
        for end_string, context in action_function():
            if not isinstance(end_string, basestring):
                self.log_error("Action function must return end of filename "
                               "as a string as first argument")
            if not isinstance(context, dict):
                self.log_error("Action function must return context as a "
                               "dict as second argument")
            break
    except Exception:
        is_function_action = False

    return is_function_action

def _convert_to_image_color(self, color):
    """:return: a color that can be used by the image"""
    rgb = self._convert_color_to_rrggbb(color)
    return self._convert_rrggbb_to_image_color(rgb)

def hgsub_report(self):
    """
    Yields:
        str: .hgsubs line for this repository
    """
    if self.relpath == '.':
        return
    yield "%s = [%s]%s" % (
        self.fpath.lstrip('./'),
        self.label,
        self.remote_url)

def shutdown(self, delete=False):
    """
    Shutdown this VM

    :param delete: Should we delete after shutting the VM down?
    :type delete: bool
    """
    disks = self.get_disks()
    self.domain.destroy()
    if delete:
        for disk in disks:
            disk.wipe()
            disk.delete()

def create(self, image, geometry, options):
    """
    Processing conductor, returns the thumbnail as an image engine instance
    """
    image = self.cropbox(image, geometry, options)
    image = self.orientation(image, geometry, options)
    image = self.colorspace(image, geometry, options)
    image = self.remove_border(image, options)
    image = self.scale(image, geometry, options)
    image = self.crop(image, geometry, options)
    image = self.rounded(image, geometry, options)
    image = self.blur(image, geometry, options)
    image = self.padding(image, geometry, options)
    return image

def _construct_retry(method_config, retry_codes, retry_params, retry_names):
    """Helper for ``construct_settings()``.

    Args:
        method_config (dict): A dictionary representing a single ``methods``
            entry of the standard API client config file. (See
            ``construct_settings()`` for information on this yaml.)
        retry_codes (dict): A dictionary parsed from the ``retry_codes`` entry
            of the standard API client config file. (See
            ``construct_settings()`` for information on this yaml.)
        retry_params (dict): A dictionary parsed from the ``retry_params``
            entry of the standard API client config file. (See
            ``construct_settings()`` for information on this yaml.)
        retry_names (dict): A dictionary mapping the string names used in the
            standard API client config file to API response status codes.

    Returns:
        Optional[RetryOptions]: The retry options, if applicable.
    """
    if method_config is None:
        return None

    codes = None
    if retry_codes and 'retry_codes_name' in method_config:
        codes_name = method_config['retry_codes_name']
        if codes_name in retry_codes and retry_codes[codes_name]:
            codes = [retry_names[name] for name in retry_codes[codes_name]]
        else:
            codes = []

    backoff_settings = None
    if retry_params and 'retry_params_name' in method_config:
        params_name = method_config['retry_params_name']
        if params_name and params_name in retry_params:
            backoff_settings = gax.BackoffSettings(**retry_params[params_name])

    return gax.RetryOptions(
        backoff_settings=backoff_settings,
        retry_codes=codes,
    )

def valid_file(cls, filename):
    """
    Check if the provided file is a valid file for this plugin.

    :arg filename: the path to the file to check.
    """
    file_ex = os.path.splitext(filename)[1].replace('.', '', 1)
    return file_ex in SUPPORTED_FILES and is_excel_file(filename)

def call(self):
    """
    call: ['mut'] ID ['(' parameters ')']
    """
    is_mutable = False
    if self.token.nature == Nature.MUT:
        is_mutable = True
        self._process(Nature.MUT)

    identifier = Identifier(name=self.token.value)
    self._process(Nature.ID)

    if self.token.nature == Nature.LPAREN:
        return FunctionCall(identifier=identifier,
                            parameters=self.parameters())
    else:
        return Variable(identifier=identifier, is_mutable=is_mutable)

def prepare_framework(estimator, s3_operations):
    """Prepare S3 operations (specify where to upload `source_dir`) and
    environment variables related to framework.

    Args:
        estimator (sagemaker.estimator.Estimator): The framework estimator to
            get information from and update.
        s3_operations (dict): The dict to specify s3 operations (upload
            `source_dir`).
    """
    if estimator.code_location is not None:
        bucket, key = fw_utils.parse_s3_url(estimator.code_location)
        key = os.path.join(key, estimator._current_job_name, 'source',
                           'sourcedir.tar.gz')
    else:
        bucket = estimator.sagemaker_session._default_bucket
        key = os.path.join(estimator._current_job_name, 'source',
                           'sourcedir.tar.gz')

    script = os.path.basename(estimator.entry_point)

    if estimator.source_dir and estimator.source_dir.lower().startswith('s3://'):
        code_dir = estimator.source_dir
        estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir,
                                                        script_name=script)
    else:
        code_dir = 's3://{}/{}'.format(bucket, key)
        estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir,
                                                        script_name=script)
        s3_operations['S3Upload'] = [{
            'Path': estimator.source_dir or script,
            'Bucket': bucket,
            'Key': key,
            'Tar': True
        }]

    estimator._hyperparameters[sagemaker.model.DIR_PARAM_NAME] = code_dir
    estimator._hyperparameters[sagemaker.model.SCRIPT_PARAM_NAME] = script
    estimator._hyperparameters[sagemaker.model.CLOUDWATCH_METRICS_PARAM_NAME] = \
        estimator.enable_cloudwatch_metrics
    estimator._hyperparameters[sagemaker.model.CONTAINER_LOG_LEVEL_PARAM_NAME] = \
        estimator.container_log_level
    estimator._hyperparameters[sagemaker.model.JOB_NAME_PARAM_NAME] = \
        estimator._current_job_name
    estimator._hyperparameters[sagemaker.model.SAGEMAKER_REGION_PARAM_NAME] = \
        estimator.sagemaker_session.boto_region_name

def translated(structure, values, lang_spec):
    """Return code associated to given structure and values, translated
    with the given language specification."""
    # LANGUAGE SPECS
    indentation = '\t'
    endline = '\n'
    object_code = ""
    stack = []
    # define shortcuts to behavior
    push = lambda x: stack.append(x)
    pop = lambda: stack.pop()
    last = lambda: stack[-1] if len(stack) > 0 else ' '

    def indented_code(s, level, end):
        return lang_spec[INDENTATION]*level + s + end

    # recreate python structure, and replace type by value
    level = 0
    CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION]
    ACTION = LEXEM_TYPE_ACTION
    DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL
    for lexem_type in structure:
        if lexem_type is ACTION:
            # place previous conditions if necessary
            if last() in CONDITIONS:
                # construct conditions lines
                value, values = values[0:len(stack)], values[len(stack):]
                object_code += indented_code(
                    lang_spec[BEG_CONDITION]
                    + lang_spec[LOGICAL_AND].join(value)
                    + lang_spec[END_CONDITION],
                    level, lang_spec[END_LINE]
                )
                # if provided, print the begin block token on a new line
                if len(lang_spec[BEG_BLOCK]) > 0:
                    object_code += indented_code(
                        lang_spec[BEG_BLOCK], level, lang_spec[END_LINE]
                    )
                stack = []
                level += 1
            # and place the action
            object_code += indented_code(
                lang_spec[BEG_ACTION] + values[0],
                level, lang_spec[END_ACTION] + lang_spec[END_LINE]
            )
            values = values[1:]
        elif lexem_type in CONDITIONS:
            push(lexem_type)
        elif lexem_type is DOWNLEVEL:
            if last() not in CONDITIONS:
                # down level, and add a END_BLOCK only if needed
                level -= 1
                if level >= 0:
                    object_code += indented_code(
                        lang_spec[END_BLOCK], level, lang_spec[END_LINE]
                    )
                else:
                    level = 0

    # add END_BLOCK while needed to reach level 0
    while level > 0:
        level -= 1
        if level >= 0:
            object_code += indented_code(
                lang_spec[END_BLOCK], level, lang_spec[END_LINE]
            )
        else:
            level = 0

    # Finished !
    return object_code

def simulate_moment_steps(
    self,
    circuit: circuits.Circuit,
    param_resolver: 'study.ParamResolverOrSimilarType' = None,
    qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
    initial_state: Any = None
) -> Iterator:
    """Returns an iterator of StepResults for each moment simulated.

    If the circuit being simulated is empty, a single step result should
    be returned with the state being set to the initial state.

    Args:
        circuit: The Circuit to simulate.
        param_resolver: A ParamResolver for determining values of Symbols.
        qubit_order: Determines the canonical ordering of the qubits. This
            is often used in specifying the initial state, i.e. the
            ordering of the computational basis states.
        initial_state: The initial state for the simulation. The form of
            this state depends on the simulation implementation. See
            documentation of the implementing class for details.

    Returns:
        Iterator that steps through the simulation, simulating each moment
        and returning a StepResult for each moment.
    """
    return self._simulator_iterator(
        circuit,
        study.ParamResolver(param_resolver),
        qubit_order,
        initial_state)

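# Usage sketch (hedged) with Cirq's concrete Simulator, which implements this
# interface; the step-result accessor name may differ across Cirq versions.
import cirq

q = cirq.LineQubit(0)
circuit = cirq.Circuit([cirq.X(q) ** 0.5, cirq.X(q) ** 0.5])
simulator = cirq.Simulator()

for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
    print(i, step.state_vector())   # assumption: state_vector() on this version
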
def _getMethodsVoc(self):
    """Return the registered methods as DisplayList
    """
    methods = api.search({
        "portal_type": "Method",
        "is_active": True
    }, "bika_setup_catalog")
    items = map(lambda m: (api.get_uid(m), api.get_title(m)), methods)
    items.sort(lambda x, y: cmp(x[1], y[1]))
    items.insert(0, ("", _("Not specified")))
    return DisplayList(list(items))

def create(self, locator):
    """
    Creates a component identified by given locator.

    :param locator: a locator to identify component to be created.

    :return: the created component.
    """
    for registration in self._registrations:
        this_locator = registration.locator

        if this_locator == locator:
            try:
                return registration.factory(locator)
            except Exception as ex:
                if isinstance(ex, CreateException):
                    raise ex

                raise CreateException(
                    None,
                    "Failed to create object for " + str(locator)
                ).with_cause(ex)

def transform(self, Z):
    """Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value
    dicts to array or sparse matrix.

    Named features not encountered during fit or fit_transform will be
    silently ignored.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
        iterable over Mappings, length = n_samples
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).

    Returns
    -------
    Z : transformed, containing {array, sparse matrix}
        Feature vectors; always 2-d.
    """
    mapper = self.broadcast(
        super(SparkDictVectorizer, self).transform, Z.context)
    dtype = sp.spmatrix if self.sparse else np.ndarray
    return Z.transform(mapper, column='X', dtype=dtype)

def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
    """Overwrite pygments.lexers.guess_lexer_for_filename to customize the
    priority of different lexers based on popularity of languages."""
    fn = basename(_fn)
    primary = {}
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        result.append(customize_lexer_priority(_fn, rv, lexer))

    matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
    if len(matlab) > 0:
        objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
        if objc and objc[0][0] == matlab[0][0]:
            raise SkipHeartbeat('Skipping because not enough language accuracy.')

    def type_sort(t):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        return (t[0], primary[t[2]], t[1], t[2].__name__)
    result.sort(key=type_sort)

    return result[-1][2](**options)

def load_training_rasters(response_raster, explanatory_rasters, selected=None):
    """
    Parameters
    ----------
    response_raster : Path to GDAL raster containing responses
    explanatory_rasters : List of Paths to GDAL rasters
        containing explanatory variables

    Returns
    -------
    train_xs : Array of explanatory variables
    train_ys : 1xN array of known responses
    """
    with rasterio.open(response_raster) as src:
        response_data = src.read().flatten()

    if selected is None:
        train_y = response_data
    else:
        train_y = response_data[selected]

    selected_data = []
    for rast in explanatory_rasters:
        with rasterio.open(rast) as src:
            explanatory_data = src.read().flatten()
        assert explanatory_data.size == response_data.size
        if selected is None:
            selected_data.append(explanatory_data)
        else:
            selected_data.append(explanatory_data[selected])

    train_xs = np.asarray(selected_data).T
    return train_xs, train_y

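# Call sketch (hedged): the raster file names are hypothetical; all rasters
# must share the same grid shape for the size assertion to hold.
train_xs, train_y = load_training_rasters(
    'responses.tif', ['elevation.tif', 'slope.tif', 'aspect.tif'])
print(train_xs.shape, train_y.shape)   # (n_pixels, 3) and (n_pixels,)
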
def websocket(
    self, uri, host=None, strict_slashes=None, subprotocols=None, name=None
):
    """Decorate a function to be registered as a websocket route

    :param uri: path of the URL
    :param subprotocols: optional list of str with supported subprotocols
    :param host:
    :return: decorated function
    """
    self.enable_websocket()

    # Fix case where the user did not prefix the URL with a /
    # and will probably get confused as to why it's not working
    if not uri.startswith("/"):
        uri = "/" + uri

    if strict_slashes is None:
        strict_slashes = self.strict_slashes

    def response(handler):
        async def websocket_handler(request, *args, **kwargs):
            request.app = self
            if not getattr(handler, "__blueprintname__", False):
                request.endpoint = handler.__name__
            else:
                request.endpoint = (
                    getattr(handler, "__blueprintname__", "")
                    + handler.__name__
                )
            try:
                protocol = request.transport.get_protocol()
            except AttributeError:
                # On Python3.5 the Transport classes in asyncio do not
                # have a get_protocol() method as in uvloop
                protocol = request.transport._protocol
            ws = await protocol.websocket_handshake(request, subprotocols)

            # schedule the application handler
            # its future is kept in self.websocket_tasks in case it
            # needs to be cancelled due to the server being stopped
            fut = ensure_future(handler(request, ws, *args, **kwargs))
            self.websocket_tasks.add(fut)
            try:
                await fut
            except (CancelledError, ConnectionClosed):
                pass
            finally:
                self.websocket_tasks.remove(fut)
            await ws.close()

        self.router.add(
            uri=uri,
            handler=websocket_handler,
            methods=frozenset({"GET"}),
            host=host,
            strict_slashes=strict_slashes,
            name=name,
        )
        return handler

    return response

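# Usage sketch (hedged): this is Sanic's websocket decorator; a minimal echo
# endpoint, with a hypothetical app name.
from sanic import Sanic

app = Sanic('ws_demo')                 # hypothetical app name

@app.websocket('/feed')
async def feed(request, ws):
    while True:
        data = await ws.recv()         # echo whatever the client sends
        await ws.send(data)
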
def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None):
    """
    syspath version of modname_to_modpath

    Args:
        modname (str): name of module to find
        sys_path (List[PathLike], default=None):
            if specified overrides `sys.path`
        exclude (List[PathLike], default=None):
            list of directory paths. if specified prevents these directories
            from being searched.

    Notes:
        This is much slower than the pkgutil mechanisms.

    CommandLine:
        python -m xdoctest.static_analysis _syspath_modname_to_modpath

    Example:
        >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis'))
        ...static_analysis.py
        >>> print(_syspath_modname_to_modpath('xdoctest'))
        ...xdoctest
        >>> print(_syspath_modname_to_modpath('_ctypes'))
        ..._ctypes...
        >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None

    Example:
        >>> # test what happens when the module is not visible in the path
        >>> modname = 'xdoctest.static_analysis'
        >>> modpath = _syspath_modname_to_modpath(modname)
        >>> exclude = [split_modpath(modpath)[0]]
        >>> found = _syspath_modname_to_modpath(modname, exclude=exclude)
        >>> # this only works if installed in dev mode, pypi fails
        >>> assert found is None, 'should not have found {}'.format(found)
    """
    def _isvalid(modpath, base):
        # every directory up to the module, should have an init
        subdir = dirname(modpath)
        while subdir and subdir != base:
            if not exists(join(subdir, '__init__.py')):
                return False
            subdir = dirname(subdir)
        return True

    _fname_we = modname.replace('.', os.path.sep)
    candidate_fnames = [
        _fname_we + '.py',
        # _fname_we + '.pyc',
        # _fname_we + '.pyo',
    ]
    # Add extension library suffixes
    candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()]

    if sys_path is None:
        sys_path = sys.path

    # the empty string in sys.path indicates cwd. Change this to a '.'
    candidate_dpaths = ['.' if p == '' else p for p in sys_path]

    if exclude:
        def normalize(p):
            if sys.platform.startswith('win32'):  # nocover
                return realpath(p).lower()
            else:
                return realpath(p)
        # Keep only the paths not in exclude
        real_exclude = {normalize(p) for p in exclude}
        candidate_dpaths = [p for p in candidate_dpaths
                            if normalize(p) not in real_exclude]

    for dpath in candidate_dpaths:
        # Check for directory-based modules (has precedence over files)
        modpath = join(dpath, _fname_we)
        if exists(modpath):
            if isfile(join(modpath, '__init__.py')):
                if _isvalid(modpath, dpath):
                    return modpath

        # If that fails, check for file-based modules
        for fname in candidate_fnames:
            modpath = join(dpath, fname)
            if isfile(modpath):
                if _isvalid(modpath, dpath):
                    return modpath

def guess_segments_lines(segments, lines, nearline_tolerance=5.0):
    """
    given segments, outputs an array of line numbers, or -1 if the segment
    doesn't belong to any line
    """
    ys = segments[:, 1]
    closeness = numpy.abs(numpy.subtract.outer(ys, lines))
    # each row a y, each column a distance to each line
    line_of_y = numpy.argmin(closeness, axis=1)
    distance = numpy.min(closeness, axis=1)
    bad = distance > numpy.mean(distance) + nearline_tolerance * numpy.std(distance)
    line_of_y[bad] = -1
    return line_of_y

def EncloseAnsiText(text):
    """Enclose ANSI/SGR escape sequences with ANSI_START and ANSI_END."""
    return sgr_re.sub(lambda x: ANSI_START + x.group(1) + ANSI_END, text)

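# Self-contained sketch of the idea (hedged): the pattern and sentinels below
# are assumed stand-ins; the real sgr_re, ANSI_START, and ANSI_END live
# elsewhere in the module.
import re

sgr_re = re.compile(r'(\x1b\[[0-9;]*m)')    # assumption: an SGR-matching pattern
ANSI_START, ANSI_END = '<ansi>', '</ansi>'  # assumption: real sentinels differ

print(EncloseAnsiText('\x1b[31mred\x1b[0m'))
# -> <ansi>\x1b[31m</ansi>red<ansi>\x1b[0m</ansi>
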
def add_remote(self, path, name, remote_url, use_sudo=False, user=None,
               fetch=True):
    """
    Add a remote Git repository into a directory.

    :param path: Path of the working copy directory. This directory must exist
                 and be a Git working copy with a default remote to fetch from.
    :type path: str

    :param use_sudo: If ``True`` execute ``git`` with
                     :func:`fabric.operations.sudo`, else with
                     :func:`fabric.operations.run`.
    :type use_sudo: bool

    :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
                 with the given user. If ``use_sudo is False`` this parameter
                 has no effect.
    :type user: str

    :param name: name for the remote repository
    :type name: str

    :param remote_url: URL of the remote repository
    :type remote_url: str

    :param fetch: If ``True`` execute ``git remote add -f``
    :type fetch: bool
    """
    if path is None:
        raise ValueError("Path to the working copy is needed to add a remote")

    if fetch:
        cmd = 'git remote add -f %s %s' % (name, remote_url)
    else:
        cmd = 'git remote add %s %s' % (name, remote_url)

    with cd(path):
        if use_sudo and user is None:
            run_as_root(cmd)
        elif use_sudo:
            sudo(cmd, user=user)
        else:
            run(cmd)

def monitor_running_process(context: RunContext):
    """
    Runs an infinite loop that waits for the process to either exit on its own or time out

    Captures all output from the running process

    :param context: run context
    :type context: RunContext
    """
    while True:
        capture_output_from_running_process(context)
        if context.process_finished():
            context.return_code = context.command.returncode
            break
        if context.process_timed_out():
            context.return_code = -1
            raise ProcessTimeoutError(
                exe_name=context.exe_short_name,
                timeout=context.timeout,
            )
Runs an infinite loop that waits for the process to either exit on its own or time out

    Captures all output from the running process

    :param context: run context
    :type context: RunContext
def assert_called(_mock_self):
    """assert that the mock was called at least once
    """
    self = _mock_self
    if self.call_count == 0:
        # Parenthesize the fallback so `or 'mock'` applies to the name,
        # not to the already-formatted message.
        msg = ("Expected '%s' to have been called." %
               (self._mock_name or 'mock'))
        raise AssertionError(msg)
assert that the mock was called at least once
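A quick usage sketch: the same method ships on unittest.mock.Mock from Python 3.6 onward, which is what this assumes.

from unittest import mock

m = mock.Mock(name='fetch')
try:
    m.assert_called()
except AssertionError as exc:
    print(exc)  # Expected 'fetch' to have been called.

m()
m.assert_called()  # passes once the mock has been invoked at least once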
def _index(self):
    """Keeps a list of file paths that have been pickled in this directory.
    The index is stored in a json file in the same directory as the
    pickled objects."""
    if self.__index is None:
        try:
            with open(self._get_path('index.json')) as f:
                data = json.load(f)
        except (IOError, ValueError):
            self.__index = {}
        else:
            # 0 means version is not defined (= always delete cache):
            if data.get('version', 0) != self.version:
                self.clear_cache()
                self.__index = {}
            else:
                self.__index = data['index']
    return self.__index
Keeps a list of file paths that have been pickled in this directory. The index is stored in a json file in the same directory as the pickled objects.
def bandwidth(self, subid, params=None): ''' /v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/bandwidth', params, 'GET')
/v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth
def list_metrics(self, project, page_size=None, page_token=None):
    """List metrics for the project associated with this client.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

    :type project: str
    :param project: ID of the project whose metrics are to be listed.

    :type page_size: int
    :param page_size: maximum number of metrics to return. If not passed,
                      defaults to a value set by the API.

    :type page_token: str
    :param page_token: opaque marker for the next "page" of metrics. If not
                       passed, the API will return the first page of
                       metrics.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of
              :class:`~google.cloud.logging.metric.Metric`
              accessible to the current API.
    """
    extra_params = {}

    if page_size is not None:
        extra_params["pageSize"] = page_size

    path = "/projects/%s/metrics" % (project,)
    return page_iterator.HTTPIterator(
        client=self._client,
        api_request=self._client._connection.api_request,
        path=path,
        item_to_value=_item_to_metric,
        items_key="metrics",
        page_token=page_token,
        extra_params=extra_params,
    )
List metrics for the project associated with this client.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

    :type project: str
    :param project: ID of the project whose metrics are to be listed.

    :type page_size: int
    :param page_size: maximum number of metrics to return. If not passed,
                      defaults to a value set by the API.

    :type page_token: str
    :param page_token: opaque marker for the next "page" of metrics. If not
                       passed, the API will return the first page of
                       metrics.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of
              :class:`~google.cloud.logging.metric.Metric`
              accessible to the current API.
def handle_heartbeat_response_22(msg): """Process an internal heartbeat response message.""" if not msg.gateway.is_sensor(msg.node_id): return None msg.gateway.sensors[msg.node_id].heartbeat = msg.payload msg.gateway.alert(msg) return None
Process an internal heartbeat response message.
def persistent_menu(menu): """ more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu :param menu: :return: """ if len(menu) > 3: raise Invalid('menu should not exceed 3 call to actions') if any(len(item['call_to_actions']) > 5 for item in menu if item['type'] == 'nested'): raise Invalid('call_to_actions is limited to 5 for sub-levels') for item in menu: if len(item['title']) > 30: raise Invalid('menu item title should not exceed 30 characters') if item['type'] == 'postback' and len(item['payload']) > 1000: raise Invalid('menu item payload should not exceed 1000 characters')
more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu :param menu: :return:
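A sketch of a menu payload that passes these checks; the field names mirror the snippet, though real Messenger payloads may carry additional keys.

menu = [
    {'type': 'postback', 'title': 'Help', 'payload': 'HELP'},
    {'type': 'nested', 'title': 'More', 'call_to_actions': [
        {'type': 'postback', 'title': 'About', 'payload': 'ABOUT'},
    ]},
]
persistent_menu(menu)  # passes silently; raises Invalid on any violated limit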
def calc_measurement_error(self, tangents):
    '''
    Formula for the measurement error (the standard error of the mean):
        sqrt( sum_{i=1}^{n} (k_i - <k>)**2 / (n*(n-1)) )
    '''
    if len(tangents) < 2:
        return 0.0
    avg_tan = float(sum(tangents) / len(tangents))
    numerator = 0.0
    for i in tangents:
        numerator += (i - avg_tan) * (i - avg_tan)
    return math.sqrt(numerator / len(tangents) / (len(tangents) - 1))
Formula for the measurement error (the standard error of the mean): sqrt( sum_{i=1}^{n} (k_i - <k>)**2 / (n*(n-1)) )
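A worked numeric check of the formula with hypothetical tangent values; this is the standard error of the mean.

import math

tangents = [1.0, 1.2, 0.8, 1.1]
n = len(tangents)
avg = sum(tangents) / n                     # <k> = 1.025
ss = sum((k - avg) ** 2 for k in tangents)  # sum of squared deviations = 0.0875
print(math.sqrt(ss / (n * (n - 1))))        # ~0.0854, the same value calc_measurement_error returns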
def MultiReadClientSnapshot(self, client_ids, cursor=None): """Reads the latest client snapshots for a list of clients.""" int_ids = [db_utils.ClientIDToInt(cid) for cid in client_ids] query = ( "SELECT h.client_id, h.client_snapshot, UNIX_TIMESTAMP(h.timestamp)," " s.startup_info " "FROM clients as c FORCE INDEX (PRIMARY), " "client_snapshot_history as h FORCE INDEX (PRIMARY), " "client_startup_history as s FORCE INDEX (PRIMARY) " "WHERE h.client_id = c.client_id " "AND s.client_id = c.client_id " "AND h.timestamp = c.last_snapshot_timestamp " "AND s.timestamp = c.last_startup_timestamp " "AND c.client_id IN ({})").format(", ".join(["%s"] * len(client_ids))) ret = {cid: None for cid in client_ids} cursor.execute(query, int_ids) while True: row = cursor.fetchone() if not row: break cid, snapshot, timestamp, startup_info = row client_obj = mysql_utils.StringToRDFProto(rdf_objects.ClientSnapshot, snapshot) client_obj.startup_info = mysql_utils.StringToRDFProto( rdf_client.StartupInfo, startup_info) client_obj.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp) ret[db_utils.IntToClientID(cid)] = client_obj return ret
Reads the latest client snapshots for a list of clients.
def main(args): '''For command line experimentation. Sample output: $ python l2cs.py 'foo:bar AND baz:bork' Lucene input: foo:bar AND baz:bork Parsed representation: And([Term(u'foo', u'bar'), Term(u'baz', u'bork')]) Lucene form: (foo:bar AND baz:bork) Cloudsearch form: (and (field foo 'bar') (field baz 'bork')) ''' args = [unicode(u, 'utf-8') for u in args[1:]] schema = __sample_schema() if "--schema" in args else None if schema: args.pop(args.index("--schema")) query = u' '.join(args) print "Lucene input:", query parser = __sample_parser(schema=schema) parsed = parser.parse(query) print "Parsed representation:", repr(parsed) print "Lucene form:", unicode(parsed) cloudsearch_query = ''.join(walk_clause(parsed)) print "Cloudsearch form:", cloudsearch_query
For command line experimentation. Sample output: $ python l2cs.py 'foo:bar AND baz:bork' Lucene input: foo:bar AND baz:bork Parsed representation: And([Term(u'foo', u'bar'), Term(u'baz', u'bork')]) Lucene form: (foo:bar AND baz:bork) Cloudsearch form: (and (field foo 'bar') (field baz 'bork'))
def select_source(self, source): """Select a source from the list of sources.""" status = self.status() if status['power']: # Changing source when off may hang NAD7050 if status['source'] != source: # Setting the source to the current source will hang the NAD7050 if source in self.SOURCES: self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)
Select a source from the list of sources.
def run_eidos(endpoint, *args):
    """Run a given endpoint of Eidos through the command line.

    Parameters
    ----------
    endpoint : str
        The class within the Eidos package to run, for instance
        'apps.ExtractFromDirectory' will run
        'org.clulab.wm.eidos.apps.ExtractFromDirectory'
    *args
        Any further arguments to be passed as inputs to the class
        being run.
    """
    # Make the full path to the class that should be used
    call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
    cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
    logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
    subprocess.call(cmd)
Run a given endpoint of Eidos through the command line.

    Parameters
    ----------
    endpoint : str
        The class within the Eidos package to run, for instance
        'apps.ExtractFromDirectory' will run
        'org.clulab.wm.eidos.apps.ExtractFromDirectory'
    *args
        Any further arguments to be passed as inputs to the class
        being run.
def cum_returns(returns, starting_value=0, out=None): """ Compute cumulative returns from simple returns. Parameters ---------- returns : pd.Series, np.ndarray, or pd.DataFrame Returns of the strategy as a percentage, noncumulative. - Time series with decimal returns. - Example:: 2015-07-16 -0.012143 2015-07-17 0.045350 2015-07-20 0.030957 2015-07-21 0.004902 - Also accepts two dimensional data. In this case, each column is cumulated. starting_value : float, optional The starting returns. out : array-like, optional Array to use as output buffer. If not passed, a new array will be created. Returns ------- cumulative_returns : array-like Series of cumulative returns. """ if len(returns) < 1: return returns.copy() nanmask = np.isnan(returns) if np.any(nanmask): returns = returns.copy() returns[nanmask] = 0 allocated_output = out is None if allocated_output: out = np.empty_like(returns) np.add(returns, 1, out=out) out.cumprod(axis=0, out=out) if starting_value == 0: np.subtract(out, 1, out=out) else: np.multiply(out, starting_value, out=out) if allocated_output: if returns.ndim == 1 and isinstance(returns, pd.Series): out = pd.Series(out, index=returns.index) elif isinstance(returns, pd.DataFrame): out = pd.DataFrame( out, index=returns.index, columns=returns.columns, ) return out
Compute cumulative returns from simple returns. Parameters ---------- returns : pd.Series, np.ndarray, or pd.DataFrame Returns of the strategy as a percentage, noncumulative. - Time series with decimal returns. - Example:: 2015-07-16 -0.012143 2015-07-17 0.045350 2015-07-20 0.030957 2015-07-21 0.004902 - Also accepts two dimensional data. In this case, each column is cumulated. starting_value : float, optional The starting returns. out : array-like, optional Array to use as output buffer. If not passed, a new array will be created. Returns ------- cumulative_returns : array-like Series of cumulative returns.
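A usage sketch with a hypothetical three-day return series; the result is driven by the cumulative product of (1 + r).

import pandas as pd

rets = pd.Series([0.01, -0.02, 0.03],
                 index=pd.date_range('2015-07-16', periods=3))
print(cum_returns(rets))                      # ends at 1.01 * 0.98 * 1.03 - 1 ~ 0.0195
print(cum_returns(rets, starting_value=100))  # ends at ~101.95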
def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None, if_exists='raise', dry_run=False, **create_kws): """Copy the `source` array or group into the `dest` group. Parameters ---------- source : group or array/dataset A zarr group or array, or an h5py group or dataset. dest : group A zarr or h5py group. name : str, optional Name to copy the object to. shallow : bool, optional If True, only copy immediate children of `source`. without_attrs : bool, optional Do not copy user attributes. log : callable, file path or file-like object, optional If provided, will be used to log progress information. if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional How to handle arrays that already exist in the destination group. If 'raise' then a CopyError is raised on the first array already present in the destination group. If 'replace' then any array will be replaced in the destination. If 'skip' then any existing arrays will not be copied. If 'skip_initialized' then any existing arrays with all chunks initialized will not be copied (not available when copying to h5py). dry_run : bool, optional If True, don't actually copy anything, just log what would have happened. **create_kws Passed through to the create_dataset method when copying an array/dataset. Returns ------- n_copied : int Number of items copied. n_skipped : int Number of items skipped. n_bytes_copied : int Number of bytes of data that were actually copied. Examples -------- Here's an example of copying a group named 'foo' from an HDF5 file to a Zarr group:: >>> import h5py >>> import zarr >>> import numpy as np >>> source = h5py.File('data/example.h5', mode='w') >>> foo = source.create_group('foo') >>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,)) >>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,)) >>> zarr.tree(source) / ├── foo │ └── bar │ └── baz (100,) int64 └── spam (100,) int64 >>> dest = zarr.group() >>> from sys import stdout >>> zarr.copy(source['foo'], dest, log=stdout) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 all done: 3 copied, 0 skipped, 800 bytes copied (3, 0, 800) >>> dest.tree() # N.B., no spam / └── foo └── bar └── baz (100,) int64 >>> source.close() The ``if_exists`` parameter provides options for how to handle pre-existing data in the destination. Here are some examples of these options, also using ``dry_run=True`` to find out what would happen without actually copying anything:: >>> source = zarr.group() >>> dest = zarr.group() >>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100)) >>> spam = source.create_dataset('foo/spam', data=np.arange(1000)) >>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000)) >>> from sys import stdout >>> try: ... zarr.copy(source['foo'], dest, log=stdout, dry_run=True) ... except zarr.CopyError as e: ... print(e) ... copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 an object 'spam' already exists in destination '/foo' >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 copy /foo/spam (1000,) int64 dry run: 4 copied, 0 skipped (4, 0, 0) >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 skip /foo/spam (1000,) int64 dry run: 3 copied, 1 skipped (3, 1, 0) Notes ----- Please note that this is an experimental feature. 
The behaviour of this function is still evolving and the default behaviour and/or parameters may change in future versions. """ # value checks _check_dest_is_group(dest) # setup logging with _LogWriter(log) as log: # do the copying n_copied, n_skipped, n_bytes_copied = _copy( log, source, dest, name=name, root=True, shallow=shallow, without_attrs=without_attrs, if_exists=if_exists, dry_run=dry_run, **create_kws ) # log a final message with a summary of what happened _log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied) return n_copied, n_skipped, n_bytes_copied
Copy the `source` array or group into the `dest` group. Parameters ---------- source : group or array/dataset A zarr group or array, or an h5py group or dataset. dest : group A zarr or h5py group. name : str, optional Name to copy the object to. shallow : bool, optional If True, only copy immediate children of `source`. without_attrs : bool, optional Do not copy user attributes. log : callable, file path or file-like object, optional If provided, will be used to log progress information. if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional How to handle arrays that already exist in the destination group. If 'raise' then a CopyError is raised on the first array already present in the destination group. If 'replace' then any array will be replaced in the destination. If 'skip' then any existing arrays will not be copied. If 'skip_initialized' then any existing arrays with all chunks initialized will not be copied (not available when copying to h5py). dry_run : bool, optional If True, don't actually copy anything, just log what would have happened. **create_kws Passed through to the create_dataset method when copying an array/dataset. Returns ------- n_copied : int Number of items copied. n_skipped : int Number of items skipped. n_bytes_copied : int Number of bytes of data that were actually copied. Examples -------- Here's an example of copying a group named 'foo' from an HDF5 file to a Zarr group:: >>> import h5py >>> import zarr >>> import numpy as np >>> source = h5py.File('data/example.h5', mode='w') >>> foo = source.create_group('foo') >>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,)) >>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,)) >>> zarr.tree(source) / ├── foo │ └── bar │ └── baz (100,) int64 └── spam (100,) int64 >>> dest = zarr.group() >>> from sys import stdout >>> zarr.copy(source['foo'], dest, log=stdout) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 all done: 3 copied, 0 skipped, 800 bytes copied (3, 0, 800) >>> dest.tree() # N.B., no spam / └── foo └── bar └── baz (100,) int64 >>> source.close() The ``if_exists`` parameter provides options for how to handle pre-existing data in the destination. Here are some examples of these options, also using ``dry_run=True`` to find out what would happen without actually copying anything:: >>> source = zarr.group() >>> dest = zarr.group() >>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100)) >>> spam = source.create_dataset('foo/spam', data=np.arange(1000)) >>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000)) >>> from sys import stdout >>> try: ... zarr.copy(source['foo'], dest, log=stdout, dry_run=True) ... except zarr.CopyError as e: ... print(e) ... copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 an object 'spam' already exists in destination '/foo' >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 copy /foo/spam (1000,) int64 dry run: 4 copied, 0 skipped (4, 0, 0) >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 skip /foo/spam (1000,) int64 dry run: 3 copied, 1 skipped (3, 1, 0) Notes ----- Please note that this is an experimental feature. The behaviour of this function is still evolving and the default behaviour and/or parameters may change in future versions.
def draw_spectra(md, ds):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model

    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes

    best_ivars:
        The best-fit test inverse variances
    """
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:, i, :]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i, :] = spec_fit
        bad = ds.test_ivar[i, :] == SMALL ** 2
        cannon_ivar[i, :][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
Generate best-fit spectra for all the test objects Parameters ---------- md: model The Cannon spectral model ds: Dataset Dataset object Returns ------- best_fluxes: ndarray The best-fit test fluxes best_ivars: The best-fit test inverse variances
def all_with(self, x): """Returns all arguments containing given string (or list thereof)""" _args = [] for arg in self.all: if is_collection(x): for _x in x: if _x in arg: _args.append(arg) break else: if x in arg: _args.append(arg) return Args(_args, no_argv=True)
Returns all arguments containing given string (or list thereof)
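A hedged usage sketch; the Args constructor signature is assumed from the snippet's own recursive call Args(_args, no_argv=True), and .all is assumed to expose the stored arguments.

args = Args(['--input', 'data.csv', '--output', 'out.csv'], no_argv=True)
print(args.all_with('put').all)           # ['--input', '--output']
print(args.all_with(['.csv', 'in']).all)  # ['--input', 'data.csv', 'out.csv']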
def get_default_config(self): """ Return the default config for the handler """ config = super(ArchiveHandler, self).get_default_config() config.update({ 'log_file': '', 'when': 'midnight', 'days': 7, 'rollover_interval': 1, 'encoding': None, 'propagate': False, }) return config
Return the default config for the handler
def remove_direct_link_triples(train, valid, test): """Remove direct links in the training sets.""" pairs = set() merged = valid + test for t in merged: pairs.add((t.head, t.tail)) filtered = filterfalse(lambda t: (t.head, t.tail) in pairs or (t.tail, t.head) in pairs, train) return list(filtered)
Remove direct links in the training sets.
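A usage sketch with hypothetical triples; any object exposing .head and .tail works, so a namedtuple stands in here (the snippet's filterfalse comes from itertools).

from collections import namedtuple

Triple = namedtuple('Triple', 'head relation tail')
train = [Triple('a', 'r1', 'b'), Triple('c', 'r2', 'd')]
valid = [Triple('b', 'r3', 'a')]  # links a and b, so the reversed train edge is dropped
test = []
print(remove_direct_link_triples(train, valid, test))
# [Triple(head='c', relation='r2', tail='d')]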
def load_dataframe(fobj, compression='gzip'): """Given an open file for `hip_main.dat.gz`, return a parsed dataframe. If your copy of ``hip_main.dat`` has already been unzipped, pass the optional argument ``compression=None``. """ try: from pandas import read_fwf except ImportError: raise ImportError(PANDAS_MESSAGE) names, colspecs = zip( ('hip', (2, 14)), ('magnitude', (41, 46)), ('ra_degrees', (51, 63)), ('dec_degrees', (64, 76)), ('parallax_mas', (79, 86)), # TODO: have Star load this ('ra_mas_per_year', (87, 95)), ('dec_mas_per_year', (96, 104)), ) df = read_fwf(fobj, colspecs, names=names, compression=compression) df = df.assign( ra_hours = df['ra_degrees'] / 15.0, epoch_year = 1991.25, ) return df.set_index('hip')
Given an open file for `hip_main.dat.gz`, return a parsed dataframe. If your copy of ``hip_main.dat`` has already been unzipped, pass the optional argument ``compression=None``.
def display_multi(annotations, fig_kw=None, meta=True, **kwargs): '''Display multiple annotations with shared axes Parameters ---------- annotations : jams.AnnotationArray A collection of annotations to display fig_kw : dict Keyword arguments to `plt.figure` meta : bool If `True`, display annotation metadata for each annotation kwargs Additional keyword arguments to the `mir_eval.display` routines Returns ------- fig The created figure axs List of subplot axes corresponding to each displayed annotation ''' if fig_kw is None: fig_kw = dict() fig_kw.setdefault('sharex', True) fig_kw.setdefault('squeeze', True) # Filter down to coercable annotations first display_annotations = [] for ann in annotations: for namespace in VIZ_MAPPING: if can_convert(ann, namespace): display_annotations.append(ann) break # If there are no displayable annotations, fail here if not len(display_annotations): raise ParameterError('No displayable annotations found') fig, axs = plt.subplots(nrows=len(display_annotations), ncols=1, **fig_kw) # MPL is stupid when making singleton subplots. # We catch this and make it always iterable. if len(display_annotations) == 1: axs = [axs] for ann, ax in zip(display_annotations, axs): kwargs['ax'] = ax display(ann, meta=meta, **kwargs) return fig, axs
Display multiple annotations with shared axes Parameters ---------- annotations : jams.AnnotationArray A collection of annotations to display fig_kw : dict Keyword arguments to `plt.figure` meta : bool If `True`, display annotation metadata for each annotation kwargs Additional keyword arguments to the `mir_eval.display` routines Returns ------- fig The created figure axs List of subplot axes corresponding to each displayed annotation
def get_instance(self, payload): """ Build an instance of ApplicationInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.application.ApplicationInstance :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance """ return ApplicationInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Build an instance of ApplicationInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.application.ApplicationInstance :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
def remove_pid_file(process_name): """ removes pid file """ pid_filename = get_pid_filename(process_name) try: os.remove(pid_filename) print('Removed pid file at: {0}'.format(pid_filename), file=sys.stdout) except Exception as e: print('Unable to remove pid file at: {0}, because of: {1}'.format(pid_filename, e), file=sys.stderr)
removes pid file
def from_path(cls, path): """Takes a path and returns a File object with the path as the PFN.""" urlparts = urlparse.urlsplit(path) site = 'nonlocal' if (urlparts.scheme == '' or urlparts.scheme == 'file'): if os.path.isfile(urlparts.path): path = os.path.abspath(urlparts.path) path = urlparse.urljoin('file:', urllib.pathname2url(path)) site = 'local' fil = File(os.path.basename(path)) fil.PFN(path, site) return fil
Takes a path and returns a File object with the path as the PFN.
def build_command(chunks): """ Create a command from various parts. The parts provided may include a base, flags, option-bound arguments, and positional arguments. Each element must be either a string or a two-tuple. Raw strings are interpreted as either the command base, a pre-joined pair (or multiple pairs) of option and argument, a series of positional arguments, or a combination of those elements. The only modification they undergo is trimming of any space characters from each end. :param Iterable[str | (str, str | NoneType)] chunks: the collection of the command components to interpret, modify, and join to create a single meaningful command :return str: the single meaningful command built from the given components :raise ValueError: if no command parts are provided """ if not chunks: raise ValueError( "No command parts: {} ({})".format(chunks, type(chunks))) if isinstance(chunks, str): return chunks parsed_pieces = [] for cmd_part in chunks: if cmd_part is None: continue try: # Trim just space, not all whitespace. # This prevents damage to an option that specifies, # say, tab as a delimiter. parsed_pieces.append(cmd_part.strip(" ")) except AttributeError: option, argument = cmd_part if argument is None or argument == "": continue option, argument = option.strip(" "), str(argument).strip(" ") parsed_pieces.append("{} {}".format(option, argument)) return " ".join(parsed_pieces)
Create a command from various parts. The parts provided may include a base, flags, option-bound arguments, and positional arguments. Each element must be either a string or a two-tuple. Raw strings are interpreted as either the command base, a pre-joined pair (or multiple pairs) of option and argument, a series of positional arguments, or a combination of those elements. The only modification they undergo is trimming of any space characters from each end. :param Iterable[str | (str, str | NoneType)] chunks: the collection of the command components to interpret, modify, and join to create a single meaningful command :return str: the single meaningful command built from the given components :raise ValueError: if no command parts are provided
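A usage sketch tracing each kind of chunk through build_command; the command itself is hypothetical.

cmd = build_command([
    "samtools view",        # base command
    "-b",                   # a flag passed as a raw string
    ("-o", " out.bam "),    # option/argument pair; spaces trimmed from each side
    ("-@", None),           # dropped entirely: the argument is None
    "in.bam",               # positional argument
])
print(cmd)  # samtools view -b -o out.bam in.bam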
def from_linearized(first, second, intersections): """Determine curve-curve intersection from pair of linearizations. .. note:: This assumes that at least one of ``first`` and ``second`` is not a line. The line-line case should be handled "early" by :func:`check_lines`. .. note:: This assumes the caller has verified that the bounding boxes for ``first`` and ``second`` actually intersect. If there is an intersection along the segments, adds that intersection to ``intersections``. Otherwise, returns without doing anything. Args: first (Linearization): First curve being intersected. second (Linearization): Second curve being intersected. intersections (list): A list of existing intersections. Raises: ValueError: If ``first`` and ``second`` both have linearization error of ``0.0`` (i.e. they are both lines). This is because this function expects the caller to have used :func:`check_lines` already. """ # NOTE: There is no corresponding "enable", but the disable only applies # in this lexical scope. # pylint: disable=too-many-return-statements s, t, success = segment_intersection( first.start_node, first.end_node, second.start_node, second.end_node ) bad_parameters = False if success: if not ( _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0) ): bad_parameters = True else: if first.error == 0.0 and second.error == 0.0: raise ValueError(_UNHANDLED_LINES) # Just fall back to a Newton iteration starting in the middle of # the given intervals. bad_parameters = True s = 0.5 t = 0.5 if bad_parameters: # In the unlikely case that we have parallel segments or segments # that intersect outside of [0, 1] x [0, 1], we can still exit # if the convex hulls don't intersect. if not convex_hull_collide(first.curve.nodes, second.curve.nodes): return # Now, promote ``s`` and ``t`` onto the original curves. orig_s = (1 - s) * first.curve.start + s * first.curve.end orig_t = (1 - t) * second.curve.start + t * second.curve.end refined_s, refined_t = _intersection_helpers.full_newton( orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes ) refined_s, success = _helpers.wiggle_interval(refined_s) if not success: return refined_t, success = _helpers.wiggle_interval(refined_t) if not success: return add_intersection(refined_s, refined_t, intersections)
Determine curve-curve intersection from pair of linearizations. .. note:: This assumes that at least one of ``first`` and ``second`` is not a line. The line-line case should be handled "early" by :func:`check_lines`. .. note:: This assumes the caller has verified that the bounding boxes for ``first`` and ``second`` actually intersect. If there is an intersection along the segments, adds that intersection to ``intersections``. Otherwise, returns without doing anything. Args: first (Linearization): First curve being intersected. second (Linearization): Second curve being intersected. intersections (list): A list of existing intersections. Raises: ValueError: If ``first`` and ``second`` both have linearization error of ``0.0`` (i.e. they are both lines). This is because this function expects the caller to have used :func:`check_lines` already.
def get_indirect_url_lock_list(self, url, principal=None):
    """Return a list of valid lockDicts that protect <url> directly or indirectly.

    If a principal is given, only locks owned by this principal are returned.

    Side effect: expired locks for this url and all parents are purged.
    """
    url = normalize_lock_root(url)
    lockList = []
    u = url
    while u:
        ll = self.storage.get_lock_list(
            u, include_root=True, include_children=False, token_only=False
        )
        for l in ll:
            if u != url and l["depth"] != "infinity":
                continue  # We only consider parents with Depth: infinity

            # TODO: handle shared locks in some way?
            # if (l["scope"] == "shared" and lock_scope == "shared"
            #        and principal != l["principal"]):
            #     continue  # Only compatible with shared locks by other users
            if principal is None or principal == l["principal"]:
                lockList.append(l)
        u = util.get_uri_parent(u)
    return lockList
Return a list of valid lockDicts that protect <url> directly or indirectly.

    If a principal is given, only locks owned by this principal are returned.

    Side effect: expired locks for this url and all parents are purged.
def generate_np(self, x, **kwargs): """ Generate adversarial images in a for loop. :param y: An array of shape (n, nb_classes) for true labels. :param y_target: An array of shape (n, nb_classes) for target labels. Required for targeted attack. :param image_target: An array of shape (n, **image shape) for initial target images. Required for targeted attack. See parse_params for other kwargs. """ x_adv = [] if 'image_target' in kwargs and kwargs['image_target'] is not None: image_target = np.copy(kwargs['image_target']) else: image_target = None if 'y_target' in kwargs and kwargs['y_target'] is not None: y_target = np.copy(kwargs['y_target']) else: y_target = None for i, x_single in enumerate(x): img = np.expand_dims(x_single, axis=0) if image_target is not None: single_img_target = np.expand_dims(image_target[i], axis=0) kwargs['image_target'] = single_img_target if y_target is not None: single_y_target = np.expand_dims(y_target[i], axis=0) kwargs['y_target'] = single_y_target adv_img = super(BoundaryAttackPlusPlus, self).generate_np(img, **kwargs) x_adv.append(adv_img) return np.concatenate(x_adv, axis=0)
Generate adversarial images in a for loop. :param y: An array of shape (n, nb_classes) for true labels. :param y_target: An array of shape (n, nb_classes) for target labels. Required for targeted attack. :param image_target: An array of shape (n, **image shape) for initial target images. Required for targeted attack. See parse_params for other kwargs.
def dec_ptr(self, ptr): """Get previous circular buffer data pointer.""" result = ptr - self.reading_len[self.ws_type] if result < self.data_start: result = 0x10000 - self.reading_len[self.ws_type] return result
Get previous circular buffer data pointer.
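A worked sketch of the wrap-around arithmetic; the record length and buffer start are hypothetical stand-ins for the instance attributes reading_len[ws_type] and data_start.

reading_len, data_start = 16, 0x0100  # hypothetical record size and buffer start

def dec_ptr(ptr):
    result = ptr - reading_len
    if result < data_start:
        result = 0x10000 - reading_len  # wrap back to the last record in the buffer
    return result

print(hex(dec_ptr(0x0120)))  # 0x110
print(hex(dec_ptr(0x0100)))  # 0xfff0 (wrapped)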
def bash(filename): """Runs a bash script in the local directory""" sys.stdout.flush() subprocess.call("bash {}".format(filename), shell=True)
Runs a bash script in the local directory
def get_group(self, t, i): """Get group number.""" try: value = [] if t in _DIGIT and t != '0': value.append(t) t = next(i) if t in _DIGIT: value.append(t) else: i.rewind(1) except StopIteration: pass return ''.join(value) if value else None
Get group number.
def _walk_tree(self, data, scheme, ancestors=None, property_name=None, prefix=None):
    """
    This function takes configuration data and a validation scheme, then walks the
    configuration tree validating the configuration data against the scheme provided.
    Will raise an error on failure, otherwise returns None.

    Usage::

        >>> self._walk_tree(
        >>>     OrderedDict([('root', config_data)]),
        >>>     registries,
        >>>     REGISTRIES_SCHEME
        >>> )

    :param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
    :param data: The data that needs to be validated against the scheme.
    :param scheme: A :dict:, The scheme defining the validations.
    :param property_name: A :string:, This is the name of the data getting validated.
    :param prefix:
    :rtype: :None: will raise error if a validation fails.
    """
    if property_name is None:
        property_name = 'root'  # hack until i add this to references
    # reorder validations, putting required first. If the data doesn't exist there is no need to continue.
    order = ['registries'] + [key for key in scheme.keys() if key not in ('registries',)]
    scheme = OrderedDict(sorted(scheme.items(), key=lambda x: order.index(x[0])))

    if data is None:
        return
    elif not isinstance(property_name, six.string_types):
        raise TypeError('property_name must be a string.')

    ancestors = self._update_ancestors(data, property_name, ancestors)
    if isinstance(ancestors, OrderedDict):
        if list(ancestors)[0] != 'root':
            raise LookupError('root must be the first item in ancestors.')
    else:
        raise TypeError('ancestors must be an OrderedDict. {0} was passed'.format(type(ancestors)))

    if not isinstance(scheme, dict):
        raise TypeError('scheme must be a dict. {0} was passed.'.format(type(scheme)))
    scheme = self._update_scheme(scheme, ancestors)

    if property_name is not None and data:
        data = self._get_cascading_attr(
            property_name, *list(ancestors)[1:]
        ) if scheme.get('cascading', False) else data

    for err in self.__execute_validations(scheme.get('is', {}), data, property_name, ancestors, prefix=prefix):
        if err:
            raise err
    else:
        self._create_attr(property_name, data, ancestors)

    self.__validate_unrecognized_values(scheme, data, ancestors, prefix)
    self.__populate_scheme_references(scheme, property_name)
    self.__validate_config_properties(scheme, data, ancestors, prefix)
This function takes configuration data and a validation scheme, then walks the
    configuration tree validating the configuration data against the scheme provided.
    Will raise an error on failure, otherwise returns None.

    Usage::

        >>> self._walk_tree(
        >>>     OrderedDict([('root', config_data)]),
        >>>     registries,
        >>>     REGISTRIES_SCHEME
        >>> )

    :param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
    :param data: The data that needs to be validated against the scheme.
    :param scheme: A :dict:, The scheme defining the validations.
    :param property_name: A :string:, This is the name of the data getting validated.
    :param prefix:
    :rtype: :None: will raise error if a validation fails.
def _add_arguments(param, parser, used_char_args, add_nos):
    '''
    Add the argument(s) to an ArgumentParser (using add_argument) for a given
    parameter. used_char_args is the set of -short options currently already in
    use, and is updated (if necessary) by this function. If add_nos is True,
    this will also add an inverse switch for all boolean options. For
    instance, for the boolean parameter "verbose", this will create --verbose
    and --no-verbose.
    '''
    # Impl note: This function is kept separate from make_parser because it's
    # already very long and I wanted to separate out as much as possible into
    # its own call scope, to prevent even the possibility of subtle mutation
    # bugs.
    if param.kind is param.POSITIONAL_ONLY:
        raise PositionalArgError(param)
    elif param.kind is param.VAR_KEYWORD:
        raise KWArgError(param)

    # These are the kwargs for the add_argument function.
    arg_spec = {}
    is_option = False

    # Get the type and default from the annotation.
    arg_type, description = _get_type_description(param.annotation)

    # Get the default value
    default = param.default

    # If there is no explicit type, and the default is present and not None,
    # infer the type from the default.
    if arg_type is None and default not in {_empty, None}:
        arg_type = type(default)

    # Add default. The presence of a default means this is an option, not an
    # argument.
    if default is not _empty:
        arg_spec['default'] = default
        is_option = True

    # Add the type
    if arg_type is not None:
        # Special case for bool: make it just a --switch
        if arg_type is bool:
            if not default or default is _empty:
                arg_spec['action'] = 'store_true'
            else:
                arg_spec['action'] = 'store_false'

            # Switches are always options
            is_option = True

        # Special case for file types: make it a string type, for filename
        elif isinstance(default, IOBase):
            arg_spec['type'] = str

        # TODO: special case for list type.
        #   - How to specify type of list members?
        #     - param: [int]
        #     - param: int =[]
        #   - action='append' vs nargs='*'

        else:
            arg_spec['type'] = arg_type

    # nargs: if the signature includes *args, collect them as trailing CLI
    # arguments in a list. *args can't have a default value, so it can never be
    # an option.
    if param.kind is param.VAR_POSITIONAL:
        # TODO: consider depluralizing metavar/name here.
        arg_spec['nargs'] = '*'

    # Add description.
    if description is not None:
        arg_spec['help'] = description

    # Get the --flags
    flags = []
    name = param.name

    if is_option:
        # Add the first letter as a -short option.
        for letter in name[0], name[0].swapcase():
            if letter not in used_char_args:
                used_char_args.add(letter)
                flags.append('-{}'.format(letter))
                break

        # If the parameter is a --long option, or is a -short option that
        # somehow failed to get a flag, add it.
        if len(name) > 1 or not flags:
            flags.append('--{}'.format(name))

        arg_spec['dest'] = name
    else:
        flags.append(name)

    parser.add_argument(*flags, **arg_spec)

    # Create the --no- version for boolean switches
    if add_nos and arg_type is bool:
        parser.add_argument(
            '--no-{}'.format(name),
            action='store_const',
            dest=name,
            const=default if default is not _empty else False)
Add the argument(s) to an ArgumentParser (using add_argument) for a given parameter. used_char_args is the set of -short options currently already in use, and is updated (if necessary) by this function. If add_nos is True, this will also add an inverse switch for all boolean options. For instance, for the boolean parameter "verbose", this will create --verbose and --no-verbose.
def _build_default_options(self):
    """Provide the default value for all allowed fields.

    Custom FactoryOptions classes should override this method
    to update() its return value.
    """
    return [
        OptionDefault('model', None, inherit=True),
        OptionDefault('abstract', False, inherit=False),
        OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
        OptionDefault('inline_args', (), inherit=True),
        OptionDefault('exclude', (), inherit=True),
        OptionDefault('rename', {}, inherit=True),
    ]
Provide the default value for all allowed fields. Custom FactoryOptions classes should override this method to update() its return value.
def display(contents, domain=DEFAULT_DOMAIN, force_gist=False):
    """ Open a web browser pointing to geojson.io with the specified content.

    If the content is large, an anonymous gist will be created on github and
    the URL will instruct geojson.io to download the gist data and then
    display. If the content is small, this step is not needed as the data can
    be included in the URL

    Parameters
    ----------
    contents - (see make_geojson)
    domain - string, default http://geojson.io
    force_gist - bool, default False
        Create an anonymous gist on Github regardless of the size of the
        contents
    """
    url = make_url(contents, domain, force_gist)
    webbrowser.open(url)
    return url
Open a web browser pointing to geojson.io with the specified content.

    If the content is large, an anonymous gist will be created on github and
    the URL will instruct geojson.io to download the gist data and then
    display. If the content is small, this step is not needed as the data can
    be included in the URL

    Parameters
    ----------
    contents - (see make_geojson)
    domain - string, default http://geojson.io
    force_gist - bool, default False
        Create an anonymous gist on Github regardless of the size of the
        contents
def posterior(self, x, s=1.): """Model is X_1,...,X_n ~ N(theta, s^2), theta~self, s fixed""" pr0 = 1. / self.sigma**2 # prior precision prd = x.size / s**2 # data precision varp = 1. / (pr0 + prd) # posterior variance mu = varp * (pr0 * self.mu + prd * x.mean()) return TruncNormal(mu=mu, sigma=np.sqrt(varp), a=self.a, b=self.b)
Model is X_1,...,X_n ~ N(theta, s^2), theta~self, s fixed
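A worked numeric check of the conjugate-normal update with hypothetical prior and data; the truncation bounds only re-enter through the returned TruncNormal.

import numpy as np

# Hypothetical prior N(0, 2^2), fixed s = 1, three observations.
mu0, sigma0, s = 0.0, 2.0, 1.0
x = np.array([1.0, 2.0, 3.0])
pr0 = 1.0 / sigma0 ** 2                    # prior precision = 0.25
prd = x.size / s ** 2                      # data precision  = 3.0
varp = 1.0 / (pr0 + prd)                   # posterior variance ~ 0.308
mu = varp * (pr0 * mu0 + prd * x.mean())   # posterior mean ~ 1.846
print(mu, np.sqrt(varp))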
def stat_file(self, id=None, path="/"): """ Stat a file in an allocation directory. https://www.nomadproject.io/docs/http/client-fs-stat.html arguments: - id - path returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException """ if id: return self.request(id, params={"path": path}, method="get").json() else: return self.request(params={"path": path}, method="get").json()
Stat a file in an allocation directory. https://www.nomadproject.io/docs/http/client-fs-stat.html arguments: - id - path returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException
def close(self): """Close open resources.""" self._close_proc() super(PipeReader, self).close() if self._tempfile: try: os.unlink(self._tempfile) except OSError: pass self._tempfile = None
Close open resources.
def from_dict(d: Dict[str, Any]) -> 'CoverageInstructions': """ Loads a set of coverage instructions from a given dictionary. Raises: BadCoverageInstructions: if the given coverage instructions are illegal. """ name_type = d['type'] cls = _NAME_TO_INSTRUCTIONS[name_type] return cls.from_dict(d)
Loads a set of coverage instructions from a given dictionary. Raises: BadCoverageInstructions: if the given coverage instructions are illegal.
def iter_list_market_profit_and_loss( self, market_ids, chunk_size, **kwargs): """Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss` """ return itertools.chain(*( self.list_market_profit_and_loss(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss`
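A minimal sketch of the chunk-then-chain pattern; utils.get_chunks is assumed to behave like the hypothetical helper below.

import itertools

def get_chunks(items, chunk_size):
    """Hypothetical stand-in yielding successive chunk_size-sized slices."""
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

market_ids = ['1.100', '1.101', '1.102', '1.103', '1.104']
chunks = list(get_chunks(market_ids, 2))
print(chunks)  # [['1.100', '1.101'], ['1.102', '1.103'], ['1.104']]
# itertools.chain(*...) then flattens the per-chunk results into one iterable,
# as iter_list_market_profit_and_loss does with the API responses.
print(list(itertools.chain(*chunks)))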
def branch(self): """ Get a flattened representation of the branch. @return: A flat list of nodes. @rtype: [L{Element},..] """ branch = [self] for c in self.children: branch += c.branch() return branch
Get a flattened representation of the branch. @return: A flat list of nodes. @rtype: [L{Element},..]
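A self-contained sketch with a minimal stand-in for the Element node type, showing that branch() flattens the tree in pre-order.

class Node:
    """Minimal stand-in for the Element node type (an assumption for illustration)."""
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)
    def branch(self):
        branch = [self]
        for c in self.children:
            branch += c.branch()
        return branch

tree = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([n.name for n in tree.branch()])  # ['root', 'a', 'a1', 'b'] (pre-order)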
def calculate_derivative_P(self, P, T, zs, ws, method, order=1):
    r'''Method to calculate a derivative of a mixture property with respect
    to pressure at constant temperature and composition
    of a given order using a specified method. Uses SciPy's derivative function,
    with a delta of 0.01 Pa and a number of points equal to 2*order + 1.

    This method can be overwritten by subclasses who may prefer to add
    analytical methods for some or all methods as this is much faster.

    If the calculation does not succeed, returns the actual error
    encountered.

    Parameters
    ----------
    P : float
        Pressure at which to calculate the derivative, [Pa]
    T : float
        Temperature at which to calculate the derivative, [K]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Method for which to find the derivative
    order : int
        Order of the derivative, >= 1

    Returns
    -------
    d_prop_d_P_at_T : float
        Calculated derivative property at constant temperature,
        [`units/Pa^order`]
    '''
    f = lambda P: self.calculate(T, P, zs, ws, method)
    return derivative(f, P, dx=1e-2, n=order, order=1+order*2)
r'''Method to calculate a derivative of a mixture property with respect
    to pressure at constant temperature and composition
    of a given order using a specified method. Uses SciPy's derivative function,
    with a delta of 0.01 Pa and a number of points equal to 2*order + 1.

    This method can be overwritten by subclasses who may prefer to add
    analytical methods for some or all methods as this is much faster.

    If the calculation does not succeed, returns the actual error
    encountered.

    Parameters
    ----------
    P : float
        Pressure at which to calculate the derivative, [Pa]
    T : float
        Temperature at which to calculate the derivative, [K]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Method for which to find the derivative
    order : int
        Order of the derivative, >= 1

    Returns
    -------
    d_prop_d_P_at_T : float
        Calculated derivative property at constant temperature,
        [`units/Pa^order`]
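The same finite-difference pattern sketched with a toy function; scipy.misc.derivative is assumed here (it is deprecated in recent SciPy releases).

from scipy.misc import derivative

f = lambda P: 2.0 * P + 3.0  # toy property, linear in pressure
# n=1 (first derivative), order=3 points, matching order=1+order*2 in the snippet
print(derivative(f, 1e5, dx=1e-2, n=1, order=3))  # ~2.0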
def create(model_config, reinforcer, optimizer, storage, total_frames, batches_per_epoch, callbacks=None, scheduler=None, openai_logging=False): """ Vel factory function """ from vel.openai.baselines import logger logger.configure(dir=model_config.openai_dir()) return RlTrainCommand( model_config=model_config, reinforcer=reinforcer, optimizer_factory=optimizer, scheduler_factory=scheduler, storage=storage, callbacks=callbacks, total_frames=int(float(total_frames)), batches_per_epoch=int(batches_per_epoch), openai_logging=openai_logging )
Vel factory function
def _check_custom_url_parameters(self): """Checks if custom url parameters are valid parameters. Throws ValueError if the provided parameter is not a valid parameter. """ for param in self.custom_url_params.keys(): if param is not CustomUrlParam.TRANSPARENT: raise ValueError('Parameter {} is currently not supported.'.format(param))
Checks if custom url parameters are valid parameters. Throws ValueError if the provided parameter is not a valid parameter.
def __neuron_evolution(self, index): """! @brief Calculates state of the neuron with specified index. @param[in] index (uint): Index of neuron in the network. @return (double) New output of the specified neuron. """ value = 0.0 for index_neighbor in range(self.__num_osc): value += self.__weights[index][index_neighbor] * (1.0 - 2.0 * (self.__output[index_neighbor] ** 2)) return value / self.__weights_summary[index]
! @brief Calculates state of the neuron with specified index. @param[in] index (uint): Index of neuron in the network. @return (double) New output of the specified neuron.
def __generate_file(self, template_filename, context, generated_filename, force=False):
    """
    Generate **one** (source code) file from a template.

    The file is **only** generated if needed, i.e. if ``force`` is set to ``True`` or if
    the generated file is older than the template file.

    The generated file is written in the same directory as the template file.

    Args:
        template_filename (str): **Absolute** filename of a template file to translate.
        context (dict): Dictionary with ``(key, val)`` replacements.
        generated_filename (str): **Absolute** filename of the generated file.
        force (bool): If set to ``True``, the file is generated no matter what.
    """
    # TODO: maybe avoid reading the same template file again and again, i.e. parse it once
    #       and generate all needed files without reparsing the template.
    # test if file is non existing or needs to be regenerated
    if force or (not os.path.isfile(generated_filename)
                 or os.stat(template_filename).st_mtime - os.stat(generated_filename).st_mtime > 1):
        self.log_info('  Parsing file %s' % template_filename)
        code_generated = self.__jinja2_environment.get_template(template_filename).render(context)
        with open(generated_filename, 'w') as f:
            self.log_info('  Generating file %s' % generated_filename)
            f.write(code_generated.encode('utf8'))
Generate **one** (source code) file from a template.

    The file is **only** generated if needed, i.e. if ``force`` is set to ``True`` or if
    the generated file is older than the template file.

    The generated file is written in the same directory as the template file.

    Args:
        template_filename (str): **Absolute** filename of a template file to translate.
        context (dict): Dictionary with ``(key, val)`` replacements.
        generated_filename (str): **Absolute** filename of the generated file.
        force (bool): If set to ``True``, the file is generated no matter what.
def unstructure_attrs_astuple(self, obj): # type: (Any) -> Tuple """Our version of `attrs.astuple`, so we can call back to us.""" attrs = obj.__class__.__attrs_attrs__ return tuple(self.unstructure(getattr(obj, a.name)) for a in attrs)
Our version of `attrs.astuple`, so we can call back to us.
def _format_background(background): """Formats the background section :param background: the background content or file. :type background: str or file :returns: the background content. :rtype: str """ # Getting the background if os.path.isfile(background): with open(background, "r") as i_file: background = i_file.read().splitlines() else: background = background.splitlines() # Formatting final_background = "" for line in background: if line == "": final_background += r"\\" + "\n\n" continue final_background += latex.wrap_lines(latex.sanitize_tex(line)) return final_background
Formats the background section :param background: the background content or file. :type background: str or file :returns: the background content. :rtype: str
def make_describe_attrs(self): """Create tokens for setting is_noy_spec on describes""" lst = [] if self.all_groups: lst.append((NEWLINE, '\n')) lst.append((INDENT, '')) for group in self.all_groups: if group.name: lst.extend(self.tokens.make_describe_attr(group.kls_name)) return lst
Create tokens for setting is_noy_spec on describes