def dihedral(x, dih):
    """ Perform any of 8 permutations of 90-degree rotations or flips for image x. """
    x = np.rot90(x, dih % 4)
    return x if dih < 4 else np.fliplr(x)
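A minimal usage sketch of iterating all eight dihedral transforms (the array below is illustrative; assumes NumPy's `rot90`/`fliplr` semantics):

    import numpy as np

    img = np.arange(9).reshape(3, 3)
    # indices 0-3 are the four rotations; 4-7 are the same rotations flipped
    variants = [dihedral(img, d) for d in range(8)]
    # symmetric images can produce duplicate variants, so at most 8 are distinct
    assert len({v.tobytes() for v in variants}) <= 8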
def get_bounding_box(self):
    """ Get the bounding box of a pointlist. """
    pointlist = self.get_pointlist()

    # Initialize bounding box parameters to save values
    minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
    miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
    mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]

    # Adjust parameters
    for stroke in pointlist:
        for p in stroke:
            minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
            miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
            mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
    return {"minx": minx, "maxx": maxx,
            "miny": miny, "maxy": maxy,
            "mint": mint, "maxt": maxt}
def box_model_domain(num_points=2, **kwargs):
    """Creates a box model domain (a single abstract axis).

    :param int num_points: number of boxes [default: 2]
    :returns: Domain with single axis of type ``'abstract'``
              and ``self.domain_type = 'box'``
    :rtype: :class:`_Domain`

    :Example: ::

        >>> from climlab import domain
        >>> box = domain.box_model_domain(num_points=2)
        >>> print box
        climlab Domain object with domain_type=box and shape=(2,)

    """
    ax = Axis(axis_type='abstract', num_points=num_points)
    boxes = _Domain(axes=ax, **kwargs)
    boxes.domain_type = 'box'
    return boxes
def get_key_from_envs(envs, key):
    """Return the value of a key from the given dict respecting namespaces.

    Data can also be a list of data dicts.
    """
    # If it barks like a dict, make it a list. Have to use `get` since
    # dicts and lists both have __getitem__.
    if hasattr(envs, 'get'):
        envs = [envs]

    for env in envs:
        if key in env:
            return env[key]

    return NO_VALUE
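A brief usage sketch (the env dicts are illustrative; assumes NO_VALUE is the module's not-found sentinel):

    envs = [{'db.host': 'localhost'}, {'db.host': 'fallback', 'db.port': 5432}]
    assert get_key_from_envs(envs, 'db.host') == 'localhost'   # first match wins
    assert get_key_from_envs({'a': 1}, 'a') == 1               # a single dict also works
    assert get_key_from_envs(envs, 'missing') is NO_VALUE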
def vcenter_discovery_ignore_delete_all_response_ignore_value(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id_key = ET.SubElement(vcenter, "id")
    id_key.text = kwargs.pop('id')
    discovery = ET.SubElement(vcenter, "discovery")
    ignore_delete_all_response = ET.SubElement(discovery,
                                               "ignore-delete-all-response")
    ignore_value = ET.SubElement(ignore_delete_all_response, "ignore-value")
    ignore_value.text = kwargs.pop('ignore_value')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def htmlTable(tableData, reads1, reads2, square, matchAmbiguous, colors,
              concise=False, showLengths=False, showGaps=False, showNs=False,
              footer=False, div=False, gapChars='-'):
    """
    Make an HTML table showing inter-sequence distances.

    @param tableData: A C{defaultdict(dict)} keyed by read ids, whose values
        are the dictionaries returned by compareDNAReads.
    @param reads1: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the rows of the table.
    @param reads2: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the columns of the table.
    @param square: If C{True} we are making a square table of a set of
        sequences against themselves (in which case we show nothing on the
        diagonal).
    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct. Otherwise, we are strict
        and insist that only non-ambiguous nucleotides can contribute to the
        matching nucleotide count.
    @param colors: A C{list} of (threshold, color) tuples, where threshold is
        a C{float} and color is a C{str} to be used as a cell background.
        This is as returned by C{parseColors}.
    @param concise: If C{True}, do not show match details.
    @param showLengths: If C{True}, include the lengths of sequences.
    @param showGaps: If C{True}, include the number of gaps in sequences.
    @param showNs: If C{True}, include the number of N characters in
        sequences.
    @param footer: If C{True}, include a footer row giving the same
        information as found in the table header.
    @param div: If C{True}, return an HTML <div> fragment only, not a full
        HTML document.
    @param gapChars: A C{str} of sequence characters considered to be gaps.
    @return: An HTML C{str} showing inter-sequence distances.
    """
    readLengths1 = getReadLengths(reads1.values(), gapChars)
    readLengths2 = getReadLengths(reads2.values(), gapChars)
    result = []
    append = result.append

    def writeHeader():
        # The header row of the table.
        append(' <tr>')
        append(' <td>&nbsp;</td>')
        for read2 in reads2.values():
            append(' <td class="title"><span class="name">%s</span>' %
                   read2.id)
            if showLengths and not square:
                append(' <br>L:%d' % readLengths2[read2.id])
            if showGaps and not square:
                append(' <br>G:%d' % (len(read2) - readLengths2[read2.id]))
            if showNs and not square:
                append(' <br>N:%d' % read2.sequence.count('N'))
            append(' </td>')
        append(' </tr>')

    if div:
        append('<div>')
    else:
        append('<!DOCTYPE HTML>')
        append('<html>')
        append('<head>')
        append('<meta charset="UTF-8">')
        append('</head>')
        append('<body>')

    append('<style>')
    append("""
        table { border-collapse: collapse; }
        table, td { border: 1px solid #ccc; }
        tr:hover { background-color: #f2f2f2; }
        td { vertical-align: top; font-size: 14px; }
        span.name { font-weight: bold; }
        span.best { font-weight: bold; }
    """)

    # Add color style information for the identity thresholds.
    for threshold, color in colors:
        append('.%s { background-color: %s; }' % (
            thresholdToCssName(threshold), color))
    append('</style>')

    if not div:
        append(explanation(
            matchAmbiguous, concise, showLengths, showGaps, showNs))
    append('<div style="overflow-x:auto;">')
    append('<table>')
    append(' <tbody>')

    # Pre-process to find the best identities in each sample row.
    bestIdentityForId = {}
    for id1, read1 in reads1.items():
        # Look for best identity for the sample.
        read1Len = readLengths1[id1]
        bestIdentity = -1.0
        for id2, read2 in reads2.items():
            if id1 != id2 or not square:
                stats = tableData[id1][id2]
                identity = (
                    stats['identicalMatchCount'] +
                    (stats['ambiguousMatchCount'] if matchAmbiguous else 0)
                ) / read1Len
                if identity > bestIdentity:
                    bestIdentity = identity
        bestIdentityForId[id1] = bestIdentity

    writeHeader()

    # The main body of the table.
    for id1, read1 in reads1.items():
        read1Len = readLengths1[id1]
        append(' <tr>')
        append(' <td class="title"><span class="name">%s</span>' % id1)
        if showLengths:
            append('<br/>L:%d' % read1Len)
        if showGaps:
            append('<br/>G:%d' % (len(read1) - read1Len))
        if showNs:
            append('<br/>N:%d' % read1.sequence.count('N'))
        append('</td>')
        for id2, read2 in reads2.items():
            if id1 == id2 and square:
                append('<td>&nbsp;</td>')
                continue
            stats = tableData[id1][id2]
            identity = (
                stats['identicalMatchCount'] +
                (stats['ambiguousMatchCount'] if matchAmbiguous else 0)
            ) / read1Len
            append(' <td class="%s">' % thresholdToCssName(
                thresholdForIdentity(identity, colors)))
            # The maximum percent identity.
            if identity == bestIdentityForId[id1]:
                scoreStyle = ' class="best"'
            else:
                scoreStyle = ''
            append('<span%s>%.4f</span>' % (scoreStyle, identity))
            if not concise:
                append('<br/>IM:%d' % stats['identicalMatchCount'])
                if matchAmbiguous:
                    append('<br/>AM:%d' % stats['ambiguousMatchCount'])
                append(
                    '<br/>GG:%d'
                    '<br/>G?:%d'
                    '<br/>NE:%d' %
                    (stats['gapGapMismatchCount'],
                     stats['gapMismatchCount'],
                     stats['nonGapMismatchCount']))
            append(' </td>')
        append(' </tr>')

    if footer:
        writeHeader()

    append(' </tbody>')
    append('</table>')
    append('</div>')

    if div:
        append('</div>')
    else:
        append('</body>')
        append('</html>')

    return '\n'.join(result)
def output_dict(self):
    """Get dictionary representation of output arrays.

    Returns
    -------
    output_dict : dict of str to NDArray
        The dictionary that maps output names to NDArrays.

    Raises
    ------
    ValueError : if there are duplicated names in the outputs.
    """
    if self._output_dict is None:
        self._output_dict = Executor._get_dict(
            self._symbol.list_outputs(), self.outputs)
    return self._output_dict
def monitor(self):
    """Commit this batch after sufficient time has elapsed.

    This simply sleeps for ``self._settings.max_latency`` seconds,
    and then calls commit unless the batch has already been committed.
    """
    # NOTE: This blocks; it is up to the calling code to call it
    #       in a separate thread.

    # Sleep for however long we should be waiting.
    time.sleep(self._settings.max_latency)

    _LOGGER.debug("Monitor is waking up")
    return self._commit()
def get_file_range(ase, offsets, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.download.Offsets, int) -> bytes
    """Retrieve file range
    :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
    :param blobxfer.models.download.Offsets offsets: download offsets
    :param int timeout: timeout
    :rtype: bytes
    :return: content for file range
    """
    dir, fpath, _ = parse_file_path(ase.name)
    return ase.client._get_file(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        start_range=offsets.range_start,
        end_range=offsets.range_end,
        validate_content=False,  # HTTPS takes care of integrity during xfer
        timeout=timeout,
        snapshot=ase.snapshot,
    ).content
def from_image(cls, image, partname):
    """
    Return an |ImagePart| instance newly created from *image* and assigned
    *partname*.
    """
    return ImagePart(partname, image.content_type, image.blob, image)
def checkPidFile(pidfile):
    """
    Mostly comes from _twistd_unix.py (which is not Twisted public API :-/),
    except it raises an exception instead of exiting.
    """
    if os.path.exists(pidfile):
        try:
            with open(pidfile) as f:
                pid = int(f.read())
        except ValueError:
            raise ValueError('Pidfile {} contains non-numeric value'.format(pidfile))
        try:
            os.kill(pid, 0)
        except OSError as why:
            if why.errno == errno.ESRCH:
                # The pid doesn't exist.
                print('Removing stale pidfile {}'.format(pidfile))
                os.remove(pidfile)
            else:
                raise OSError("Can't check status of PID {} from pidfile {}: {}".format(
                    pid, pidfile, why))
        else:
            raise BusyError("'{}' exists - is this master still running?".format(pidfile))
def privmsg(self, target, message, nowait=False):
    """send a privmsg to target"""
    if message:
        messages = utils.split_message(message, self.config.max_length)
        if isinstance(target, DCCChat):
            for message in messages:
                target.send_line(message)
        elif target:
            f = None
            for message in messages:
                f = self.send_line('PRIVMSG %s :%s' % (target, message),
                                   nowait=nowait)
            return f
def get_template_setting(template_key, default=None):
    """ Read template settings """
    templates_var = getattr(settings, 'TEMPLATES', None)
    if templates_var:
        for tdict in templates_var:
            if template_key in tdict:
                return tdict[template_key]
    return default
def export(self):
    """
    Returns a representation of the Mechanism Name which is suitable for
    direct string comparison against other exported Mechanism Names. Its
    form is defined in the GSSAPI specification (RFC 2743). It can also be
    re-imported by constructing a :class:`Name` with the `name_type` param
    set to :const:`gssapi.C_NT_EXPORT_NAME`.

    :returns: an exported bytestring representation of this mechanism name
    :rtype: bytes
    """
    minor_status = ffi.new('OM_uint32[1]')
    output_buffer = ffi.new('gss_buffer_desc[1]')
    retval = C.gss_export_name(
        minor_status,
        self._name[0],
        output_buffer
    )
    try:
        if GSS_ERROR(retval):
            if minor_status[0] and self._mech_type:
                raise _exception_for_status(retval, minor_status[0],
                                            self._mech_type)
            else:
                raise _exception_for_status(retval, minor_status[0])
        return _buf_to_str(output_buffer[0])
    finally:
        if output_buffer[0].length != 0:
            C.gss_release_buffer(minor_status, output_buffer)
def check_marginal_likelihoods(tree, feature):
    """
    Sanity check: combined bottom-up and top-down likelihood of each node
    of the tree must be the same.

    :param tree: ete3.Tree, the tree of interest
    :param feature: str, character for which the likelihood is calculated
    :return: void, stores the node marginal likelihoods in the
        get_personalized_feature_name(feature, LH) feature.
    """
    lh_feature = get_personalized_feature_name(feature, LH)
    lh_sf_feature = get_personalized_feature_name(feature, LH_SF)

    for node in tree.traverse():
        if not node.is_root() and not (node.is_leaf() and node.dist == 0):
            node_loglh = np.log10(getattr(node, lh_feature).sum()) \
                - getattr(node, lh_sf_feature)
            parent_loglh = np.log10(getattr(node.up, lh_feature).sum()) \
                - getattr(node.up, lh_sf_feature)
            assert (round(node_loglh, 2) == round(parent_loglh, 2))
def play_beat(
    self,
    frequencys,
    play_time,
    sample_rate=44100,
    volume=0.01
):
    '''
    Play a beat under the conditions given by the arguments.

    Args:
        frequencys:   tuple of (left frequency (Hz), right frequency (Hz))
        play_time:    playback time (seconds)
        sample_rate:  sample rate
        volume:       volume

    Returns:
        void
    '''
    # Base object of the underlying audio library.
    audio = pyaudio.PyAudio()

    # Stream.
    stream = audio.open(
        format=pyaudio.paFloat32,
        channels=2,
        rate=sample_rate,
        output=1
    )

    left_frequency, right_frequency = frequencys
    left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
    right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)

    self.write_stream(stream, left_chunk, right_chunk, volume)

    stream.stop_stream()
    stream.close()
    audio.terminate()
def combination_step(self):
    """Update auxiliary state by a smart combination of previous updates
    in the frequency domain (standard FISTA :cite:`beck-2009-fast`).
    """
    # Update t step
    tprv = self.t
    self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))

    # Update Y
    if not self.opt['FastSolve']:
        self.Yfprv = self.Yf.copy()
    self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)
def agg_grid(grid, agg=None):
    """
    Many functions return a 2d list with a complex data type in each cell.
    For instance, grids representing environments have a set of resources,
    while reading in multiple data files at once will yield a list
    containing the values for that cell from each file. In order to
    visualize these data types it is helpful to summarize the more complex
    data types with a single number. For instance, you might want to take
    the length of a resource set to see how many resource types are present.
    Alternately, you might want to take the mode of a list to see the most
    common phenotype in a cell.

    This function facilitates this analysis by calling the given aggregation
    function (agg) on each cell of the given grid and returning the result.

    agg - A function indicating how to summarize grid contents.
          Default: mode (or string_avg for lists of strings).
    """
    grid = deepcopy(grid)
    if agg is None:
        if type(grid[0][0]) is list and type(grid[0][0][0]) is str:
            agg = string_avg
        else:
            agg = mode

    for i in range(len(grid)):
        for j in range(len(grid[i])):
            grid[i][j] = agg(grid[i][j])

    return grid
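A short usage sketch with an illustrative grid of resource sets, using `len` as the aggregator (as the docstring suggests for counting resource types):

    grid = [[{'food', 'water'}, {'food'}],
            [set(),             {'water'}]]
    counts = agg_grid(grid, agg=len)  # summarize each resource set by its size
    # counts == [[2, 1], [0, 1]]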
def decompose_code(code):
    """
    Decomposes a MARC "code" into tag, ind1, ind2, subcode
    """
    code = "%-6s" % code
    ind1 = code[3:4]
    if ind1 == " ":
        ind1 = "_"
    ind2 = code[4:5]
    if ind2 == " ":
        ind2 = "_"
    subcode = code[5:6]
    if subcode == " ":
        subcode = None
    return (code[0:3], ind1, ind2, subcode)
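For illustration, the left-padding to six characters is what makes the positional slicing work on short codes (example inputs are hypothetical MARC codes):

    decompose_code("100C_a")  # -> ('100', 'C', '_', 'a')
    decompose_code("245")     # padded to "245   " -> ('245', '_', '_', None)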
def get_mapping(self, meta_fields=True):
    '''
    Returns the mapping for the index as a dictionary.

    :param meta_fields: Also include elasticsearch meta fields in the
        dictionary.
    :return: a dictionary which can be used to generate the elasticsearch
        index mapping for this doctype.
    '''
    return {'properties': dict(
        (name, field.json())
        for name, field in iteritems(self.fields)
        if meta_fields or name not in AbstractField.meta_fields)}
def normalize_variable_name(node, reachability_tester):
    # type: (Dict[str, Any], ReferenceReachabilityTester) -> Optional[str]
    """ Returns normalized variable name.
    Normalizing means that variable names get explicit visibility by
    visibility prefix such as: "g:", "s:", ...

    Returns None if the specified node is unanalyzable.
    A node is unanalyzable if:
    - the node is not identifier-like
    - the node is named dynamically
    """
    node_type = NodeType(node['type'])

    if not is_analyzable_identifier(node):
        return None

    if node_type is NodeType.IDENTIFIER:
        return _normalize_identifier_value(node, reachability_tester)

    # Identifier-like nodes without an identifier are always normalized
    # because they can not have a visibility prefix.
    if node_type in IdentifierLikeNodeTypes:
        return node['value']
def _get_container(self, path):
    """Return single container."""
    cont = self.native_conn.get_container(path)
    return self.cont_cls(self,
                         cont.name,
                         cont.object_count,
                         cont.size_used)
def execute(self):
    """
    Execute the query with optional timeout. The response to the execute
    query is the raw payload received from the websocket and will contain
    multiple dict keys and values. It is more common to call
    query.fetch_XXX which will filter the return result based on the
    method. Each result set will have a max batch size of 200 records.
    This method will also continuously return results until terminated.
    To make a single bounded fetch, call :meth:`.fetch_batch` or
    :meth:`.fetch_raw`.

    :param int sock_timeout: event loop interval
    :return: raw dict returned from query
    :rtype: dict(list)
    """
    with SMCSocketProtocol(self, **self.sockopt) as protocol:
        for result in protocol.receive():
            yield result
def http_basic_auth_group_member_required(groups):
    """Decorator. Use it to specify an RPC method is available only to
    logged-in users with given permissions"""
    if isinstance(groups, six.string_types):
        # Check the user is in a single group
        return auth.set_authentication_predicate(
            http_basic_auth_check_user, [auth.user_in_group, groups])
    else:
        # Check the user is in any of several groups
        return auth.set_authentication_predicate(
            http_basic_auth_check_user, [auth.user_in_any_group, groups])
def get_neighbor_out_filter(neigh_ip_address):
    """Returns a neighbor out_filter for the given ip address if it exists."""
    core = CORE_MANAGER.get_core_service()
    ret = core.peer_manager.get_by_addr(neigh_ip_address).out_filters
    return ret
def multi_index(idx, dim):
    """
    Single to multi-index using graded reverse lexicographical notation.

    Parameters
    ----------
    idx : int
        Index in integer notation
    dim : int
        The number of dimensions in the multi-index notation

    Returns
    -------
    out : tuple
        Multi-index of `idx` with `len(out) == dim`

    Examples
    --------
    >>> for idx in range(5):
    ...     print(chaospy.bertran.multi_index(idx, 3))
    (0, 0, 0)
    (1, 0, 0)
    (0, 1, 0)
    (0, 0, 1)
    (2, 0, 0)

    See Also
    --------
    single_index
    """
    def _rec(idx, dim):
        idxn = idxm = 0
        if not dim:
            return ()
        if idx == 0:
            return (0, )*dim

        while terms(idxn, dim) <= idx:
            idxn += 1
        idx -= terms(idxn-1, dim)

        if idx == 0:
            return (idxn,) + (0,)*(dim-1)
        while terms(idxm, dim-1) <= idx:
            idxm += 1

        return (int(idxn-idxm),) + _rec(idx, dim-1)

    return _rec(idx, dim)
def update_event(self, event, action, flags):
    """
    Called by libcouchbase to add/remove event watchers
    """
    if action == PYCBC_EVACTION_UNWATCH:
        if event.flags & LCB_READ_EVENT:
            self.reactor.removeReader(event)
        if event.flags & LCB_WRITE_EVENT:
            self.reactor.removeWriter(event)

    elif action == PYCBC_EVACTION_WATCH:
        if flags & LCB_READ_EVENT:
            self.reactor.addReader(event)
        if flags & LCB_WRITE_EVENT:
            self.reactor.addWriter(event)

        if flags & LCB_READ_EVENT == 0:
            self.reactor.removeReader(event)
        if flags & LCB_WRITE_EVENT == 0:
            self.reactor.removeWriter(event)
def plot_reliability_diagram(confidence, labels, filepath):
    """
    Takes in confidence values for predictions and correct labels for the
    data, plots a reliability diagram.

    :param confidence: nb_samples x nb_classes (e.g., output of softmax)
    :param labels: vector of nb_samples
    :param filepath: where to save the diagram
    :return:
    """
    assert len(confidence.shape) == 2
    assert len(labels.shape) == 1
    assert confidence.shape[0] == labels.shape[0]
    print('Saving reliability diagram at: ' + str(filepath))
    if confidence.max() <= 1.:
        # confidence array is output of softmax
        bins_start = [b / 10. for b in xrange(0, 10)]
        bins_end = [b / 10. for b in xrange(1, 11)]
        bins_center = [(b + .5) / 10. for b in xrange(0, 10)]
        preds_conf = np.max(confidence, axis=1)
        preds_l = np.argmax(confidence, axis=1)
    else:
        raise ValueError('Confidence values go above 1.')

    print(preds_conf.shape, preds_l.shape)

    # Create var for reliability diagram
    # Will contain mean accuracies for each bin
    reliability_diag = []
    num_points = []  # keeps the number of points in each bar

    # Find average accuracy per confidence bin
    for bin_start, bin_end in zip(bins_start, bins_end):
        above = preds_conf >= bin_start
        if bin_end == 1.:
            below = preds_conf <= bin_end
        else:
            below = preds_conf < bin_end
        mask = np.multiply(above, below)
        num_points.append(np.sum(mask))
        bin_mean_acc = max(0, np.mean(preds_l[mask] == labels[mask]))
        reliability_diag.append(bin_mean_acc)

    # Plot diagram
    assert len(reliability_diag) == len(bins_center)
    print(reliability_diag)
    print(bins_center)
    print(num_points)
    fig, ax1 = plt.subplots()
    _ = ax1.bar(bins_center, reliability_diag, width=.1, alpha=0.8)
    plt.xlim([0, 1.])
    ax1.set_ylim([0, 1.])

    ax2 = ax1.twinx()
    print(sum(num_points))
    ax2.plot(bins_center, num_points, color='r', linestyle='-', linewidth=7.0)
    ax2.set_ylabel('Number of points in the data', fontsize=16, color='r')

    if len(np.argwhere(confidence[0] != 0.)) == 1:
        # This is a DkNN diagram
        ax1.set_xlabel('Prediction Credibility', fontsize=16)
    else:
        # This is a softmax diagram
        ax1.set_xlabel('Prediction Confidence', fontsize=16)
    ax1.set_ylabel('Prediction Accuracy', fontsize=16)
    ax1.tick_params(axis='both', labelsize=14)
    ax2.tick_params(axis='both', labelsize=14, colors='r')
    fig.tight_layout()
    plt.savefig(filepath, bbox_inches='tight')
def password_credentials(self, username, password, **kwargs):
    """
    Retrieve access token by 'password credentials' grant.
    https://tools.ietf.org/html/rfc6749#section-4.3

    :param str username: The user name to obtain an access token for
    :param str password: The user's password
    :rtype: dict
    :return: Access token response
    """
    return self._token_request(grant_type='password',
                               username=username,
                               password=password,
                               **kwargs)
def tile_2d(physical_shape, tile_shape,
            outer_name="outer", inner_name="inner", cores_name=None):
    """2D tiling of a 3d physical mesh.

    The "outer" mesh dimension corresponds to which tile. The "inner" mesh
    dimension corresponds to the position within a tile of processors.

    Optionally, if cores_name is specified, then a 3 dimensional logical
    mesh is returned, with the third dimension representing the two
    different cores within a chip. If cores_name is not specified, then the
    cores-in-a-chip dimension is folded into the inner dimension.

    TODO(noam): explain this better.

    Example:
      tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4])
      The "inner" dimension has size 4x4x2=32 and corresponds to the
      position within a 4x4 tile of processors. The "outer" dimension has
      size 8/4 * 16/4 = 8, and corresponds to the 8 tiles in the mesh.

    Args:
      physical_shape: a triple of integers [X, Y, cores]
      tile_shape: a pair
      outer_name: a string
      inner_name: a string
      cores_name: an optional string

    Returns:
      mesh_shape: a mtf.Shape
      logical_to_physical: a list
    """
    logical_to_physical = []
    p0, p1, p2 = physical_shape
    t0, t1 = tile_shape
    tile_ring = _ring_2d(t0, t1)
    tiles_ring = _ring_2d(p0 // t0, p1 // t1)
    for logical_pnum in range(p0 * p1 * p2):
        core_on_chip = logical_pnum % p2
        logical_chip_num = logical_pnum // p2
        logical_pos_in_tile = logical_chip_num % (t0 * t1)
        logical_tile_num = logical_chip_num // (t0 * t1)
        tile_i, tile_j = tile_ring[logical_pos_in_tile]
        tiles_i, tiles_j = tiles_ring[logical_tile_num]
        physical_pnum = core_on_chip + p2 * (
            tile_i * p1 + tile_j + tiles_i * p1 * t0 + tiles_j * t1)
        logical_to_physical.append(physical_pnum)
    assert sorted(logical_to_physical) == list(range(p0 * p1 * p2))
    tile_size = t0 * t1 * p2
    num_tiles = p0 * p1 // (t0 * t1)
    if cores_name:
        mesh_shape = mtf.Shape(
            [mtf.Dimension(outer_name, int(num_tiles)),
             mtf.Dimension(inner_name, int(t0 * t1)),
             mtf.Dimension(cores_name, int(p2))])
    else:
        mesh_shape = mtf.Shape(
            [mtf.Dimension(outer_name, int(num_tiles)),
             mtf.Dimension(inner_name, int(tile_size))])
    return mesh_shape, logical_to_physical
def _Members(self, group):
    """Unify members of a group and accounts with the group as primary gid."""
    group.members = set(group.members).union(self.gids.get(group.gid, []))
    return group
def _bfgs_inv_hessian_update(grad_delta, position_delta, normalization_factor,
                             inv_hessian_estimate):
    """Applies the BFGS update to the inverse Hessian estimate.

    The BFGS update rule is (note A^T denotes the transpose of a
    vector/matrix A).

    ```None
      rho = 1/(grad_delta^T * position_delta)
      U = (I - rho * position_delta * grad_delta^T)
      H_1 =  U * H_0 * U^T + rho * position_delta * position_delta^T
    ```

    Here, `H_0` is the inverse Hessian estimate at the previous iteration
    and `H_1` is the next estimate. Note that `*` should be interpreted as
    the matrix multiplication (with the understanding that matrix
    multiplication for scalars is usual multiplication and for matrix with
    vector is the action of the matrix on the vector.).

    The implementation below utilizes an expanded version of the above
    formula to avoid the matrix multiplications that would be needed
    otherwise. By expansion it is easy to see that one only needs
    matrix-vector or vector-vector operations. The expanded version is:

    ```None
      f = 1 + rho * (grad_delta^T * H_0 * grad_delta)
      H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T +
                           (H_0 * grad_delta) * position_delta^T] +
                  rho * f * [position_delta * position_delta^T]
    ```

    All the terms in square brackets are matrices and are constructed using
    vector outer products. All the other terms on the right hand side are
    scalars. Also worth noting that the first and second lines are both
    rank 1 updates applied to the current inverse Hessian estimate.

    Args:
      grad_delta: Real `Tensor` of shape `[..., n]`. The difference between
        the gradient at the new position and the old position.
      position_delta: Real `Tensor` of shape `[..., n]`. The change in
        position from the previous iteration to the current one.
      normalization_factor: Real `Tensor` of shape `[...]`. Should be equal
        to `grad_delta^T * position_delta`, i.e. `1/rho` as defined above.
      inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The
        previous estimate of the inverse Hessian. Should be positive
        definite and symmetric.

    Returns:
      next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next
        Hessian estimate updated using the BFGS update scheme. If the
        `inv_hessian_estimate` is symmetric and positive definite, the
        `next_inv_hessian_estimate` is guaranteed to satisfy the same
        conditions.
    """
    # The quadratic form: y^T.H.y; where H is the inverse Hessian and y is
    # the gradient change.
    conditioned_grad_delta = _mul_right(inv_hessian_estimate, grad_delta)
    conditioned_grad_delta_norm = tf.reduce_sum(
        input_tensor=conditioned_grad_delta * grad_delta, axis=-1)

    # The first rank 1 update term requires the outer product: s.y^T.
    cross_term = _tensor_product(position_delta, conditioned_grad_delta)

    def _expand_scalar(s):
        # Expand dimensions of a batch of scalars to multiply or divide a
        # matrix.
        return s[..., tf.newaxis, tf.newaxis]

    # Symmetrize
    cross_term += _tensor_product(conditioned_grad_delta, position_delta)
    position_term = _tensor_product(position_delta, position_delta)
    with tf.control_dependencies([position_term]):
        position_term *= _expand_scalar(
            1 + conditioned_grad_delta_norm / normalization_factor)

    return (inv_hessian_estimate +
            (position_term - cross_term) / _expand_scalar(normalization_factor))
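As a sanity check of the algebra, here is a small NumPy sketch (illustrative only, not the TensorFlow implementation) showing that the expanded form equals the textbook `U H_0 U^T + rho s s^T` update:

    import numpy as np

    rng = np.random.default_rng(0)
    n = 4
    s = rng.normal(size=n)             # position_delta
    y = rng.normal(size=n)             # grad_delta
    A = rng.normal(size=(n, n))
    H0 = A @ A.T + n * np.eye(n)       # symmetric positive definite estimate
    rho = 1.0 / (y @ s)                # 1 / normalization_factor

    # Textbook form: H1 = U H0 U^T + rho s s^T, with U = I - rho s y^T
    U = np.eye(n) - rho * np.outer(s, y)
    H1_matrix = U @ H0 @ U.T + rho * np.outer(s, s)

    # Expanded form used above: only matrix-vector and outer products needed
    Hy = H0 @ y
    f = 1.0 + rho * (y @ Hy)
    H1_expanded = (H0
                   - rho * (np.outer(s, Hy) + np.outer(Hy, s))
                   + rho * f * np.outer(s, s))

    assert np.allclose(H1_matrix, H1_expanded)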
def from_json(cls, json):
    """Creates an instance of the InputReader for the given input shard state.

    Args:
      json: The InputReader state as a dict-like object.

    Returns:
      An instance of the InputReader configured using the values of json.
    """
    result = super(_ReducerReader, cls).from_json(json)
    result.current_key = _ReducerReader.decode_data(json["current_key"])
    result.current_values = _ReducerReader.decode_data(json["current_values"])
    return result
def print_progress_bar(self, task):
    """
    Draws a progress bar on screen based on the given information using
    standard output (stdout).

    :param task: TaskProgress object containing all required information
        to draw a progress bar at the given state.
    """
    str_format = "{0:." + str(task.decimals) + "f}"
    percents = str_format.format(100 * (task.progress / float(task.total)))
    filled_length = int(round(task.bar_length * task.progress / float(task.total)))
    bar = '█' * filled_length + '-' * (task.bar_length - filled_length)

    # Build elapsed time if needed
    elapsed_time = None
    if task.display_time:
        if task.end_time:
            # If the task has ended, stop the chrono
            if not task.elapsed_time_at_end:
                task.elapsed_time_at_end = self.millis_to_human_readable(
                    task.end_time - task.begin_time)
            elapsed_time = task.elapsed_time_at_end
        else:
            # If task is new then start the chrono
            if not task.begin_time:
                task.begin_time = millis()
            elapsed_time = self.millis_to_human_readable(millis() - task.begin_time)

    prefix_pattern = '%{}s'.format(self.longest_bar_prefix_size)
    time_container_pattern = '(%s)' if task.display_time and not task.end_time else '[%s]'

    if len(task.suffix) > 0 and task.display_time:
        sys.stdout.write('\n {} |%s| %3s %% {} - %s'
                         .format(prefix_pattern, time_container_pattern)
                         % (task.prefix, bar, percents, elapsed_time, task.suffix))
    elif len(task.suffix) > 0 and not task.display_time:
        sys.stdout.write('\n {} |%s| %3s %% - %s'
                         .format(prefix_pattern)
                         % (task.prefix, bar, percents, task.suffix))
    elif task.display_time and not len(task.suffix) > 0:
        sys.stdout.write('\n {} |%s| %3s %% {}'
                         .format(prefix_pattern, time_container_pattern)
                         % (task.prefix, bar, percents, elapsed_time))
    else:
        sys.stdout.write('\n {} |%s| %3s %%'
                         .format(prefix_pattern)
                         % (task.prefix, bar, percents))
    sys.stdout.write('\n')
    sys.stdout.flush()
def _waitFor(self, timeout, notification, **kwargs):
    """Wait for a particular UI event to occur; this can be built upon in
    NativeUIElement for specific convenience methods.
    """
    callback = self._matchOther
    retelem = None
    callbackArgs = None
    callbackKwargs = None

    # Allow customization of the callback, though by default use the basic
    # _match() method
    if 'callback' in kwargs:
        callback = kwargs['callback']
        del kwargs['callback']

        # Deal with these only if callback is provided:
        if 'args' in kwargs:
            if not isinstance(kwargs['args'], tuple):
                errStr = 'Notification callback args not given as a tuple'
                raise TypeError(errStr)

            # If args are given, notification will pass back the returned
            # element in the first positional arg
            callbackArgs = kwargs['args']
            del kwargs['args']

        if 'kwargs' in kwargs:
            if not isinstance(kwargs['kwargs'], dict):
                errStr = 'Notification callback kwargs not given as a dict'
                raise TypeError(errStr)

            callbackKwargs = kwargs['kwargs']
            del kwargs['kwargs']

        # If kwargs are not given as a dictionary but individually listed
        # need to update the callbackKwargs dict with the remaining items in
        # kwargs
        if kwargs:
            if callbackKwargs:
                callbackKwargs.update(kwargs)
            else:
                callbackKwargs = kwargs
    else:
        callbackArgs = (retelem,)
        # Pass the kwargs to the default callback
        callbackKwargs = kwargs

    return self._setNotification(timeout, notification, callback,
                                 callbackArgs, callbackKwargs)
def _get(relative_path, genome=None):
    """
    :param relative_path: relative path of the file inside the repository
    :param genome: genome name. Can contain a chromosome name after a dash,
        like hg19-chr20; in that case, for a BED file, the returned BedTool
        will have the corresponding chromosome filter applied.
    :return: BedTool object if it's a BED file, or filepath
    """
    chrom = None
    if genome:
        if '-chr' in genome:
            genome, chrom = genome.split('-')
        check_genome(genome)
        relative_path = relative_path.format(genome=genome)

    path = abspath(join(dirname(__file__), relative_path))
    if not isfile(path) and isfile(path + '.gz'):
        path += '.gz'

    if path.endswith('.bed') or path.endswith('.bed.gz'):
        if path.endswith('.bed.gz'):
            bedtools = which('bedtools')
            if not bedtools:
                critical('bedtools not found in PATH: ' + str(os.environ['PATH']))
            debug('BED is compressed, creating BedTool')
            bed = BedTool(path)
        else:
            debug('BED is uncompressed, creating BedTool')
            bed = BedTool(path)

        if chrom:
            debug('Filtering BEDTool for chrom ' + chrom)
            bed = bed.filter(lambda r: r.chrom == chrom)

        return bed
    else:
        return path
def get_select_items(items):
    """Return list of possible select items."""
    option_items = list()
    for item in items:
        if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
            option_items.append(item[defs.VALUE])
        else:
            raise exceptions.ParametersFieldError(
                item,
                "a dictionary with {} and {}".format(defs.LABEL, defs.VALUE))
    return option_items
def _create_delete_one_query(self, row_id, ctx):
    """
    Create a delete-row-by-id query.

    :param int row_id: Identifier of the deleted row.
    :param ResourceQueryContext ctx: The context of this delete query.
    """
    assert isinstance(ctx, ResourceQueryContext)
    return self._orm.query(self.model_cls).filter(self._model_pk == row_id)
def transfer_to(self, cloudpath, bbox, block_size=None, compress=True):
    """
    Transfer files from one storage location to another, bypassing
    volume painting. This enables using a single CloudVolume instance
    to transfer big volumes. In some cases, gsutil or aws s3 cli tools
    may be more appropriate. This method is provided for convenience. It
    may be optimized for better performance over time as demand requires.

    cloudpath (str): path to storage layer
    bbox (Bbox object): ROI to transfer
    block_size (int): number of file chunks to transfer per I/O batch.
    compress (bool): Set to False to upload as uncompressed
    """
    if type(bbox) is Bbox:
        requested_bbox = bbox
    else:
        (requested_bbox, _, _) = self.__interpret_slices(bbox)

    realized_bbox = self.__realized_bbox(requested_bbox)

    if requested_bbox != realized_bbox:
        raise exceptions.AlignmentError(
            "Unable to transfer non-chunk aligned bounding boxes. "
            "Requested: {}, Realized: {}".format(requested_bbox, realized_bbox))

    default_block_size_MB = 50  # MB
    chunk_MB = (self.underlying.rectVolume() * np.dtype(self.dtype).itemsize
                * self.num_channels)
    if self.layer_type == 'image':
        # kind of an average guess for some EM datasets, have seen up to 1.9x
        # and as low as 1.1; affinites are also images, but have very
        # different compression ratios. e.g. 3x for kempressed
        chunk_MB /= 1.3
    else:  # segmentation
        chunk_MB /= 100.0  # compression ratios between 80 and 800....
    chunk_MB /= 1024.0 * 1024.0

    if block_size:
        step = block_size
    else:
        step = int(default_block_size_MB // chunk_MB) + 1

    try:
        destvol = CloudVolume(cloudpath, mip=self.mip)
    except exceptions.InfoUnavailableError:
        destvol = CloudVolume(cloudpath, mip=self.mip, info=self.info,
                              provenance=self.provenance.serialize())
        destvol.commit_info()
        destvol.commit_provenance()
    except exceptions.ScaleUnavailableError:
        destvol = CloudVolume(cloudpath)
        for i in range(len(destvol.scales) + 1, len(self.scales)):
            destvol.scales.append(self.scales[i])
        destvol.commit_info()
        destvol.commit_provenance()

    num_blocks = np.ceil(self.bounds.volume() / self.underlying.rectVolume()) / step
    num_blocks = int(np.ceil(num_blocks))

    cloudpaths = txrx.chunknames(realized_bbox, self.bounds, self.key,
                                 self.underlying)

    pbar = tqdm(
        desc='Transferring Blocks of {} Chunks'.format(step),
        unit='blocks',
        disable=(not self.progress),
        total=num_blocks,
    )

    with pbar:
        with Storage(self.layer_cloudpath) as src_stor:
            with Storage(cloudpath) as dest_stor:
                for _ in range(num_blocks, 0, -1):
                    srcpaths = list(itertools.islice(cloudpaths, step))
                    files = src_stor.get_files(srcpaths)
                    files = [(f['filename'], f['content']) for f in files]
                    dest_stor.put_files(
                        files=files,
                        compress=compress,
                        content_type=txrx.content_type(destvol),
                    )
                    pbar.update()
def success(item):
    '''Successful finish'''
    try:
        # mv to done
        trg_queue = item.queue
        os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
                  os.path.join(fsq_path.done(trg_queue, host=item.host),
                               item.id))
    except AttributeError, e:  # DuckType TypeError'ing
        raise TypeError(u'item must be an FSQWorkItem, not:'\
                        u' {0}'.format(item.__class__.__name__))
    except (OSError, IOError, ), e:
        raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'\
                           u' {1}'.format(item.id, wrap_io_os_err(e)))
def append_to_table(self, table):
    """Add this instance as a row on an `astropy.table.Table`
    """
    table.add_row(dict(path=self.path,
                       key=self.key,
                       creator=self.creator,
                       timestamp=self.timestamp,
                       status=self.status,
                       flags=self.flags))
def _get_font_size(document, style):
    """Get font size defined for this style.

    It will try to get the font size from its parent style if it is not
    defined by the original style.

    :Args:
      - document (:class:`ooxml.doc.Document`): Document object
      - style (:class:`ooxml.doc.Style`): Style object

    :Returns:
      Returns font size as a number. -1 if it can not get the font size.
    """
    font_size = style.get_font_size()
    if font_size == -1:
        if style.based_on:
            based_on = document.styles.get_by_id(style.based_on)
            if based_on:
                return _get_font_size(document, based_on)
    return font_size
def connect(self, ctrl):
    """Connect to the device."""
    if self.prompt:
        self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
    else:
        self.prompt_re = self.driver.prompt_re
    self.ctrl = ctrl
    if self.protocol.connect(self.driver):
        if self.protocol.authenticate(self.driver):
            self.ctrl.try_read_prompt(1)
            if not self.prompt:
                self.prompt = self.ctrl.detect_prompt()

            if self.is_target:
                self.update_config_mode()
                if self.mode is not None and self.mode != 'global':
                    self.last_error_msg = "Device is not in global mode. Disconnected."
                    self.chain.disconnect()
                    return False

            self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
            self.connected = True

            if self.is_target is False:
                if self.os_version is None:
                    self.update_os_version()
                self.update_hostname()
            else:
                self._connected_to_target()
            return True
    else:
        self.connected = False
    return False
def _get_config_int(self, section: str, key: str, fallback: int = object()) -> int:
    """
    Gets an int config value

    :param section: Section
    :param key: Key
    :param fallback: Optional fallback value
    """
    return self._config.getint(section, key, fallback=fallback)
def init_batch(raw_constraints: List[Optional[RawConstraintList]],
               beam_size: int,
               start_id: int,
               eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
    """
    :param raw_constraints: The list of raw constraints (list of list of IDs).
    :param beam_size: The beam size.
    :param start_id: The target-language vocabulary ID of the SOS symbol.
    :param eos_id: The target-language vocabulary ID of the EOS symbol.
    :return: A list of ConstrainedHypothesis objects
        (shape: (batch_size * beam_size,)).
    """
    constraints = [None] * (len(raw_constraints) * beam_size)  # type: List[Optional[ConstrainedHypothesis]]
    if any(raw_constraints):
        for i, raw_list in enumerate(raw_constraints):
            num_constraints = sum([len(phrase) for phrase in raw_list]) \
                if raw_list is not None else 0
            if num_constraints > 0:
                hyp = ConstrainedHypothesis(raw_list, eos_id)
                idx = i * beam_size
                constraints[idx:idx + beam_size] = [
                    hyp.advance(start_id) for x in range(beam_size)]
    return constraints
def h3(data, *args, **kwargs):
    """Facade function to create 3D histograms.

    Parameters
    ----------
    data : array_like or list[array_like] or tuple[array_like]
        Can be a single array (with three columns) or three different arrays
        (for each component)

    Returns
    -------
    physt.histogram_nd.HistogramND
    """
    import numpy as np

    if data is not None and isinstance(data, (list, tuple)) and not np.isscalar(data[0]):
        if "axis_names" not in kwargs:
            kwargs["axis_names"] = [
                (column.name if hasattr(column, "name") else None)
                for column in data]
        data = np.concatenate([item[:, np.newaxis] for item in data], axis=1)
    else:
        kwargs["dim"] = 3
    return histogramdd(data, *args, **kwargs)
def report_saved(report_stats):
    """Record the percent saved & print it."""
    if Settings.verbose:
        report = ''
        truncated_filename = truncate_cwd(report_stats.final_filename)
        report += '{}: '.format(truncated_filename)
        total = new_percent_saved(report_stats)
        if total:
            report += total
        else:
            report += '0%'
        if Settings.test:
            report += ' could be saved.'
        if Settings.verbose > 1:
            tools_report = ', '.join(report_stats.report_list)
            if tools_report:
                report += '\n\t' + tools_report
        print(report)
def _get_value(context, key):
    """
    Retrieve a key's value from a context item.

    Returns _NOT_FOUND if the key does not exist.

    The ContextStack.get() docstring documents this function's intended
    behavior.
    """
    if isinstance(context, dict):
        # Then we consider the argument a "hash" for the purposes of the
        # spec.
        #
        # We do a membership test to avoid using exceptions for flow control
        # (e.g. catching KeyError).
        if key in context:
            return context[key]
    elif type(context).__module__ != _BUILTIN_MODULE:
        # Then we consider the argument an "object" for the purposes of
        # the spec.
        #
        # The elif test above lets us avoid treating instances of built-in
        # types like integers and strings as objects (cf. issue #81).
        # Instances of user-defined classes on the other hand, for example,
        # are considered objects by the test above.
        try:
            attr = getattr(context, key)
        except AttributeError:
            # TODO: distinguish the case of the attribute not existing from
            #   an AttributeError being raised by the call to the attribute.
            #   See the following issue for implementation ideas:
            #   http://bugs.python.org/issue7559
            pass
        else:
            # TODO: consider using EAFP here instead.
            #   http://docs.python.org/glossary.html#term-eafp
            if callable(attr):
                return attr()
            return attr

    return _NOT_FOUND
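A brief illustration of the three branches (the class and values are hypothetical; assumes _NOT_FOUND and _BUILTIN_MODULE are the module's sentinels):

    class Greeter(object):
        def name(self):
            return 'world'

    _get_value({'name': 'mustache'}, 'name')  # -> 'mustache' (dict lookup)
    _get_value(Greeter(), 'name')             # -> 'world' (callable attribute is invoked)
    _get_value(42, 'name')                    # -> _NOT_FOUND (built-ins are not "objects")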
def _find_files(root, includes, excludes, follow_symlinks):
    """List files inside a directory based on include and exclude rules.

    This is a more advanced version of `glob.glob`, that accepts multiple
    complex patterns.

    Args:
        root (str): base directory to list files from.
        includes (list[str]): inclusion patterns. Only files matching those
            patterns will be included in the result.
        excludes (list[str]): exclusion patterns. Files matching those
            patterns will be excluded from the result. Exclusions take
            precedence over inclusions.
        follow_symlinks (bool): If true, symlinks will be included in the
            resulting zip file

    Yields:
        str: a file name relative to the root.

    Note:
        Documentation for the patterns can be found at
        http://www.aviser.asia/formic/doc/index.html
    """
    root = os.path.abspath(root)
    file_set = formic.FileSet(
        directory=root,
        include=includes,
        exclude=excludes,
        symlinks=follow_symlinks,
    )
    for filename in file_set.qualified_files(absolute=False):
        yield filename
def convert_objects(self, convert_dates=True, convert_numeric=False,
                    convert_timedeltas=True, copy=True):
    """
    Attempt to infer better dtype for object columns.

    .. deprecated:: 0.21.0

    Parameters
    ----------
    convert_dates : boolean, default True
        If True, convert to date where possible. If 'coerce', force
        conversion, with unconvertible values becoming NaT.
    convert_numeric : boolean, default False
        If True, attempt to coerce to numbers (including strings), with
        unconvertible values becoming NaN.
    convert_timedeltas : boolean, default True
        If True, convert to timedelta where possible. If 'coerce', force
        conversion, with unconvertible values becoming NaT.
    copy : boolean, default True
        If True, return a copy even if no copy is necessary (e.g. no
        conversion was done). Note: This is meant for internal use, and
        should not be confused with inplace.

    Returns
    -------
    converted : same as input object

    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to numeric type.
    """
    msg = ("convert_objects is deprecated. To re-infer data dtypes for "
           "object columns, use {klass}.infer_objects()\nFor all "
           "other conversions use the data-type specific converters "
           "pd.to_datetime, pd.to_timedelta and pd.to_numeric."
           ).format(klass=self.__class__.__name__)
    warnings.warn(msg, FutureWarning, stacklevel=2)

    return self._constructor(
        self._data.convert(convert_dates=convert_dates,
                           convert_numeric=convert_numeric,
                           convert_timedeltas=convert_timedeltas,
                           copy=copy)).__finalize__(self)
Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type.
def parse_headers( self, lines: List[bytes] ) -> Tuple['CIMultiDictProxy[str]', RawHeaders, Optional[bool], Optional[str], bool, bool]: """Parses RFC 5322 headers from a stream. Line continuations are supported. Returns list of header name and value pairs. Header name is in upper case. """ headers, raw_headers = self._headers_parser.parse_headers(lines) close_conn = None encoding = None upgrade = False chunked = False # keep-alive conn = headers.get(hdrs.CONNECTION) if conn: v = conn.lower() if v == 'close': close_conn = True elif v == 'keep-alive': close_conn = False elif v == 'upgrade': upgrade = True # encoding enc = headers.get(hdrs.CONTENT_ENCODING) if enc: enc = enc.lower() if enc in ('gzip', 'deflate', 'br'): encoding = enc # chunking te = headers.get(hdrs.TRANSFER_ENCODING) if te and 'chunked' in te.lower(): chunked = True return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
Parses RFC 5322 headers from a stream. Line continuations are supported. Returns list of header name and value pairs. Header name is in upper case.
def _filter_xpath_grouping(xpath): """ This method removes the outer parentheses for xpath grouping. The xpath converter will break otherwise. Example: "(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]" """ # First remove the first open parentheses xpath = xpath[1:] # Next remove the last closed parentheses index = xpath.rfind(')') if index == -1: raise XpathException("Invalid or unsupported Xpath: %s" % xpath) xpath = xpath[:index] + xpath[index + 1:] return xpath
This method removes the outer parentheses for xpath grouping. The xpath converter will break otherwise. Example: "(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]"
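A quick trace of the transformation, using the values from the docstring:

# _filter_xpath_grouping("(//button[@type='submit'])[1]")
#   step 1 (drop the leading '('): "//button[@type='submit'])[1]"
#   step 2 (drop the last ')')   : "//button[@type='submit'][1]"
# An input with no closing parenthesis raises XpathException.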
def _get_db(): """ Get database from server """ with cd(env.remote_path): file_path = '/tmp/' + _sql_paths('remote', str(base64.urlsafe_b64encode(uuid.uuid4().bytes)).replace('=', '')) run(env.python + ' manage.py dump_database | gzip > ' + file_path) local_file_path = './backups/' + _sql_paths('remote', datetime.now()) get(file_path, local_file_path) run('rm ' + file_path) return local_file_path
Get database from server
def get_io_write_task(self, fileobj, data, offset):
    """Get an IO write task for the requested set of data

    This task can be run immediately or be submitted to the IO executor
    for it to run.

    :type fileobj: file-like object
    :param fileobj: The file-like object to write to

    :type data: bytes
    :param data: The data to write out

    :type offset: integer
    :param offset: The offset to write the data to in the file-like object

    :returns: An IO task to be used to write data to a file-like object
    """
    return IOWriteTask(
        self._transfer_coordinator,
        main_kwargs={
            'fileobj': fileobj,
            'data': data,
            'offset': offset,
        }
    )
Get an IO write task for the requested set of data

This task can be run immediately or be submitted to the IO executor
for it to run.

:type fileobj: file-like object
:param fileobj: The file-like object to write to

:type data: bytes
:param data: The data to write out

:type offset: integer
:param offset: The offset to write the data to in the file-like object

:returns: An IO task to be used to write data to a file-like object
def critical(self, event=None, *args, **kw): """ Process event and call :meth:`logging.Logger.critical` with the result. """ if not self._logger.isEnabledFor(logging.CRITICAL): return kw = self._add_base_info(kw) kw['level'] = "critical" return self._proxy_to_logger('critical', event, *args, **kw)
Process event and call :meth:`logging.Logger.critical` with the result.
def mass_fractions(self):
    r'''Dictionary of atom:mass-weighted fractional occurrence of elements.
    Useful when performing mass balances. For atom-fraction occurrences, see
    :obj:`atom_fractions`.

    Examples
    --------
    >>> Chemical('water').mass_fractions
    {'H': 0.11189834407236524, 'O': 0.8881016559276347}
    '''
    if self.__mass_fractions:
        return self.__mass_fractions
    else:
        self.__mass_fractions = mass_fractions(self.atoms, self.MW)
        return self.__mass_fractions
r'''Dictionary of atom:mass-weighted fractional occurrence of elements.
Useful when performing mass balances. For atom-fraction occurrences, see
:obj:`atom_fractions`.

Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347}
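The arithmetic behind the water example, sketched standalone; the property above is assumed to delegate to a mass_fractions(atoms, MW) helper, and the atomic masses below are standard values.

atoms = {'H': 2, 'O': 1}                 # composition of water
masses = {'H': 1.00794, 'O': 15.9994}    # standard atomic masses, g/mol
MW = sum(n * masses[el] for el, n in atoms.items())            # 18.01528
fracs = {el: n * masses[el] / MW for el, n in atoms.items()}
# fracs -> {'H': 0.1118983..., 'O': 0.8881016...}, matching the docstring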
def _summarize_peaks(peaks):
    """
    Merge peak positions that are within 10 of the previously kept peak.
    """
    previous = peaks[0]
    new_peaks = [previous]
    for pos in peaks:
        if pos > previous + 10:
            new_peaks.append(pos)
            previous = pos
    return new_peaks
Merge peak positions that are within 10 of the previously kept peak.
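A quick check of the merging behavior (the implementation assumes the input positions are sorted ascending):

# _summarize_peaks([100, 105, 109, 125, 130, 150])
#   -> [100, 125, 150]
# 105 and 109 fold into 100, and 130 folds into 125, since each lies
# within 10 of the previously kept peak.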
def log_run(self): """ Log timestamp and raiden version to help with debugging """ version = get_system_spec()['raiden'] cursor = self.conn.cursor() cursor.execute('INSERT INTO runs(raiden_version) VALUES (?)', [version]) self.maybe_commit()
Log timestamp and raiden version to help with debugging
def GetDetectorData(detectorName): """ GetDetectorData - function to return the locations and detector response tensor of the detector described by detectorName. detectorName - Name of required GW IFO. Can be 'H1', 'L1', 'V1', 'I1' or 'K1'. Returns detRespTensor - Detector response tensor of the IFO. detPosVector - Position vector of the IFO. Sarah Gossan 2012. Last updated 02/18/14. """ # Allocate direction vectors and position of IFO given detectorName detPosVector = np.zeros(3) nx = np.zeros(3) ny = np.zeros(3) ################# aLIGO HANFORD ############# if detectorName == 'H1': nx[0] = -0.22389266154 nx[1] = 0.79983062746 nx[2] = 0.55690487831 ny[0] = -0.91397818574 ny[1] = 0.02609403989 ny[2] = -0.40492342125 detPosVector[0] = -2.16141492636e6 detPosVector[1] = -3.83469517889e6 detPosVector[2] = 4.60035022664e6 ################# aLIGO LIVINGSTON ########## elif detectorName == 'L1': nx[0] = -0.95457412153 nx[1] = -0.14158077340 nx[2] = -0.26218911324 ny[0] = 0.29774156894 ny[1] = -0.48791033647 ny[2] = -0.82054461286 detPosVector[0] = -7.42760447238e4 detPosVector[1] = -5.49628371971e6 detPosVector[2] = 3.22425701744e6 ################# AdvVIRGO ################## elif detectorName == 'V1': nx[0] = -0.70045821479 nx[1] = 0.20848948619 nx[2] = 0.68256166277 ny[0] = -0.05379255368 ny[1] = -0.96908180549 ny[2] = 0.24080451708 detPosVector[0] = 4.54637409900e6 detPosVector[1] = 8.42989697626e5 detPosVector[2] = 4.37857696241e6 ################# INDIGO #################### elif detectorName == 'I1': # Not cached LIGO detector - construct x, nx and ny from other # quantities vxLat = (14. + 14./60.)*(np.pi/180.) # Vertex latitude vxLon = (76. + 26./60.)*(np.pi/180.) # Vertex longitude vxElev = 0. # Vertex elevation xAlt = 0. # Altitude of x-arm yAlt = 0. # Altitude of y-arm xAz = np.pi/2. # Azimuth of x-arm yAz = 0. # Azimuth of y-arm cosLat = np.cos(vxLat) sinLat = np.sin(vxLat) cosLon = np.cos(vxLon) sinLon = np.sin(vxLon) ellDenom = np.sqrt(AWGS84*AWGS84*cosLat*cosLat + \ BWGS84*BWGS84*sinLat*sinLat) locRho = cosLat*(AWGS84*AWGS84/ellDenom + vxElev) # Position detPosVector[0] = locRho*cosLon detPosVector[1] = locRho*sinLon detPosVector[2] = sinLat*(BWGS84*BWGS84/ellDenom + vxElev) # x-Arm vector cosxAlt = np.cos(xAlt) sinxAlt = np.sin(xAlt) cosxAz = np.cos(xAz) sinxAz = np.sin(xAz) uxNorth = cosxAlt*cosxAz uxEast = cosxAlt*sinxAz uxRho = -sinLat*uxNorth + cosLat*sinxAlt nx[0] = cosLon*uxRho - sinLon*uxEast nx[1] = sinLon*uxRho + cosLon*uxEast nx[2] = cosLat*uxNorth + sinLat*sinxAlt # y-Arm vector cosyAlt = np.cos(yAlt) sinyAlt = np.sin(yAlt) cosyAz = np.cos(yAz) sinyAz = np.sin(yAz) uyNorth = cosyAlt*cosyAz uyEast = cosyAlt*sinyAz uyRho = -sinLat*uyNorth + cosLat*sinyAlt ny[0] = cosLon*uyRho - sinLon*uyEast ny[1] = sinLon*uyRho + cosLon*uyEast ny[2] = cosLat*uyNorth + sinLat*sinyAlt ################# KAGRA ##################### elif detectorName == 'K1': # Not cached LIGO detector - construct x, nx and ny from other # quantities vxLat = (36.25)*(np.pi/180.) # Vertex latitude vxLon = (137.18)*(np.pi/180.) # Vertex longitude vxElev = 0. # Vertex elevation xAlt = 0. # Altitude of x-arm yAlt = 0. # Altitude of y-arm xMid = 1500. # Midpoint of x-arm yMid = 1500. # Midpoint of y-arm xAz = 19.*(np.pi/180.) + np.pi/2. # Azimuth of x-arm yAz = 19.*(np.pi/180.) 
# Azimuth of y-arm cosLat = np.cos(vxLat) sinLat = np.sin(vxLat) cosLon = np.cos(vxLon) sinLon = np.sin(vxLon) ellDenom = np.sqrt(AWGS84*AWGS84*cosLat*cosLat + \ BWGS84*BWGS84*sinLat*sinLat) locRho = cosLat*(AWGS84*AWGS84/ellDenom + vxElev) # Position detPosVector[0] = locRho*cosLon detPosVector[1] = locRho*sinLon detPosVector[2] = sinLat*(BWGS84*BWGS84/ellDenom + vxElev) # x-Arm vector cosxAlt = np.cos(xAlt) sinxAlt = np.sin(xAlt) cosxAz = np.cos(xAz) sinxAz = np.sin(xAz) uxNorth = cosxAlt*cosxAz uxEast = cosxAlt*sinxAz uxRho = -sinLat*uxNorth + cosLat*sinxAlt nx[0] = cosLon*uxRho - sinLon*uxEast nx[1] = sinLon*uxRho + cosLon*uxEast nx[2] = cosLat*uxNorth + sinLat*sinxAlt # y-Arm vector cosyAlt = np.cos(yAlt) sinyAlt = np.sin(yAlt) cosyAz = np.cos(yAz) sinyAz = np.sin(yAz) uyNorth = cosyAlt*cosyAz uyEast = cosyAlt*sinyAz uyRho = -sinLat*uyNorth + cosLat*sinyAlt ny[0] = cosLon*uyRho - sinLon*uyEast ny[1] = sinLon*uyRho + cosLon*uyEast ny[2] = cosLat*uyNorth + sinLat*sinyAlt # Compute detRespTensor as # detRespTensor = 0.5(nx \outerprod nx - ny \outerprod ny) detRespTensor = np.zeros((3,3),float) for i in range(0,3): for j in range(0,3): detRespTensor[i,j] = (nx[i]*nx[j] - ny[i]*ny[j])*0.5 return detRespTensor,detPosVector
GetDetectorData - function to return the locations and detector response tensor of the detector described by detectorName. detectorName - Name of required GW IFO. Can be 'H1', 'L1', 'V1', 'I1' or 'K1'. Returns detRespTensor - Detector response tensor of the IFO. detPosVector - Position vector of the IFO. Sarah Gossan 2012. Last updated 02/18/14.
def set_jenkins_rename_file(self, nodes_rename_file):
    """ File with nodes renaming mapping:

    Node,Comment
    arm-build1,remove
    arm-build2,keep
    ericsson-build3,merge into ericsson-build1
    ....

    Once set, the rename will be done during the next enrichment.
    """
    self.nodes_rename_file = nodes_rename_file
    self.__load_node_renames()
    logger.info("Jenkins node rename file active: %s", nodes_rename_file)
File with nodes renaming mapping:

Node,Comment
arm-build1,remove
arm-build2,keep
ericsson-build3,merge into ericsson-build1
....

Once set, the rename will be done during the next enrichment.
def reset(self): """ Clear all cell activity. """ self.L4.reset() for module in self.L6aModules: module.reset()
Clear all cell activity.
def get_comment_lookup_session_for_book(self, book_id, proxy): """Gets the ``OsidSession`` associated with the comment lookup service for the given book. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: NotFound - no ``Book`` found by the given ``Id`` raise: NullArgument - ``book_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_comment_lookup(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.CommentLookupSession(book_id, proxy, self._runtime)
Gets the ``OsidSession`` associated with the comment lookup service for the given book. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: NotFound - no ``Book`` found by the given ``Id`` raise: NullArgument - ``book_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` and ``supports_visible_federation()`` are ``true``*
def add_derived_identity(self, id_stmt): """Add pattern def for `id_stmt` and all derived identities. The corresponding "ref" pattern is returned. """ p = self.add_namespace(id_stmt.main_module()) if id_stmt not in self.identities: # add named pattern def self.identities[id_stmt] = SchemaNode.define("__%s_%s" % (p, id_stmt.arg)) parent = self.identities[id_stmt] if id_stmt in self.identity_deps: parent = SchemaNode.choice(parent, occur=2) for i in self.identity_deps[id_stmt]: parent.subnode(self.add_derived_identity(i)) idval = SchemaNode("value", parent, p+":"+id_stmt.arg) idval.attr["type"] = "QName" res = SchemaNode("ref") res.attr["name"] = self.identities[id_stmt].attr["name"] return res
Add pattern def for `id_stmt` and all derived identities. The corresponding "ref" pattern is returned.
def call_use_cached_files(tup): """Importable helper for multi-proc calling of ArtifactCache.use_cached_files on a cache instance. Multiprocessing map/apply/etc require functions which can be imported, not bound methods. To call a bound method, instead call a helper like this and pass tuple of the instance and args. The helper can then call the original method on the deserialized instance. :param tup: A tuple of an ArtifactCache and args (eg CacheKey) for ArtifactCache.use_cached_files. """ try: cache, key, results_dir = tup res = cache.use_cached_files(key, results_dir) if res: sys.stderr.write('.') else: sys.stderr.write(' ') sys.stderr.flush() return res except NonfatalArtifactCacheError as e: logger.warn('Error calling use_cached_files in artifact cache: {0}'.format(e)) return False
Importable helper for multi-proc calling of ArtifactCache.use_cached_files on a cache instance. Multiprocessing map/apply/etc require functions which can be imported, not bound methods. To call a bound method, instead call a helper like this and pass tuple of the instance and args. The helper can then call the original method on the deserialized instance. :param tup: A tuple of an ArtifactCache and args (eg CacheKey) for ArtifactCache.use_cached_files.
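A hedged driver sketch showing the tuple-passing pattern from the docstring; the cache construction and job list are hypothetical, and the cache instance must be picklable for multiprocessing to ship it to workers.

from multiprocessing import Pool

def fetch_all(cache, jobs):
    # `jobs` is a hypothetical list of (cache_key, results_dir) pairs.
    pool = Pool()
    try:
        args = [(cache, key, results_dir) for key, results_dir in jobs]
        return pool.map(call_use_cached_files, args)
    finally:
        pool.close()
        pool.join()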
def trans_history(
        self, from_=None, count=None, from_id=None, end_id=None,
        order=None, since=None, end=None):
    """
    Returns the history of transactions.

    To use this method you need the privilege of the info key.

    :param int or None from_: transaction ID, from which the display starts (default 0)
    :param int or None count: number of transactions to be displayed (default 1000)
    :param int or None from_id: transaction ID, from which the display starts (default 0)
    :param int or None end_id: transaction ID on which the display ends (default inf.)
    :param str or None order: sorting (default 'DESC')
    :param int or None since: the time to start the display (default 0)
    :param int or None end: the time to end the display (default inf.)
    """
    return self._trade_api_call(
        'TransHistory', from_=from_, count=count, from_id=from_id,
        end_id=end_id, order=order, since=since, end=end)
Returns the history of transactions.

To use this method you need the privilege of the info key.

:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transactions to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
def find_checkpoints(model_path: str, size=4, strategy="best", metric: str = C.PERPLEXITY) -> List[str]: """ Finds N best points from .metrics file according to strategy. :param model_path: Path to model. :param size: Number of checkpoints to combine. :param strategy: Combination strategy. :param metric: Metric according to which checkpoints are selected. Corresponds to columns in model/metrics file. :return: List of paths corresponding to chosen checkpoints. """ maximize = C.METRIC_MAXIMIZE[metric] points = utils.get_validation_metric_points(model_path=model_path, metric=metric) # keep only points for which .param files exist param_path = os.path.join(model_path, C.PARAMS_NAME) points = [(value, checkpoint) for value, checkpoint in points if os.path.exists(param_path % checkpoint)] if strategy == "best": # N best scoring points top_n = _strategy_best(points, size, maximize) elif strategy == "last": # N sequential points ending with overall best top_n = _strategy_last(points, size, maximize) elif strategy == "lifespan": # Track lifespan of every "new best" point # Points dominated by a previous better point have lifespan 0 top_n = _strategy_lifespan(points, size, maximize) else: raise RuntimeError("Unknown strategy, options: best last lifespan") # Assemble paths for params files corresponding to chosen checkpoints # Last element in point is always the checkpoint id params_paths = [ os.path.join(model_path, C.PARAMS_NAME % point[-1]) for point in top_n ] # Report logger.info("Found: " + ", ".join(str(point) for point in top_n)) return params_paths
Finds N best points from .metrics file according to strategy. :param model_path: Path to model. :param size: Number of checkpoints to combine. :param strategy: Combination strategy. :param metric: Metric according to which checkpoints are selected. Corresponds to columns in model/metrics file. :return: List of paths corresponding to chosen checkpoints.
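The strategy helpers are referenced but not shown; a minimal sketch of what _strategy_best could look like, assuming each point is a (value, checkpoint) tuple as built above.

def _strategy_best(points, size, maximize):
    # Sort by metric value (descending when the metric is maximized)
    # and keep the N best-scoring (value, checkpoint) tuples.
    return sorted(points, key=lambda point: point[0], reverse=maximize)[:size]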
def add_permission_role(self, role, perm_view): """ Add permission-ViewMenu object to Role :param role: The role object :param perm_view: The PermissionViewMenu object """ if perm_view not in role.permissions: try: role.permissions.append(perm_view) self.get_session.merge(role) self.get_session.commit() log.info( c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(perm_view), role.name) ) except Exception as e: log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e))) self.get_session.rollback()
Add permission-ViewMenu object to Role :param role: The role object :param perm_view: The PermissionViewMenu object
def set_window_size_callback(window, cbfun): """ Sets the size callback for the specified window. Wrapper for: GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun); """ window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value if window_addr in _window_size_callback_repository: previous_callback = _window_size_callback_repository[window_addr] else: previous_callback = None if cbfun is None: cbfun = 0 c_cbfun = _GLFWwindowsizefun(cbfun) _window_size_callback_repository[window_addr] = (cbfun, c_cbfun) cbfun = c_cbfun _glfw.glfwSetWindowSizeCallback(window, cbfun) if previous_callback is not None and previous_callback[0] != 0: return previous_callback[0]
Sets the size callback for the specified window. Wrapper for: GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun);
def log_request(request: str, trim_log_values: bool = False, **kwargs: Any) -> None: """Log a request""" return log_(request, request_logger, logging.INFO, trim=trim_log_values, **kwargs)
Log a request
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ opt = "db_url" if opt not in options: options[opt] = "jdbc:mysql://somehost:3306/somedatabase" if opt not in self.help: self.help[opt] = "The JDBC database URL to connect to (str)." opt = "user" if opt not in options: options[opt] = "user" if opt not in self.help: self.help[opt] = "The database user to use for connecting (str)." opt = "password" if opt not in options: options[opt] = "secret" if opt not in self.help: self.help[opt] = "The password for the database user (str)." opt = "query" if opt not in options: options[opt] = "SELECT * FROM table" if opt not in self.help: self.help[opt] = "The SQL query for generating the dataset (str)." opt = "sparse" if opt not in options: options[opt] = False if opt not in self.help: self.help[opt] = "Whether to return the data in sparse format (bool)." opt = "custom_props" if opt not in options: options[opt] = "" if opt not in self.help: self.help[opt] = "Custom properties filename (str)." return super(LoadDatabase, self).fix_config(options)
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
def create_raw(self, key, value): """Create method of CRUD operation for raw data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write. """ data = None if key is not None and value is not None: data = self.db.create(key.strip(), value) else: self.tcex.log.warning(u'The key or value field was None.') return data
Create method of CRUD operation for raw data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
def call():
    """Execute command line helper."""
    args = get_arguments()

    # Set up logging
    if args.debug:
        log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARN
    else:
        log_level = logging.INFO

    setup_logging(log_level)

    abode = None

    if not args.cache:
        if not args.username or not args.password:
            raise Exception("Please supply a cache or username and password.")

    try:
        # Create abodepy instance.
        if args.cache and args.username and args.password:
            abode = abodepy.Abode(username=args.username,
                                  password=args.password,
                                  get_devices=True,
                                  cache_path=args.cache)
        elif args.cache and (not args.username or not args.password):
            abode = abodepy.Abode(get_devices=True,
                                  cache_path=args.cache)
        else:
            abode = abodepy.Abode(username=args.username,
                                  password=args.password,
                                  get_devices=True)

        # Output current mode.
        if args.mode:
            _LOGGER.info("Current alarm mode: %s", abode.get_alarm().mode)

        # Change system mode.
        if args.arm:
            if abode.get_alarm().set_mode(args.arm):
                _LOGGER.info("Alarm mode changed to: %s", args.arm)
            else:
                _LOGGER.warning("Failed to change alarm mode to: %s", args.arm)

        # Set setting
        for setting in args.set or []:
            keyval = setting.split("=")
            if abode.set_setting(keyval[0], keyval[1]):
                _LOGGER.info("Setting %s changed to %s", keyval[0], keyval[1])

        # Switch on
        for device_id in args.on or []:
            device = abode.get_device(device_id)

            if device:
                if device.switch_on():
                    _LOGGER.info("Switched on device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Switch off
        for device_id in args.off or []:
            device = abode.get_device(device_id)

            if device:
                if device.switch_off():
                    _LOGGER.info("Switched off device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Lock
        for device_id in args.lock or []:
            device = abode.get_device(device_id)

            if device:
                if device.lock():
                    _LOGGER.info("Locked device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Unlock
        for device_id in args.unlock or []:
            device = abode.get_device(device_id)

            if device:
                if device.unlock():
                    _LOGGER.info("Unlocked device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Output Json
        for device_id in args.json or []:
            device = abode.get_device(device_id)

            if device:
                # pylint: disable=protected-access
                _LOGGER.info(device_id + " JSON:\n" +
                             json.dumps(device._json_state, sort_keys=True,
                                        indent=4, separators=(',', ': ')))
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Print
        def _device_print(dev, append=''):
            _LOGGER.info("%s%s", dev.desc, append)

        # Print out all automations
        if args.automations:
            for automation in abode.get_automations():
                _device_print(automation)

        # Enable automation
        for automation_id in args.activate or []:
            automation = abode.get_automation(automation_id)

            if automation:
                if automation.set_active(True):
                    _LOGGER.info(
                        "Activated automation with id: %s", automation_id)
            else:
                _LOGGER.warning(
                    "Could not find automation with id: %s", automation_id)

        # Disable automation
        for automation_id in args.deactivate or []:
            automation = abode.get_automation(automation_id)

            if automation:
                if automation.set_active(False):
                    _LOGGER.info(
                        "Deactivated automation with id: %s", automation_id)
            else:
                _LOGGER.warning(
                    "Could not find automation with id: %s", automation_id)

        # Trigger automation
        for automation_id in args.trigger or []:
            automation = abode.get_automation(automation_id)

            if automation:
                if automation.trigger():
                    _LOGGER.info(
                        "Triggered automation with id: %s",
automation_id) else: _LOGGER.warning( "Could not find automation with id: %s", automation_id) # Trigger image capture for device_id in args.capture or []: device = abode.get_device(device_id) if device: if device.capture(): _LOGGER.info( "Image requested from device with id: %s", device_id) else: _LOGGER.warning( "Failed to request image from device with id: %s", device_id) else: _LOGGER.warning("Could not find device with id: %s", device_id) # Save camera image for keyval in args.image or []: devloc = keyval.split("=") device = abode.get_device(devloc[0]) if device: try: if (device.refresh_image() and device.image_to_file(devloc[1])): _LOGGER.info( "Saved image to %s for device id: %s", devloc[1], devloc[0]) except AbodeException as exc: _LOGGER.warning("Unable to save image: %s", exc) else: _LOGGER.warning( "Could not find device with id: %s", devloc[0]) # Print out all devices. if args.devices: for device in abode.get_devices(): _device_print(device) def _device_callback(dev): _device_print(dev, ", At: " + time.strftime("%Y-%m-%d %H:%M:%S")) def _timeline_callback(tl_json): event_code = int(tl_json['event_code']) if 5100 <= event_code <= 5199: # Ignore device changes return _LOGGER.info("%s - %s at %s %s", tl_json['event_name'], tl_json['event_type'], tl_json['date'], tl_json['time']) # Print out specific devices by device id. if args.device: for device_id in args.device: device = abode.get_device(device_id) if device: _device_print(device) # Register the specific devices if we decide to listen. abode.events.add_device_callback(device_id, _device_callback) else: _LOGGER.warning( "Could not find device with id: %s", device_id) # Start device change listener. if args.listen: # If no devices were specified then we listen to all devices. if args.device is None: _LOGGER.info("Adding all devices to listener...") for device in abode.get_devices(): abode.events.add_device_callback(device.device_id, _device_callback) abode.events.add_timeline_callback(TIMELINE.ALL, _timeline_callback) _LOGGER.info("Listening for device and timeline updates...") abode.events.start() try: while True: time.sleep(1) except KeyboardInterrupt: abode.events.stop() _LOGGER.info("Device update listening stopped.") except abodepy.AbodeException as exc: _LOGGER.error(exc) finally: if abode: abode.logout()
Execute command line helper.
def idle_all_workers(self): '''Set the global mode to :attr:`IDLE` and wait for workers to stop. This can wait arbitrarily long before returning. The worst case in "normal" usage involves waiting five minutes for a "lost" job to expire; a well-behaved but very-long-running job can extend its own lease further, and this function will not return until that job finishes (if ever). .. deprecated:: 0.4.5 There isn't an obvious use case for this function, and its "maybe wait forever for something out of my control" nature makes it hard to use in real code. Polling all of the work specs and their :meth:`num_pending` in application code if you really needed this operation would have the same semantics and database load. ''' self.set_mode(self.IDLE) while 1: num_pending = dict() for work_spec_name in self.registry.pull(NICE_LEVELS).keys(): num_pending[work_spec_name] = self.num_pending(work_spec_name) if sum(num_pending.values()) == 0: break logger.warn('waiting for pending work_units: %r', num_pending) time.sleep(1)
Set the global mode to :attr:`IDLE` and wait for workers to stop. This can wait arbitrarily long before returning. The worst case in "normal" usage involves waiting five minutes for a "lost" job to expire; a well-behaved but very-long-running job can extend its own lease further, and this function will not return until that job finishes (if ever). .. deprecated:: 0.4.5 There isn't an obvious use case for this function, and its "maybe wait forever for something out of my control" nature makes it hard to use in real code. Polling all of the work specs and their :meth:`num_pending` in application code if you really needed this operation would have the same semantics and database load.
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
           ignored_variables=None, ignored_interactions=None,
           ignore_offset=False, **parameters):
    """ Scale and sample from the provided binary quadratic model.

    If scalar is not given, the problem is scaled based on bias and quadratic
    ranges. See :meth:`.BinaryQuadraticModel.scale` and
    :meth:`.BinaryQuadraticModel.normalize`

    Args:
        bqm (:obj:`dimod.BinaryQuadraticModel`):
            Binary quadratic model to be sampled from.

        scalar (number):
            Value by which to scale the energy range of the binary quadratic model.

        bias_range (number/pair):
            Value/range by which to normalize all the biases, or if
            `quadratic_range` is provided, just the linear biases.

        quadratic_range (number/pair):
            Value/range by which to normalize the quadratic biases.

        ignored_variables (iterable, optional):
            Biases associated with these variables are not scaled.

        ignored_interactions (iterable[tuple], optional):
            As an iterable of 2-tuples. Biases associated with these
            interactions are not scaled.

        ignore_offset (bool, default=False):
            If True, the offset is not scaled.

        **parameters:
            Parameters for the sampling method, specified by the child sampler.

    Returns:
        :obj:`dimod.SampleSet`

    """
    ignored_variables, ignored_interactions = _check_params(
        ignored_variables, ignored_interactions)

    child = self.child
    bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
                           ignored_variables, ignored_interactions,
                           ignore_offset)
    response = child.sample(bqm_copy, **parameters)

    return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
                                ignored_variables, ignored_interactions,
                                ignore_offset)
Scale and sample from the provided binary quadratic model.

If scalar is not given, the problem is scaled based on bias and quadratic
ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`

Args:
    bqm (:obj:`dimod.BinaryQuadraticModel`):
        Binary quadratic model to be sampled from.

    scalar (number):
        Value by which to scale the energy range of the binary quadratic model.

    bias_range (number/pair):
        Value/range by which to normalize all the biases, or if
        `quadratic_range` is provided, just the linear biases.

    quadratic_range (number/pair):
        Value/range by which to normalize the quadratic biases.

    ignored_variables (iterable, optional):
        Biases associated with these variables are not scaled.

    ignored_interactions (iterable[tuple], optional):
        As an iterable of 2-tuples. Biases associated with these
        interactions are not scaled.

    ignore_offset (bool, default=False):
        If True, the offset is not scaled.

    **parameters:
        Parameters for the sampling method, specified by the child sampler.

Returns:
    :obj:`dimod.SampleSet`
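A hedged end-to-end sketch, assuming this method belongs to a scaling composite (the ScaleComposite name is inferred from context) wrapped around a child sampler:

import dimod

# Small Ising problem with biases outside the target range.
bqm = dimod.BinaryQuadraticModel.from_ising({'a': -4.0, 'b': -4.0},
                                            {('a', 'b'): 3.2})
sampler = ScaleComposite(dimod.ExactSolver())  # composite name assumed
sampleset = sampler.sample(bqm, scalar=0.5)    # or pass bias_range/quadratic_range
print(sampleset.first.energy)                  # energies scaled back to the original bqm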
def safe_search_detection( self, image, max_results=None, retry=None, timeout=None, additional_properties=None ): """ For the documentation see: :py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator` """ client = self.annotator_client self.log.info("Detecting safe search") if additional_properties is None: additional_properties = {} response = client.safe_search_detection( image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties ) response = MessageToDict(response) self._check_for_error(response) self.log.info("Safe search detection finished") return response
For the documentation see: :py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator`
def salt_call(): ''' Directly call a salt command in the modules, does not require a running salt minion to run. ''' import salt.cli.call if '' in sys.path: sys.path.remove('') client = salt.cli.call.SaltCall() _install_signal_handlers(client) client.run()
Directly call a salt command in the modules, does not require a running salt minion to run.
def download_file_content(self, file_id, etag=None): '''Download file content. Args: file_id (str): The UUID of the file whose content is requested etag (str): If the content is not changed since the provided ETag, the content won't be downloaded. If the content is changed, it will be downloaded and returned with its new ETag. Note: ETags should be enclosed in double quotes:: my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"' Returns: A tuple of ETag and content (etag, content) if the content was retrieved. If an etag was provided, and content didn't change returns (None, None):: ('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!') Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ''' if not is_valid_uuid(file_id): raise StorageArgumentException( 'Invalid UUID for file_id: {0}'.format(file_id)) headers = {'Accept': '*/*'} if etag: headers['If-None-Match'] = etag resp = self._authenticated_request \ .to_endpoint('file/{}/content/'.format(file_id)) \ .with_headers(headers) \ .get() if resp.status_code == 304: return (None, None) if 'ETag' not in resp.headers: raise StorageException('No ETag received from the service with the download') return (resp.headers['ETag'], resp.content)
Download file content. Args: file_id (str): The UUID of the file whose content is requested etag (str): If the content is not changed since the provided ETag, the content won't be downloaded. If the content is changed, it will be downloaded and returned with its new ETag. Note: ETags should be enclosed in double quotes:: my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"' Returns: A tuple of ETag and content (etag, content) if the content was retrieved. If an etag was provided, and content didn't change returns (None, None):: ('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!') Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
def unzip(seq, elem_len=None):
    """Unzip a length n sequence of length m sequences into m separate
    length n sequences.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip.
    elem_len : int, optional
        The expected length of each element of ``seq``. If not provided this
        will be inferred from the length of the first element of ``seq``. This
        can be used to ensure that code like: ``a, b = unzip(seq)`` does not
        fail even when ``seq`` is empty.

    Returns
    -------
    seqs : iterable[iterable]
        The new sequences pulled out of the first iterable.

    Raises
    ------
    ValueError
        Raised when ``seq`` is empty and ``elem_len`` is not provided.
        Raised when elements of ``seq`` do not match the given ``elem_len`` or
        the length of the first element of ``seq``.

    Examples
    --------
    >>> seq = [('a', 1), ('b', 2), ('c', 3)]
    >>> cs, ns = unzip(seq)
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)

    # checks that the elements are the same length
    >>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
    >>> cs, ns = unzip(seq)
    Traceback (most recent call last):
       ...
    ValueError: element at index 2 was length 3, expected 2

    # allows an explicit element length instead of inferring
    >>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
    >>> cs, ns = unzip(seq, 2)
    Traceback (most recent call last):
       ...
    ValueError: element at index 0 was length 3, expected 2

    # handles empty sequences when a length is given
    >>> cs, ns = unzip([], elem_len=2)
    >>> cs == ns == ()
    True

    Notes
    -----
    This function will force ``seq`` to completion.
    """
    ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
    if ret:
        return ret

    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")

    return ((),) * elem_len
Unzip a length n sequence of length m sequences into m separate
length n sequences.

Parameters
----------
seq : iterable[iterable]
    The sequence to unzip.
elem_len : int, optional
    The expected length of each element of ``seq``. If not provided this
    will be inferred from the length of the first element of ``seq``. This
    can be used to ensure that code like: ``a, b = unzip(seq)`` does not
    fail even when ``seq`` is empty.

Returns
-------
seqs : iterable[iterable]
    The new sequences pulled out of the first iterable.

Raises
------
ValueError
    Raised when ``seq`` is empty and ``elem_len`` is not provided.
    Raised when elements of ``seq`` do not match the given ``elem_len`` or
    the length of the first element of ``seq``.

Examples
--------
>>> seq = [('a', 1), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq)
>>> cs
('a', 'b', 'c')
>>> ns
(1, 2, 3)

# checks that the elements are the same length
>>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
>>> cs, ns = unzip(seq)
Traceback (most recent call last):
   ...
ValueError: element at index 2 was length 3, expected 2

# allows an explicit element length instead of inferring
>>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq, 2)
Traceback (most recent call last):
   ...
ValueError: element at index 0 was length 3, expected 2

# handles empty sequences when a length is given
>>> cs, ns = unzip([], elem_len=2)
>>> cs == ns == ()
True

Notes
-----
This function will force ``seq`` to completion.
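The companion generator _gen_unzip is referenced but not shown; a sketch reconstructed from the documented errors and doctests, assuming it receives an iterator (as map produces in Python 3):

def _gen_unzip(it, elem_len):
    # Yield elements while checking that every one has the same length,
    # raising the ValueError messages shown in the doctests above.
    try:
        elem = next(it)
    except StopIteration:
        return
    first_elem_len = len(elem)
    if elem_len is not None and elem_len != first_elem_len:
        raise ValueError('element at index 0 was length %d, expected %d' %
                         (first_elem_len, elem_len))
    elem_len = first_elem_len
    yield elem
    for n, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError('element at index %d was length %d, expected %d' %
                             (n, len(elem), elem_len))
        yield elem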
def encode(self, key): """Encodes a user key into a particular format. The result of this method will be used by swauth for storing user credentials. If salt is not manually set in conf file, a random salt will be generated and used. :param key: User's secret key :returns: A string representing user credentials """ salt = self.salt or os.urandom(32).encode('base64').rstrip() return self.encode_w_salt(salt, key)
Encodes a user key into a particular format. The result of this method will be used by swauth for storing user credentials. If salt is not manually set in conf file, a random salt will be generated and used. :param key: User's secret key :returns: A string representing user credentials
def with_timeout( timeout: Union[float, datetime.timedelta], future: _Yieldable, quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (), ) -> Future: """Wraps a `.Future` (or other yieldable object) in a timeout. Raises `tornado.util.TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`) If the wrapped `.Future` fails after it has timed out, the exception will be logged unless it is of a type contained in ``quiet_exceptions`` (which may be an exception type or a sequence of types). The wrapped `.Future` is not canceled when the timeout expires, permitting it to be reused. `asyncio.wait_for` is similar to this function but it does cancel the wrapped `.Future` on timeout. .. versionadded:: 4.0 .. versionchanged:: 4.1 Added the ``quiet_exceptions`` argument and the logging of unhandled exceptions. .. versionchanged:: 4.4 Added support for yieldable objects other than `.Future`. """ # It's tempting to optimize this by cancelling the input future on timeout # instead of creating a new one, but A) we can't know if we are the only # one waiting on the input future, so cancelling it might disrupt other # callers and B) concurrent futures can only be cancelled while they are # in the queue, so cancellation cannot reliably bound our waiting time. future_converted = convert_yielded(future) result = _create_future() chain_future(future_converted, result) io_loop = IOLoop.current() def error_callback(future: Future) -> None: try: future.result() except Exception as e: if not isinstance(e, quiet_exceptions): app_log.error( "Exception in Future %r after timeout", future, exc_info=True ) def timeout_callback() -> None: if not result.done(): result.set_exception(TimeoutError("Timeout")) # In case the wrapped future goes on to fail, log it. future_add_done_callback(future_converted, error_callback) timeout_handle = io_loop.add_timeout(timeout, timeout_callback) if isinstance(future_converted, Future): # We know this future will resolve on the IOLoop, so we don't # need the extra thread-safety of IOLoop.add_future (and we also # don't care about StackContext here. future_add_done_callback( future_converted, lambda future: io_loop.remove_timeout(timeout_handle) ) else: # concurrent.futures.Futures may resolve on any thread, so we # need to route them back to the IOLoop. io_loop.add_future( future_converted, lambda future: io_loop.remove_timeout(timeout_handle) ) return result
Wraps a `.Future` (or other yieldable object) in a timeout. Raises `tornado.util.TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`) If the wrapped `.Future` fails after it has timed out, the exception will be logged unless it is of a type contained in ``quiet_exceptions`` (which may be an exception type or a sequence of types). The wrapped `.Future` is not canceled when the timeout expires, permitting it to be reused. `asyncio.wait_for` is similar to this function but it does cancel the wrapped `.Future` on timeout. .. versionadded:: 4.0 .. versionchanged:: 4.1 Added the ``quiet_exceptions`` argument and the logging of unhandled exceptions. .. versionchanged:: 4.4 Added support for yieldable objects other than `.Future`.
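A brief usage sketch inside a coroutine; fetch_url is a hypothetical awaitable used only for illustration.

import datetime
from tornado.util import TimeoutError

async def fetch_with_deadline():
    try:
        result = await with_timeout(datetime.timedelta(seconds=5),
                                    fetch_url('http://example.com'))
    except TimeoutError:
        # The wrapped future is not cancelled; it may still complete later.
        result = None
    return result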
def dr(self, r1, r2, cutoff=None):
    """
    Calculate the distance between two fractional coordinates in the cell.

    Args:
        r1 (np.array): fractional coordinates for position 1.
        r2 (np.array): fractional coordinates for position 2.
        cutoff (float, optional): If set, return None for distances greater
            than the cutoff. Default None (unset).

    Returns:
        (float): the distance between r1 and r2.
    """
    delta_r_cartesian = (r1 - r2).dot(self.matrix)
    delta_r_squared = sum(delta_r_cartesian**2)
    if cutoff is not None:
        cutoff_squared = cutoff ** 2
        if delta_r_squared > cutoff_squared:
            return None
    return math.sqrt(delta_r_squared)
Calculate the distance between two fractional coordinates in the cell.

Args:
    r1 (np.array): fractional coordinates for position 1.
    r2 (np.array): fractional coordinates for position 2.
    cutoff (float, optional): If set, return None for distances greater
        than the cutoff. Default None (unset).

Returns:
    (float): the distance between r1 and r2.
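A worked example, assuming this method lives on a cell object whose matrix attribute holds the lattice vectors (here a hypothetical cubic cell of side 10.0):

import numpy as np

r1 = np.array([0.5, 0.5, 0.5])
r2 = np.array([0.4, 0.5, 0.5])
# With self.matrix = 10.0 * np.identity(3):
#   delta_r_cartesian = (r1 - r2).dot(self.matrix) -> [1.0, 0.0, 0.0]
#   cell.dr(r1, r2)              -> 1.0
#   cell.dr(r1, r2, cutoff=0.5)  -> None, since 1.0 > 0.5
# Note the calculation is direct; no minimum-image convention is applied.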
def _is_accepted_input(self, input_string): """ vlc input filtering """ ret = False accept_filter = (self.volume_string, "http stream debug: ") reject_filter = () for n in accept_filter: if n in input_string: ret = True break if ret: for n in reject_filter: if n in input_string: ret = False break return ret
vlc input filtering
def main(print_cfg):
    """Main function which parses user options and generates jobs accordingly"""
    parser = argparse.ArgumentParser(description="mvmany.py -- MVtest helper script.",
                                     epilog="""
mvmany.py should be run with all arguments you would pass to mvtest.py, except
for those which are specific to a single node's settings (such as chromosome
and BP range). It will then generate a number of scripts which can be run
simultaneously on a cluster.

Please be aware that this script does not attempt to adjust walltimes according
to size, and that the user should choose a walltime that is long enough to
allow the largest run to complete.
""")
    parser.add_argument("-v", action="store_true", help="Print version number")
    parser.add_argument("--mvpath", type=str, default="mvtest.py", help="The path to mvtest.py if it's not found in PATH")
    parser.add_argument("--logpath", type=str, default="log", help="Where to write the logs to")
    parser.add_argument("--res-path", type=str, default="result", help="Where to write the results to")
    parser.add_argument("--script-path", type=str, default="scripts", help="Where to write the various scripts to")
    parser.add_argument("--template", type=argparse.FileType('r'),
                        help="File containing cluster related information that will be written as header to the various files")
    parser.add_argument("--snps-per-job", type=float, default=1.0, help="Number of SNPs to be assigned to each job")
    parser.add_argument("--mem", type=str, default="2G", help="Memory required during runs")
    parser.add_argument("--walltime", type=str, default="3:00:00", help="Walltime to use")

    # Most of these will just be passed on directly to the various MVTest scripts.
    parser.add_argument("--exclude", type=str, help="Comma-delimited list of rsids to be excluded")
    parser.add_argument("--keep", type=str, help="Comma-delimited list of individuals to be analyzed")
    parser.add_argument("--remove", type=str, help="Comma-delimited list of individuals to be removed from analysis")
    parser.add_argument("--file", type=str, help="Prefix for .ped and .map files")
    parser.add_argument("--ped", type=argparse.FileType('r'), help="PLINK compatible .ped file")
    parser.add_argument("--map", type=argparse.FileType('r'), help="PLINK compatible .map file")
    parser.add_argument("--map3", action='store_true', help="MAP file has only 3 columns")
    parser.add_argument("--no-sex", action='store_true', help="Pedigree file doesn't have column 5 (sex)")
    parser.add_argument("--no-parents", action="store_true", help="Pedigree file doesn't have columns 3 and 4 (parents)")
    parser.add_argument("--no-fid", action="store_true", help="Pedigree file doesn't have column 1 (family ID)")
    parser.add_argument("--no-pheno", action="store_true", help="Pedigree file doesn't have column 6 (phenotype)")
    parser.add_argument("--liability", action="store_true", help="Pedigree file has column 7 (liability)")
    parser.add_argument("--bfile", type=str, help="Prefix for .bed, .bim and .fam files")
    parser.add_argument("--bed", type=argparse.FileType('r'), help="Binary Ped file (.bed)")
    parser.add_argument("--bim", type=argparse.FileType('r'), help="Binary ped marker file (.bim)")
    parser.add_argument("--fam", type=argparse.FileType('r'), help="Binary ped family file (.fam)")
    parser.add_argument("--tfile", type=str, help="Prefix for .tped and .tfam files")
    parser.add_argument("--tped", type=argparse.FileType('r'), help="Transposed Pedigree file (.tped)")
    parser.add_argument("--tfam", type=argparse.FileType('r'), help="Transposed pedigree family file (.tfam)")
    parser.add_argument("--compressed", action="store_true",
                        help="Ped/TPed compressed with gzip (named .ped.tgz or .tped.tgz)")
    parser.add_argument("--impute", type=argparse.FileType('r'), help="File containing list of impute output for analysis")
    parser.add_argument("--impute-fam", type=argparse.FileType('r'), help="File containing family details for impute data")
    parser.add_argument("--impute-count", type=int, help="Number of impute files to process on a single node")
    parser.add_argument("--impute-uncompressed", action="store_true",
                        help="Indicate that the impute input is not gzipped, but plain text")
    parser.add_argument("--impute-encoding", type=str, choices=['additive', 'dominant', 'recessive', 'genotype'],
                        help='Genetic model to be used')
    parser.add_argument("--impute-info-ext", type=str, help="Portion of filename denotes info filename")
    parser.add_argument("--impute-gen-ext", type=str, help="Portion of filename that denotes gen file")
    parser.add_argument("--impute-info-thresh", type=float, help="Threshold for filtering imputed SNPs with poor 'info' values")
    parser.add_argument("--impute-files-per-job", type=int, help="How many gen files to run per job")
    parser.add_argument("--mach", type=argparse.FileType('r'), help="File containing list of MACH output for analysis")
    parser.add_argument("--mach-uncompressed", action="store_true", help="Indicate that the mach input is not gzipped")
    parser.add_argument("--mach-info-ext", type=str, help="Portion of filename denotes info filenames")
    parser.add_argument("--mach-dose-ext", type=str, help="Portion of filename that denotes dose files")
    parser.add_argument("--mach-min-rsquared", type=float, help="Filter out loci with RSquared < this value")
    parser.add_argument("--mach-files-per-job", type=int, help="How many dosage files to run per job")
    parser.add_argument("--mach-chunk-size", type=int, help="How many loci to keep in memory at once")
    parser.add_argument("--mach-count", type=int, help="Number of mach files to process for each job")
    parser.add_argument("--pheno", type=argparse.FileType('r'), help="File containing phenotypes")
    parser.add_argument("--sample-pheno", type=argparse.FileType('r'), help="(Mach) Sample file containing phenotypes")
    parser.add_argument("--mphenos", type=str, help="Column number(s) for phenotype to be analyzed if number of columns > 1")
    parser.add_argument("--pheno-names", type=str, help="Name for phenotype(s) to be analyzed (must be in --pheno file)")
    parser.add_argument("--all-pheno", action="store_true", help="Analyze all columns from the phenotype file")
    parser.add_argument("--covar", type=argparse.FileType('r'), help="File containing covariates")
    parser.add_argument("--sample-covar", type=argparse.FileType('r'), help="(Mach) Sample file containing covariates")
    parser.add_argument("--covar-numbers", type=str, help="Comma-separated list of covariate indices")
    parser.add_argument("--covar-names", type=str, help="Comma-separated list of covariate names")
    parser.add_argument("--sex", action='store_true', help="Use sex from the pedigree file as a covariate")
    parser.add_argument("--missing-phenotype", type=float, help="Encoding for missing phenotypes")
    parser.add_argument("--maf", type=float, help="Minimum MAF allowed for analysis")
    parser.add_argument("--max-maf", type=float, help="MAX MAF allowed for analysis")
    parser.add_argument("--geno", type=float, help="MAX per-SNP missing for analysis")
    parser.add_argument("--mind", type=float, help="MAX per-person missing")

    parser.set_defaults(all_pheno=False, sex=False)
    args = parser.parse_args()

    # Report version, if requested, and exit
    if args.v:
        print >> sys.stderr, "%s: %s" % (os.path.basename(__file__), __version__)
        sys.exit(0)

    mkdir(args.script_path)
    mkdir(args.res_path)
    mkdir(args.logpath)

    if args.mach:
        args.mach = args.mach.name
    if args.impute:
        args.impute = args.impute.name
    if args.impute_fam:
        args.impute_fam = args.impute_fam.name
    if args.covar:
        args.covar = args.covar.name
    if args.pheno:
        args.pheno = args.pheno.name

    general_arguments = []
    for flag in ("exclude,keep,remove,file,ped,map,map3,no-sex,no-parents,no-fid,no-pheno,liability," + \
                 "bfile,bed,bim,fam,tfile,tped,tfam,compressed," + \
                 "impute,impute-fam,impute-uncompressed,impute-encoding,impute-info-ext,impute-gen-ext,impute-info-thresh," + \
                 "mach,mach-uncompressed,mach-info-ext,mach-dose-ext,mach-min-rsquared,mach-chunk-size," + \
                 "pheno,sample-pheno,pheno-names,mphenos,all-pheno," + \
                 "covar,sample-covar,covar-numbers,covar-names,sex,missing-phenotype,maf,max-maf,geno,mind").split(","):
        check_and_append(args, flag, general_arguments)

    args.template = get_template_file(args)

    job_list = None
    map_file = None
    impute_file_list = None
    if args.file:
        map_file = "%s.map" % (args.file)
    elif args.map:
        map_file = args.map
    elif args.bfile:
        map_file = "%s.bim" % (args.bfile)
    elif args.bim:
        map_file = args.bim
    elif args.tfile:
        map_file = "%s.tped" % (args.tfile)
    elif args.tped:
        map_file = args.tped
    elif args.impute:
        impute_file_list = args.impute

    max_snp_count = args.snps_per_job
    if map_file is not None:
        job_list = split_chrom_jobs(args, map_file)
    elif args.impute is not None:
        job_list = split_impute_jobs(args, args.impute)
    elif args.mach is not None:
        job_list = split_mach_jobs(args, args.mach)

    if job_list is None or len(job_list) == 0:
        parser.print_usage(sys.stderr)
        print >> sys.stderr, "\nNo jobs were created. Did you specify the necessary data?"
    else:
        generate_jobs(args, job_list, " ".join(general_arguments))
    sys.exit(0)
Main function which parses user options and generates jobs accordingly
def from_spec(spec, kwargs=None): """ Creates a distribution from a specification dict. """ distribution = util.get_object( obj=spec, predefined_objects=tensorforce.core.distributions.distributions, kwargs=kwargs ) assert isinstance(distribution, Distribution) return distribution
Creates a distribution from a specification dict.
def sync(self, vault_client, opt):
    """Synchronizes the context to the Vault server. This has the
    effect of updating every resource which is in the context and
    has changes pending."""
    active_mounts = []
    for audit_log in self.logs():
        audit_log.sync(vault_client)
    # Handle policies only on the first pass. This allows us
    # to ensure that ACL's are in place prior to actually
    # making any changes.
    not_policies = self.sync_policies(vault_client)
    # Handle auth wrapper resources in the next pass. The resources
    # may update a path on their own. They may also provide mount
    # tuning information.
    not_auth = self.sync_auth(vault_client, not_policies)
    # Handle mounts only on the next pass. This allows us to
    # ensure that everything is in order prior to actually
    # provisioning secrets. Note we handle removals before
    # anything else, allowing us to address mount conflicts.
    active_mounts, not_mounts = self.sync_mounts(active_mounts,
                                                 not_auth,
                                                 vault_client)
    # Now handle everything else. If "best practices" are being
    # adhered to then every generic mountpoint should exist by now.
    # We handle "child" resources after the first batch.
    sorted_resources = sorted(not_mounts, key=childless_first)
    for resource in sorted_resources:
        resource.sync(vault_client)
    for mount in self.mounts():
        if not find_backend(mount.path, active_mounts):
            mount.unmount(vault_client)
    if opt.remove_unknown:
        self.prune(vault_client)
Synchronizes the context to the Vault server. This has the effect of updating every resource which is in the context and has changes pending.
def is_supported(cls, file=None, request=None, response=None, url_info=None): '''Given the hints, return whether the document is supported. Args: file: A file object containing the document. request (:class:`.http.request.Request`): An HTTP request. response (:class:`.http.request.Response`): An HTTP response. url_info (:class:`.url.URLInfo`): A URLInfo. Returns: bool: If True, the reader should be able to read it. ''' tests = ( (response, cls.is_response), (file, cls.is_file), (request, cls.is_request), (url_info, cls.is_url) ) for instance, method in tests: if instance: try: result = method(instance) except NotImplementedError: pass else: if result: return True elif result is VeryFalse: return VeryFalse
Given the hints, return whether the document is supported. Args: file: A file object containing the document. request (:class:`.http.request.Request`): An HTTP request. response (:class:`.http.request.Response`): An HTTP response. url_info (:class:`.url.URLInfo`): A URLInfo. Returns: bool: If True, the reader should be able to read it.
def read_sla(self, filename, params=None, force=False, timerange=None, datatype=None, **kwargs):
    """
    Read AVISO Along-Track products

    :return outStr: Output data structure containing all recorded parameters
        as specified by NetCDF file PARAMETER list.
    :author: Renaud Dussurget
    """
    from time import time
    from datetime import timedelta
    # a = time()
    self.message(2, 'Reading AVISO DT data ({0})'.format(datatype))

    # Open file
    self._filename = filename
    try:
        self._ncfile = ncfile(self._filename, "r")
    except Exception, e:
        self.warning(1, repr(e))
        return {}

    # Get delimiter
    if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'):
        delim = '.'
    else:
        delim = '_'

    # Get sat name
    splitted = os.path.basename(filename).split(delim)
    if len(splitted) > 3:
        if (datatype == 'DT') | (datatype == 'NRT'):
            sat_name = splitted[2] if splitted[0] == 'nrt' else splitted[3]
        elif datatype == 'PISTACH':
            sat_name = 'J2'
        else:
            sat_name = 'J2'
    else:
        sat_name = "N/A"

    # Get list of recorded parameters:
    par_list = [i.encode() for i in self._ncfile.variables.keys()]
    for i in ['BeginDates', 'Longitudes', 'Latitudes']:
        par_list.pop(par_list.index(i))
    nparam = len(par_list)
    self.message(2, 'Recorded parameters : ' + str(nparam) + ' -> ' + str(par_list))

    lon = self.load_ncVar('Longitudes', **kwargs)
    lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True)  # shift longitudes
    lat = self.load_ncVar('Latitudes', **kwargs)
    # lon = self.load_ncVar('Longitudes',Longitudes=(self.limit[1],self.limit[3]),**kwargs)
    # lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes
    # lat = self.load_ncVar('Latitudes',Latitudes=(self.limit[0],self.limit[1]),**kwargs)

    # Extract within limits
    ind, flag = in_limits(lon['data'], lat['data'], limit=self.limit)
    dim_lon = lon['_dimensions']
    lat = lat['data'].compress(flag)
    lon = lon['data'].compress(flag)
    # dist=cumulative_distance(lat, lon)

    sz = np.shape(lon)
    ndims = np.size(sz)

    # Get dates
    stDate = self.load_ncVar('BeginDates', **kwargs)['data']
    dumVar = self.load_ncVar('Cycles', **kwargs)
    nbCyc = dumVar['data']
    Ncycs = dumVar['_dimensions']['Cycles']
    Ntra = dumVar['_dimensions']['Tracks']
    nbTra = self.load_ncVar('Tracks', **kwargs)['data']
    # if np.size(stDate) == 1 : stDate.
    DeltaT = self._ncfile.variables['DeltaT'][:] / 86400.
    npts = self.load_ncVar('NbPoints', **kwargs)['data']
    dumind = np.cumsum(npts)

    # Reconstruct per-point dates, cycle numbers and track numbers from the
    # per-track records (BeginDates, NbPoints, Cycles, Tracks).
    date = ()
    cycles = ()
    tracks = ()

    indcopy = ind.copy()
    npts[npts.mask] = 0
    dumind[dumind.mask] = 0
    nbTra_copy = nbTra.copy()
    concat_npts = not (nbCyc.shape[-1] > 1)

    # Loop over cycles
    for i in np.arange(1, Ncycs, 1.0, dtype=int):
        nbTra = np.ma.concatenate((nbTra, nbTra_copy))
        if concat_npts:
            npts = np.ma.concatenate((npts, tuple((~nbCyc.T[i].mask) * 1 * npts)))

    if concat_npts:
        npts = npts.reshape(nbCyc.shape[::-1]).T
    else:
        npts = nbCyc
    nbTra = nbTra.reshape(nbCyc.shape[::-1]).T
    nbTra.mask = nbCyc.mask
    npts = npts.flatten()
    nbTra = nbTra.flatten()

    nbCyc_flatten = nbCyc.flatten()
    nbTra_flatten = nbTra.flatten()
    stDate_flatten = stDate.flatten()

    outInd = []
    for i, nc in enumerate(nbCyc.data.flatten()):
        N = npts[i]
        Nprev = npts[i - Ncycs] if i >= Ncycs and np.remainder(float(i), Ncycs) == 0 else 0
        indcopy -= Nprev
        curInd = tuple(sorted(set(xrange(N) if N > 0 else []).intersection(indcopy)))
        ncur = len(curInd)
        outInd += map(operator.sub, curInd,
                      (((curInd[0] if len(curInd) > 0 else 0)
                        - (outInd[-1] + 1 if len(outInd) > 0 else 0)
                        - len(ind) * (np.remainder(float(i), Ncycs)),) * ncur))
        curInd = tuple(map(operator.mul, curInd, (DeltaT,) * ncur))
        date += tuple(map(operator.add, curInd, (stDate_flatten[i],) * ncur))
        cycles += (nbCyc_flatten[i],) * ncur
        tracks += (nbTra_flatten[i],) * ncur

    date = np.ma.masked_array(date, mask=False)
    cycles = np.ma.masked_array(cycles, mask=False)
    tracks = np.ma.masked_array(tracks, mask=False)

    outInd = np.array(outInd, dtype=int)  # output index within the loaded variables

    nt = len(date)
    date.mask = (False,) * nt
    cycles.mask = date.mask
    tracks.mask = date.mask

    dimStr = dim_lon
    dimStr.pop('Data')
    nrec = len(date)
    dimStr.update({'time': nrec})
    for i in ['DeltaT', 'NbPoints', 'Cycles', 'Tracks', 'DataIndexes']:
        par_list.pop(par_list.index(i))

    outStr = {'_dimensions': dimStr,
              'lon': lon,
              'lat': lat,
              'date': date,
              'cycle': cycles,
              'track': tracks}

    for param in par_list:
        dumVar = self.load_ncVar(param, Data=ind, **kwargs)  # load variable
        dimStr = dumVar['_dimensions']
        dimStr.pop('Cycles')
        dimStr.pop('Data')
        dimStr['time'] = nrec
        dimStr['_ndims'] = len(dimStr.keys()) - 1

        # Update the output dimension structure with any new dimensions
        curDim = [str(dimname) for dimname in dimStr.keys()[1:]]
        curDimval = [dimStr[dim] for dim in curDim]
        flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0
                for dimname in curDim]  # find dimensions to update
        dimUpdate = np.array(curDim).compress(flag)
        for enum in enumerate(dimUpdate):
            self.message(3, 'Appending dimensions {0}:{1} to dataStructure'.format(
                enum[1], np.array(curDimval).compress(flag)[enum[0]]))
            # Append the new dimension and update the dimension count
            outStr['_dimensions'].update({enum[1]: np.array(curDimval).compress(flag)[enum[0]]})
            outStr['_dimensions']['_ndims'] += 1

        dumStr = {param.lower(): dumVar['data'].flatten()[outInd]}
        outStr.update(dumStr)

    id = np.repeat(sat_name, outStr['_dimensions']['time'])
    outStr.update({'id': id})

    self._ncfile.close()

    return outStr
Read AVISO Along-Track products :return outStr: Output data structure containing all recorded parameters as specified by the NetCDF file PARAMETER list. :author: Renaud Dussurget
def outputMode(self, outputMode):
    """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.

    Options include:

    * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
       the sink
    * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the
       sink every time there are some updates
    * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will
       be written to the sink every time there are some updates. If the query doesn't
       contain aggregations, it will be equivalent to `append` mode.

    .. note:: Evolving.

    >>> writer = sdf.writeStream.outputMode('append')
    """
    if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
        raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
    self._jwrite = self._jwrite.outputMode(outputMode)
    return self
Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append')
def parse_url(cls, string):  # pylint: disable=redefined-outer-name
    """
    If it can be parsed as a version_guid with no preceding org + offering, returns a
    dict with the single key 'version_guid' mapped to the parsed value. If it can be
    parsed as an org + offering, returns a dict with key 'id' and optional keys
    'branch' and 'version_guid'.

    Raises:
        InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
    """
    match = cls.URL_RE.match(string)
    if not match:
        raise InvalidKeyError(cls, string)
    return match.groupdict()
If it can be parsed as a version_guid with no preceding org + offering, returns a dict with the single key 'version_guid' mapped to the parsed value. If it can be parsed as an org + offering, returns a dict with key 'id' and optional keys 'branch' and 'version_guid'. Raises: InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
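The heavy lifting happens in cls.URL_RE and groupdict(). A self-contained sketch of the same pattern, with a simplified regex standing in for the real URL_RE (which is not shown in the source):

import re

# Simplified, hypothetical stand-in for cls.URL_RE.
URL_RE = re.compile(
    r'(?P<org>[^+]+)\+(?P<offering>[^+]+)'
    r'(\+branch@(?P<branch>[^+]+))?'
    r'(\+version@(?P<version_guid>[0-9a-f]+))?$'
)

match = URL_RE.match('edX+DemoX+branch@draft')
print(match.groupdict())
# {'org': 'edX', 'offering': 'DemoX', 'branch': 'draft', 'version_guid': None}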
def color_msg(msg, color):
    " Return a colored message "
    return ''.join((COLORS.get(color, COLORS['endc']), msg, COLORS['endc']))
Return a colored message
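Usage, assuming the module-level COLORS dict maps names like 'green' to ANSI escape codes and carries an 'endc' reset entry (implied by the lookup above):

print(color_msg('All tests passed', 'green'))          # colored if 'green' is a known key
print(color_msg('Unknown color, reset only', 'nope'))  # falls back to COLORS['endc']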
def blast_records_to_object(blast_records): """Transforms biopython's blast record into blast object defined in django-blastplus app. """ # container for transformed objects blast_objects_list = [] for blast_record in blast_records: br = BlastRecord(**{'query': blast_record.query, 'version': blast_record.version, 'expect': blast_record.expect, 'application': blast_record.application, 'reference': blast_record.reference}) for alignment in blast_record.alignments: al = Alignment(**{ 'hit_def': alignment.hit_def, 'title': alignment.title, 'length': alignment.length, }) for hsp in alignment.hsps: h = Hsp(**{ 'align_length': hsp.align_length, 'bits': hsp.bits, 'expect': hsp.expect, 'frame': hsp.frame, 'gaps': hsp.gaps, 'identities': hsp.identities, 'match': hsp.match, 'num_alignments': hsp.num_alignments, 'positives': hsp.positives, 'query': hsp.query, 'query_end': hsp.query_end, 'query_start': hsp.query_start, 'sbjct': hsp.sbjct, 'sbjct_end': hsp.sbjct_end, 'sbjct_start': hsp.sbjct_start, 'score': hsp.score, 'strand': hsp.strand, 'str': str(hsp), }) al.hsp_list.append(h) br.alignments.append(al) blast_objects_list.append(br) return blast_objects_list
Transforms biopython's blast record into blast object defined in django-blastplus app.
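The records normally come from Biopython's XML parser. A round-trip sketch, assuming BlastRecord, Alignment, and Hsp are the django-blastplus classes imported by this module and that a BLAST XML report is on disk (the file name is illustrative):

from Bio.Blast import NCBIXML

with open('blast_output.xml') as handle:
    records = NCBIXML.parse(handle)  # iterator of Biopython blast records
    objects = blast_records_to_object(records)

for record in objects:
    for alignment in record.alignments:
        print(alignment.hit_def, len(alignment.hsp_list))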
def htmlFromThing(thing,title): """create pretty formatted HTML from a things dictionary.""" try: thing2 = copy.copy(thing) except: print("crashed copying the thing! I can't document it.") return False stuff=analyzeThing(thing2) names2=list(stuff.keys()) for i,name in enumerate(names2): if name.startswith("_"): names2[i]="zzzzzzzzzz"+name html="""<html><head><style> body {font-family: courier, monospace;} .name {font-weight: bold;} .type {font-style: italic; font-family: serif; color: #AAA;} .desc {} .itemEval {background-color: #DDFFDD;} .itemEvalFail {} table {font-size: .8em; margin-top: 20px; border-collapse: collapse;} tr {border: 1px solid #CCC; vertical-align: text-top;} td {padding: 2px 10px 2px 10px;} .credits {text-align: center; opacity: 0.5; margin-top: 50px; font-size: .8em; font-family: sans-serif;} </style></head><body>""" if title: html+='<span style="color: #CCC;">title: </span>%s<br>'%title textTitle="" textType="" try: textTitle=websafe(str(thing)) textType=websafe(type(thing).__name__) except: pass html+='<span style="color: #CCC;">value: </span>%s<br>'%textTitle html+='<span style="color: #CCC;">&nbsp;type: </span>%s<br>'%textType html+='<table cellpadding=3 align="center">' html+='<tr style="background-color: #000; color: #FFF; font-weight: bold;">' html+='<td>property</td><td>type</td><td>value</td>' html+='<td>evaluated (without arguments)</td></tr>' for name in sorted(names2): if name.startswith("zzzzzzzzzz"): name=name[10:] itemName=str(name) itemType=websafe(stuff[name][0]) itemStr=websafe(stuff[name][1]) itemEval=websafe(stuff[name][2]) color="DDDDFF" color2="" if "method" in itemType: itemName+="()" color="FFDDDD" if itemName.startswith("_"): color="EEEEEE" if itemStr.startswith("&lt;") and not ", " in itemStr: itemStr="""<span style="color: #CCC; font-family: serif; font-style: italic;">%s</span>"""%itemStr else: color2="DDFFDD" if itemEval=="": itemEval="FAILED TO EVALUATE" html+='<tr>' html+='<td class="name" style="background-color: #%s;">%s</td>'%(color,itemName) html+='<td class="type">%s</td>'%(itemType) html+='<td class="itemStr" style="background-color: #%s;">%s</td>'%(color2,itemStr) if itemEval=="FAILED TO EVALUATE": html+='<td class="itemEvalFail"></td>' else: html+='<td class="itemEval">%s</td>'%(itemEval) html+='</tr>' dt=datetime.datetime.now() html+="""</table><p class="credits"> page automatically generated by <a href="https://pypi.python.org/pypi/webinspect/">webinspect</a> (version %s) %s</p> </body></html>"""%(__version__,dt.strftime("at %I:%M %p on %B %d, %Y")) return html
create pretty formatted HTML from a things dictionary.
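A quick way to exercise it, writing the generated report to disk (the inspected object and file name are illustrative; the function returns False when the object cannot be copied):

import datetime

html = htmlFromThing(datetime.datetime.now(), title='datetime inspection')
if html:
    with open('inspection.html', 'w') as f:
        f.write(html)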
def rpc_get_oneline_docstring(self, filename, source, offset): """Return a oneline docstring for the symbol at offset""" line, column = pos_to_linecol(source, offset) definitions = run_with_debug(jedi, 'goto_definitions', source=source, line=line, column=column, path=filename, encoding='utf-8') assignments = run_with_debug(jedi, 'goto_assignments', source=source, line=line, column=column, path=filename, encoding='utf-8') if definitions: definition = definitions[0] else: definition = None if assignments: assignment = assignments[0] else: assignment = None if definition: # Get name if definition.type in ['function', 'class']: raw_name = definition.name name = '{}()'.format(raw_name) doc = definition.docstring().split('\n') elif definition.type in ['module']: raw_name = definition.name name = '{} {}'.format(raw_name, definition.type) doc = definition.docstring().split('\n') elif (definition.type in ['instance'] and hasattr(assignment, "name")): raw_name = assignment.name name = raw_name doc = assignment.docstring().split('\n') else: return None # Keep only the first paragraph that is not a function declaration lines = [] call = "{}(".format(raw_name) # last line doc.append('') for i in range(len(doc)): if doc[i] == '' and len(lines) != 0: paragraph = " ".join(lines) lines = [] if call != paragraph[0:len(call)]: break paragraph = "" continue lines.append(doc[i]) # Keep only the first sentence onelinedoc = paragraph.split('. ', 1) if len(onelinedoc) == 2: onelinedoc = onelinedoc[0] + '.' else: onelinedoc = onelinedoc[0] if onelinedoc == '': onelinedoc = "No documentation" return {"name": name, "doc": onelinedoc} return None
Return a oneline docstring for the symbol at offset
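A hedged calling example, assuming backend is an instance of the class providing this RPC method; only the argument order and the returned dict shape come from the source:

source = "import json\njson.dumps"
offset = len(source)  # cursor at the end of "dumps"

result = backend.rpc_get_oneline_docstring("example.py", source, offset)
if result is not None:
    print(result["name"])  # e.g. "dumps()"
    print(result["doc"])   # first sentence of the docstring, or "No documentation"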
def set_branching_model(self, project, repository, data): """ Set branching model :param project: :param repository: :param data: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format( project=project, repository=repository) return self.put(url, data=data)
Set branching model :param project: :param repository: :param data: :return:
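A sketch of a payload for this endpoint. The exact schema is owned by Bitbucket Server's branch-utils API, so treat the keys below as illustrative rather than authoritative:

data = {
    'development': {'refId': 'refs/heads/develop', 'useDefault': False},
    'production': {'refId': 'refs/heads/master', 'useDefault': False},
}
bitbucket.set_branching_model('PROJ', 'my-repo', data)  # 'bitbucket' is the client instance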
def delete_items_from_dict(d, to_delete): """Recursively deletes items from a dict, if the item's value(s) is in ``to_delete``. """ if not isinstance(to_delete, list): to_delete = [to_delete] if isinstance(d, dict): for single_to_delete in set(to_delete): if single_to_delete in d.values(): for k, v in d.copy().items(): if v == single_to_delete: del d[k] for k, v in d.items(): delete_items_from_dict(v, to_delete) elif isinstance(d, list): for i in d: delete_items_from_dict(i, to_delete) return remove_none(d)
Recursively deletes items from a dict, if the item's value(s) is in ``to_delete``.
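A worked example; note the trailing remove_none() call delegates final cleanup to a helper defined elsewhere in the same module:

d = {'a': 'x', 'b': {'c': 'x', 'd': 'keep'}, 'e': [{'f': 'x'}]}
delete_items_from_dict(d, 'x')
# d is mutated to {'b': {'d': 'keep'}, 'e': [{}]} before remove_none()
# prunes whatever it considers empty; only dict entries are deleted,
# matching list *items* are left in place.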
def set_cookie(self, key, value='', max_age=None, path='/', domain=None,
               secure=False, httponly=False, expires=None):
    """Set a response cookie.

    Parameters:
      key      : The cookie name.
      value    : The cookie value.
      max_age  : The maximum age of the cookie in seconds, or as a
                 datetime.timedelta object.
      path     : Restrict the cookie to this path (default: '/').
      domain   : Restrict the cookie to this domain.
      secure   : When True, instruct the client to only send the cookie
                 over HTTPS.
      httponly : When True, instruct the client to disallow javascript
                 access to the cookie.
      expires  : Another way of specifying the maximum age of the cookie.
                 Accepts the same values as max_age (number of seconds,
                 datetime.timedelta). Additionally accepts a
                 datetime.datetime object.

    Note: a value of type int or float is interpreted as a number of
    seconds in the future, *not* as a Unix timestamp.
    """
    key, value = key.encode('utf-8'), value.encode('utf-8')
    cookie = SimpleCookie({key: value})
    m = cookie[key]
    if max_age is not None:
        if isinstance(max_age, timedelta):
            m['max-age'] = int(total_seconds(max_age))
        else:
            m['max-age'] = int(max_age)
    if path is not None:
        m['path'] = path.encode('utf-8')
    if domain is not None:
        m['domain'] = domain.encode('utf-8')
    if secure:
        m['secure'] = True
    if httponly:
        m['httponly'] = True
    if expires is not None:
        # 'expires' expects an offset in seconds, like max-age
        if isinstance(expires, datetime):
            expires = total_seconds(expires - datetime.utcnow())
        elif isinstance(expires, timedelta):
            expires = total_seconds(expires)
        m['expires'] = int(expires)
    self.headers.add_header('Set-Cookie', m.OutputString())
Set a response cookie. Parameters: key : The cookie name. value : The cookie value. max_age : The maximum age of the cookie in seconds, or as a datetime.timedelta object. path : Restrict the cookie to this path (default: '/'). domain : Restrict the cookie to this domain. secure : When True, instruct the client to only send the cookie over HTTPS. httponly : When True, instruct the client to disallow javascript access to the cookie. expires : Another way of specifying the maximum age of the cookie. Accepts the same values as max_age (number of seconds, datetime.timedelta). Additionally accepts a datetime.datetime object. Note: a value of type int or float is interpreted as a number of seconds in the future, *not* as a Unix timestamp.
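Typical use on a response object, grounded in the signature above (the response name is illustrative):

from datetime import timedelta

response.set_cookie('session', 'abc123',
                    max_age=timedelta(hours=1),
                    path='/', secure=True, httponly=True)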
def initialize_weights(self, save_file):
    """Initialize the weights from the given save_file.

    Assumes that the graph has been constructed, and the save_file contains
    weights that match the graph. Used to set the weights to a different
    version of the player without redefining the entire graph."""
    tf.train.Saver().restore(self.sess, save_file)
Initialize the weights from the given save_file. Assumes that the graph has been constructed, and the save_file contains weights that match the graph. Used to set the weights to a different version of the player without redefining the entire graph.
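A sketch of the weight-swapping use case the docstring describes; the checkpoint paths are illustrative:

net.initialize_weights('models/v17-000233.ckpt')  # evaluate version A
# ... play some games ...
net.initialize_weights('models/v17-000234.ckpt')  # swap in version B, same graph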
def ensure_dir_exists(func): "wrap a function that returns a dir, making sure it exists" @functools.wraps(func) def make_if_not_present(): dir = func() if not os.path.isdir(dir): os.makedirs(dir) return dir return make_if_not_present
wrap a function that returns a dir, making sure it exists
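Usage as a decorator on any zero-argument path provider (the cache-dir example is illustrative):

import os

@ensure_dir_exists
def cache_dir():
    return os.path.expanduser('~/.cache/myapp')

path = cache_dir()  # the directory is created on first call if missing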
def send_mail_worker(config, mail, event): """Worker task to send out an email, which blocks the process unless it is threaded""" log = "" try: if config.mail_ssl: server = SMTP_SSL(config.mail_server, port=config.mail_server_port, timeout=30) else: server = SMTP(config.mail_server, port=config.mail_server_port, timeout=30) if config.mail_tls: log += 'Starting TLS\n' server.starttls() if config.mail_username != '': log += 'Logging in with ' + str(config.mail_username) + "\n" server.login(config.mail_username, config.mail_password) else: log += 'No username, trying anonymous access\n' log += 'Sending Mail\n' response_send = server.send_message(mail) server.quit() except timeout as e: log += 'Could not send email to enrollee, mailserver timeout: ' + str(e) + "\n" return False, log, event log += 'Server response:' + str(response_send) return True, log, event
Worker task to send out an email, which blocks the process unless it is threaded
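As the docstring says, the call blocks unless threaded. A minimal dispatch sketch; a real caller would collect the (success, log, event) return value via a pool or queue:

import threading

worker = threading.Thread(target=send_mail_worker, args=(config, mail, event))
worker.daemon = True   # don't block interpreter shutdown on a stuck SMTP server
worker.start()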
def delete(self, queue, virtual_host='/'): """Delete a Queue. :param str queue: Queue name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ virtual_host = quote(virtual_host, '') return self.http_client.delete(API_QUEUE % ( virtual_host, queue ))
Delete a Queue. :param str queue: Queue name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
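A hypothetical invocation; only delete()'s own signature comes from the source, the api.queue wiring is assumed from the http_client usage above:

api.queue.delete('my_queue', virtual_host='/')  # returns the decoded JSON response as a dict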