code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def delPlayer(name):
    """Forget a previously defined PlayerRecord by deleting its disk file.

    :param name: name of the player record to delete
    :return: the PlayerRecord object that was removed
    """
    player = getPlayer(name)
    try:
        os.remove(player.filename)  # delete persisted record from disk
    except OSError:
        # Best-effort: the file may already be gone. Note: os.remove raises
        # OSError, not IOError, so the original `except IOError` never fired.
        pass
    try:
        del getKnownPlayers()[player.name]  # forget object from cache
    except KeyError:
        # Not in the cache; nothing to forget. (Was a bare `except:`, which
        # also hid unrelated programming errors.)
        pass
    return player
forget about a previously defined PlayerRecord setting by deleting its disk file
def remove_users_from_user_group(self, id, **kwargs):  # noqa: E501
    """Remove multiple users from a specific user group  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_users_from_user_group(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body: List of users that should be removed from user group
    :return: ResponseContainerUserGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the
    # *_with_http_info variant: when async_req is set it returns the request
    # thread, otherwise it returns the response data directly.
    return self.remove_users_from_user_group_with_http_info(id, **kwargs)  # noqa: E501
Remove multiple users from a specific user group # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_users_from_user_group(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param list[str] body: List of users that should be removed from user group :return: ResponseContainerUserGroup If the method is called asynchronously, returns the request thread.
def _delay_call(self):
    """Throttle web-service calls so they are at least DELAY_TIME (0.2s) apart.

    Sleeps for the remainder of the interval if the previous call was made
    too recently, then records the current time as the last call time.
    """
    current = time.time()
    remaining = DELAY_TIME - (current - self.last_call_time)
    if remaining > 0:
        time.sleep(remaining)
    # NOTE: matches the original behavior of recording the pre-sleep time.
    self.last_call_time = current
Makes sure that web service calls are at least 0.2 seconds apart.
def reraise(tpe, value, tb=None):
    """Reraise an exception from an exception info tuple."""
    if value is None:
        value = tpe()
    if sys.version_info[0] == 3:
        # Attach the traceback (if any) before raising.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    # Python 2: the three-argument raise form is a syntax error in Py3
    # source files, so it has to be hidden inside exec().
    exec('raise tpe, value, tb')
Reraise an exception from an exception info tuple.
def mark_running(self):
    """Transition the service from the Paused state to the Running state.

    Raises if the service is not currently in the Paused state.
    """
    with self._lock:
        # _set_state enforces that the current state is the expected one.
        self._set_state(self._RUNNING, self._PAUSED)
Moves the service to the Running state. Raises if the service is not currently in the Paused state.
def start_timer(self, duration, func, *args):
    """Schedule *func* to be invoked after *duration* seconds.

    * duration - time in seconds to wait before firing
    * func - function to be called
    * args - arguments to pass to the function
    """
    timer = threading.Timer(duration, self._timer_callback, (func, args))
    # Keep a handle keyed by the callback so it can be cancelled later.
    self._timer_callbacks[func] = timer
    timer.start()
    self.log.info("Scheduled call to %s in %ds", func.__name__, duration)
Schedules a function to be called after some period of time. * duration - time in seconds to wait before firing * func - function to be called * args - arguments to pass to the function
def reset(name):
    '''
    Force power down and restart an existing VM
    '''
    ret = {}
    client = salt.client.get_local_client(__opts__['conf_file'])
    # Locate the VM; quiet=True suppresses per-host progress output.
    data = vm_info(name, quiet=True)
    if not data:
        __jid_event__.fire_event({'message': 'Failed to find VM {0} to reset'.format(name)}, 'progress')
        return 'fail'
    # vm_info keys results by hypervisor host; take the (single) host found.
    host = next(six.iterkeys(data))
    try:
        # Long timeout: a hard reset can take a while to be acknowledged.
        cmd_ret = client.cmd_iter(
            host,
            'virt.reset',
            [name],
            timeout=600)
        for comp in cmd_ret:
            ret.update(comp)
        __jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
    except SaltClientError as client_error:
        # Report the client failure but still return whatever was collected.
        print(client_error)
    return ret
Force power down and restart an existing VM
def hardware_info(self, mask=0xFFFFFFFF):
    """Returns a list of 32 integer values corresponding to the bitfields
    specifying the power consumption of the target.

    The values returned by this function only have significance if the
    J-Link is powering the target.

    The words, indexed, have the following significance:
      0. If ``1``, target is powered via J-Link.
      1. Overcurrent bitfield:
         0: No overcurrent.
         1: Overcurrent happened.  2ms @ 3000mA
         2: Overcurrent happened.  10ms @ 1000mA
         3: Overcurrent happened.  40ms @ 400mA
      2. Power consumption of target (mA).
      3. Peak of target power consumption (mA).
      4. Peak of target power consumption during J-Link operation (mA).

    Args:
      self (JLink): the ``JLink`` instance
      mask (int): bit mask to decide which hardware information words are
        returned (defaults to all the words).

    Returns:
      List of bitfields specifying different states based on their index
      within the list and their value.

    Raises:
      JLinkException: on hardware error.
    """
    # The DLL fills a fixed-size array of 32 unsigned 32-bit words.
    info_words = (ctypes.c_uint32 * 32)()
    status = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(info_words))
    if status != 0:
        raise errors.JLinkException(status)
    return list(info_words)
Returns a list of 32 integer values corresponding to the bitfields specifying the power consumption of the target. The values returned by this function only have significance if the J-Link is powering the target. The words, indexed, have the following significance: 0. If ``1``, target is powered via J-Link. 1. Overcurrent bitfield: 0: No overcurrent. 1: Overcurrent happened. 2ms @ 3000mA 2: Overcurrent happened. 10ms @ 1000mA 3: Overcurrent happened. 40ms @ 400mA 2. Power consumption of target (mA). 3. Peak of target power consumption (mA). 4. Peak of target power consumption during J-Link operation (mA). Args: self (JLink): the ``JLink`` instance mask (int): bit mask to decide which hardware information words are returned (defaults to all the words). Returns: List of bitfields specifying different states based on their index within the list and their value. Raises: JLinkException: on hardware error.
def check_initial_subdomain(cls, subdomain_rec):
    """Verify that a first-ever subdomain record is well-formed.

    * n must be 0
    * the subdomain must not be independent of its domain
    """
    # Both conditions must hold for the record to be acceptable.
    return subdomain_rec.n == 0 and not subdomain_rec.independent
Verify that a first-ever subdomain record is well-formed. * n must be 0 * the subdomain must not be independent of its domain
def post_grade(self, grade):
    """Post grade to LTI consumer using XML

    :param: grade: 0 <= grade <= 1
    :return: True if post successful and grade valid
    :exception: LTIPostMessageException if call failed
    """
    message_id = self.message_identifier_id()
    operation = 'replaceResult'
    sourcedid = self.lis_result_sourcedid
    # # edX devbox fix
    score = float(grade)
    if 0 <= score <= 1.0:
        xml = generate_request_xml(message_id, operation, sourcedid, score)
        posted = post_message(self._consumers(), self.key, self.response_url, xml)
        if not posted:
            raise LTIPostMessageException("Post Message Failed")
        return True
    # Out-of-range grade: nothing is posted.
    return False
Post grade to LTI consumer using XML :param: grade: 0 <= grade <= 1 :return: True if post successful and grade valid :exception: LTIPostMessageException if call failed
def do_clearrep(self, line):
    """clearrep Set the replication policy to default.

    The default replication policy has no preferred or blocked member
    nodes, allows replication and sets the preferred number of replicas
    to 3.
    """
    # This command accepts no arguments.
    self._split_args(line, 0, 0)
    session = self._command_processor.get_session()
    session.get_replication_policy().clear()
    self._print_info_if_verbose("Cleared the replication policy")
clearrep Set the replication policy to default. The default replication policy has no preferred or blocked member nodes, allows replication and sets the preferred number of replicas to 3.
def notify_init(self):
    '''Run the queued callbacks only when the very first session appears.'''
    session_count = len(self._sessions)
    self._update_session_count(1, session_count)
    if session_count == 1:
        # First session: fire everything that was queued while idle.
        self._run_queued_callbacks()
run the queed callback for just the first session only
def stat(self, path):
    """Retrieve information about a file on the remote system.

    The return value is an object whose attributes correspond to the
    attributes of python's C{stat} structure as returned by C{os.stat},
    except that it contains fewer fields. An SFTP server may return as
    much or as little info as it wants, so the results may vary from
    server to server.

    Unlike a python C{stat} object, the result may not be accessed as a
    tuple. The fields supported are: C{st_mode}, C{st_size}, C{st_uid},
    C{st_gid}, C{st_atime}, and C{st_mtime}.

    @param path: the filename to stat
    @type path: str
    @return: an object containing attributes about the given file
    @rtype: SFTPAttributes
    """
    path = self._adjust_cwd(path)
    self._log(DEBUG, 'stat(%r)' % path)
    response_type, msg = self._request(CMD_STAT, path)
    # The server must answer a STAT with an ATTRS packet.
    if response_type != CMD_ATTRS:
        raise SFTPError('Expected attributes')
    return SFTPAttributes._from_msg(msg)
Retrieve information about a file on the remote system. The return value is an object whose attributes correspond to the attributes of python's C{stat} structure as returned by C{os.stat}, except that it contains fewer fields. An SFTP server may return as much or as little info as it wants, so the results may vary from server to server. Unlike a python C{stat} object, the result may not be accessed as a tuple. This is mostly due to the author's slack factor. The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid}, C{st_atime}, and C{st_mtime}. @param path: the filename to stat @type path: str @return: an object containing attributes about the given file @rtype: SFTPAttributes
def find(wave, dep_var, der=None, inst=1, indep_min=None, indep_max=None):
    r"""
    Return the independent variable point associated with a dependent
    variable point.

    If the dependent variable point is not in the dependent variable vector
    the independent variable vector point is obtained by linear interpolation

    :param wave: Waveform
    :type  wave: :py:class:`peng.eng.Waveform`

    :param dep_var: Dependent vector value to search for
    :type  dep_var: integer, float or complex

    :param der: Dependent vector derivative filter. If +1 only independent
                vector points that have positive derivatives when crossing
                the requested dependent vector point are returned; if -1
                only independent vector points that have negative
                derivatives when crossing the requested dependent vector
                point are returned; if 0 only independent vector points
                that have null derivatives when crossing the requested
                dependent vector point are returned; otherwise if None all
                independent vector points are returned regardless of the
                dependent vector derivative. The derivative of the first
                and last point of the waveform is assumed to be null
    :type  der: integer, float or complex

    :param inst: Instance number filter. If, for example, **inst** equals 3,
                 then the independent variable vector point at which the
                 dependent variable vector equals the requested value for
                 the third time is returned
    :type  inst: positive integer

    :param indep_min: Independent vector start point of computation
    :type  indep_min: integer or float

    :param indep_max: Independent vector stop point of computation
    :type  indep_max: integer or float

    :rtype: integer, float or None if the dependent variable point is not
            found

    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.find

    :raises:
     * RuntimeError (Argument \`dep_var\` is not valid)

     * RuntimeError (Argument \`der\` is not valid)

     * RuntimeError (Argument \`indep_max\` is not valid)

     * RuntimeError (Argument \`indep_min\` is not valid)

     * RuntimeError (Argument \`inst\` is not valid)

     * RuntimeError (Argument \`wave\` is not valid)

     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments)

    .. [[[end]]]
    """
    # pylint: disable=C0325,R0914,W0613
    ret = copy.copy(wave)
    # Restrict the waveform to [indep_min, indep_max] before searching.
    _bound_waveform(ret, indep_min, indep_max)
    # Tolerant comparisons against the vector extremes: dep_var counts as
    # reachable if it is within floating-point tolerance of min/max.
    close_min = np.isclose(min(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
    close_max = np.isclose(max(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
    if ((np.amin(ret._dep_vector) > dep_var) and (not close_min)) or (
        (np.amax(ret._dep_vector) < dep_var) and (not close_max)
    ):
        # dep_var lies entirely outside the dependent-vector range.
        return None
    cross_wave = ret._dep_vector - dep_var
    sign_wave = np.sign(cross_wave)
    # Indices where the dependent vector equals dep_var (within tolerance).
    exact_idx = np.where(np.isclose(ret._dep_vector, dep_var, FP_RTOL, FP_ATOL))[0]
    # Locations where dep_vector crosses dep_var or it is equal to it
    left_idx = np.where(np.diff(sign_wave))[0]
    # Remove elements to the left of exact matches
    left_idx = np.setdiff1d(left_idx, exact_idx)
    left_idx = np.setdiff1d(left_idx, exact_idx - 1)
    right_idx = left_idx + 1 if left_idx.size else np.array([])
    # Seed the result with the exact matches; their derivative is marked 0.
    indep_var = ret._indep_vector[exact_idx] if exact_idx.size else np.array([])
    dvector = np.zeros(exact_idx.size).astype(int) if exact_idx.size else np.array([])
    if left_idx.size and (ret.interp == "STAIRCASE"):
        # Staircase interpolation: a crossing happens at the right sample;
        # derivative sign is +1 for a rising step, -1 for a falling step.
        idvector = (
            2.0 * (ret._dep_vector[right_idx] > ret._dep_vector[left_idx]).astype(int)
            - 1
        )
        if indep_var.size:
            indep_var = np.concatenate((indep_var, ret._indep_vector[right_idx]))
            dvector = np.concatenate((dvector, idvector))
            # Keep crossings sorted by independent variable.
            sidx = np.argsort(indep_var)
            indep_var = indep_var[sidx]
            dvector = dvector[sidx]
        else:
            indep_var = ret._indep_vector[right_idx]
            dvector = idvector
    elif left_idx.size:
        # Linear interpolation between the samples bracketing each crossing.
        y_left = ret._dep_vector[left_idx]
        y_right = ret._dep_vector[right_idx]
        x_left = ret._indep_vector[left_idx]
        x_right = ret._indep_vector[right_idx]
        slope = ((y_left - y_right) / (x_left - x_right)).astype(float)
        # y = y0+slope*(x-x0) => x0+(y-y0)/slope
        if indep_var.size:
            indep_var = np.concatenate(
                (indep_var, x_left + ((dep_var - y_left) / slope))
            )
            dvector = np.concatenate((dvector, np.where(slope > 0, 1, -1)))
            sidx = np.argsort(indep_var)
            indep_var = indep_var[sidx]
            dvector = dvector[sidx]
        else:
            indep_var = x_left + ((dep_var - y_left) / slope)
            dvector = np.where(slope > 0, +1, -1)
    if der is not None:
        # Keep only crossings whose derivative sign matches the filter.
        indep_var = np.extract(dvector == der, indep_var)
    # Return the inst-th occurrence (1-based), or None if there are fewer.
    return indep_var[inst - 1] if inst <= indep_var.size else None
r""" Return the independent variable point associated with a dependent variable point. If the dependent variable point is not in the dependent variable vector the independent variable vector point is obtained by linear interpolation :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param dep_var: Dependent vector value to search for :type dep_var: integer, float or complex :param der: Dependent vector derivative filter. If +1 only independent vector points that have positive derivatives when crossing the requested dependent vector point are returned; if -1 only independent vector points that have negative derivatives when crossing the requested dependent vector point are returned; if 0 only independent vector points that have null derivatives when crossing the requested dependent vector point are returned; otherwise if None all independent vector points are returned regardless of the dependent vector derivative. The derivative of the first and last point of the waveform is assumed to be null :type der: integer, float or complex :param inst: Instance number filter. If, for example, **inst** equals 3, then the independent variable vector point at which the dependent variable vector equals the requested value for the third time is returned :type inst: positive integer :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: integer, float or None if the dependent variable point is not found .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. 
peng.wave_functions.find :raises: * RuntimeError (Argument \`dep_var\` is not valid) * RuntimeError (Argument \`der\` is not valid) * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`inst\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) .. [[[end]]]
def index_relations(sender, pid_type, json=None, record=None, index=None, **kwargs):
    """Add relations to the indexed record."""
    if not json:
        json = {}
    # Resolve the persistent identifier that points at this record.
    pid = PersistentIdentifier.query.filter(
        PersistentIdentifier.object_uuid == record.id,
        PersistentIdentifier.pid_type == pid_type,
    ).one_or_none()
    if pid:
        relations = serialize_relations(pid)
        if relations:
            json['relations'] = relations
    return json
Add relations to the indexed record.
def imap_unordered(requests, stream=True, pool=None, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to a generator
    of Responses.

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param pool: a Pool to run the requests on; if None, a private Pool of
        ``size`` workers is created and closed when iteration finishes.
    :param size: Specifies the number of requests to make at a time. default is 2
    :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
    """
    def send(r):
        return r.send(stream=stream)

    # Bug fix: the original ignored a caller-supplied pool (it always built a
    # second Pool(size) inside contextlib.closing) and its trailing
    # `if not pool: pool.close()` could never work. Only close a pool we
    # created ourselves; a caller-supplied pool may still be in use elsewhere.
    owns_pool = pool is None
    if owns_pool:
        pool = Pool(size)
    try:
        for request in pool.imap_unordered(send, requests):
            if request.response is not None:
                yield request.response
            elif exception_handler:
                exception_handler(request, request.exception)
    finally:
        if owns_pool:
            pool.close()
Concurrently converts a generator object of Requests to a generator of Responses. :param requests: a generator of Request objects. :param stream: If False, the content will not be downloaded immediately. :param size: Specifies the number of requests to make at a time. default is 2 :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
def get_uids_from_record(self, record, key):
    """Returns a list of parsed UIDs from a single form field identified by
    the given key.

    A form field ending with `_uid` can contain an empty value, a single
    UID or multiple UIDs separated by a comma.

    This method parses the UID value and returns a list of non-empty UIDs.
    """
    value = record.get(key, None)
    if value is None:
        return []
    # Bug fix: `basestring` is Python 2 only (NameError on Python 3), and
    # Python 3's `filter` returns a lazy iterator although the docstring
    # promises a list. Use `str` and a list comprehension instead.
    if isinstance(value, str):
        value = value.split(",")
    return [uid for uid in value if uid]
Returns a list of parsed UIDs from a single form field identified by the given key. A form field ending with `_uid` can contain an empty value, a single UID or multiple UIDs separated by a comma. This method parses the UID value and returns a list of non-empty UIDs.
def is_ready_update(self):
    """
    Returns whether or not the trainer has enough elements to run update model
    :return: A boolean corresponding to whether or not update_model() can be run
    """
    buffered = len(self.training_buffer.update_buffer['actions'])
    # At least one full buffer's worth of sequences (never less than 1).
    threshold = max(
        int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)
    return buffered > threshold
Returns whether or not the trainer has enough elements to run update model :return: A boolean corresponding to whether or not update_model() can be run
def get_nodes(self, coord, coords):
    """Get the variables containing the definition of the nodes

    Parameters
    ----------
    coord: xarray.Coordinate
        The mesh variable
    coords: dict
        The coordinates to use to get node coordinates"""
    # The mesh variable names its node coordinates in an attribute; only
    # the first two (x, y) are used. Fall back to the dataset coordinates
    # when a name is not in the supplied mapping.
    names = coord.attrs.get('node_coordinates', '').split()[:2]
    return [coords.get(name, self.ds.coords.get(name)) for name in names]
Get the variables containing the definition of the nodes Parameters ---------- coord: xarray.Coordinate The mesh variable coords: dict The coordinates to use to get node coordinates
def self_consistent_update(u_kn, N_k, f_k):
    """Return an improved guess for the dimensionless free energies

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    f_k : np.ndarray, shape=(n_states), dtype='float'
        Updated estimate of f_k

    Notes
    -----
    Equation C3 in MBAR JCP paper.
    """
    u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
    states_with_samples = N_k > 0
    # Only the states with samples can contribute to the denominator term.
    log_denominator_n = logsumexp(
        f_k[states_with_samples] - u_kn[states_with_samples].T,
        b=N_k[states_with_samples],
        axis=1,
    )
    # All states can contribute to the numerator term.
    return -1.0 * logsumexp(-log_denominator_n - u_kn, axis=1)
Return an improved guess for the dimensionless free energies Parameters ---------- u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float' The reduced potential energies, i.e. -log unnormalized probabilities N_k : np.ndarray, shape=(n_states), dtype='int' The number of samples in each state f_k : np.ndarray, shape=(n_states), dtype='float' The reduced free energies of each state Returns ------- f_k : np.ndarray, shape=(n_states), dtype='float' Updated estimate of f_k Notes ----- Equation C3 in MBAR JCP paper.
def upsert(self, doc, namespace, timestamp, update_spec=None):
    """Insert a document into Elasticsearch."""
    index, doc_type = self._index_and_mapping(namespace)
    # '_id' travels in the action itself; drop it from the source body.
    doc_id = u(doc.pop("_id"))
    metadata = {'ns': namespace, '_ts': timestamp}
    # Source document is indexed under the lowercase namespace.
    action = {
        '_op_type': 'index',
        '_index': index,
        '_type': doc_type,
        '_id': doc_id,
        '_source': self._formatter.format_document(doc),
    }
    # Metadata keeps the original (mixed upper/lower case) namespace.
    meta_action = {
        '_op_type': 'index',
        '_index': self.meta_index_name,
        '_type': self.meta_type,
        '_id': doc_id,
        '_source': bson.json_util.dumps(metadata),
    }
    self.index(action, meta_action, doc, update_spec)
    # Restore '_id' so the caller's document is left unmodified.
    doc['_id'] = doc_id
Insert a document into Elasticsearch.
def main(args):
    """
    main entry point for the GenomicIntIntersection script.

    :param args: the arguments for this script, as a list of string. Should
                 already have had things like the script name stripped. That
                 is, if there are no args provided, this should be an empty
                 list.
    """
    ui = getUI(args)
    if ui.optionIsSet("test"):
        # just run unit tests
        unittest.main(argv=[sys.argv[0]])
    elif ui.optionIsSet("help"):
        # just show help
        ui.usage()
    else:
        verbose = ui.optionIsSet("verbose")
        # stranded mode is not supported yet
        if ui.optionIsSet("stranded"):
            sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
            sys.exit()
        # output goes to the named file, or stdout by default
        out_fh = sys.stdout
        if ui.optionIsSet("output"):
            out_fh = open(ui.getValue("output"), "w")
        # exactly two input files are guaranteed by the UI definition
        regions_1 = list(BEDIterator(ui.getArgument(0), verbose=verbose))
        regions_2 = list(BEDIterator(ui.getArgument(1), verbose=verbose))
        for region in regionsIntersection(regions_1, regions_2):
            out_fh.write(str(region) + "\n")
main entry point for the GenomicIntIntersection script. :param args: the arguments for this script, as a list of string. Should already have had things like the script name stripped. That is, if there are no args provided, this should be an empty list.
def record_magic(dct, magic_kind, magic_name, func):
    """Utility function to store a function as a magic of a specific kind.

    Parameters
    ----------
    dct : dict
        A dictionary with 'line' and 'cell' subdicts.
    magic_kind : str
        Kind of magic to be stored.
    magic_name : str
        Key to store the magic as.
    func : function
        Callable object to store.
    """
    if magic_kind == 'line_cell':
        # A line_cell magic is registered under both subdicts.
        dct['line'][magic_name] = func
        dct['cell'][magic_name] = func
    else:
        dct[magic_kind][magic_name] = func
Utility function to store a function as a magic of a specific kind. Parameters ---------- dct : dict A dictionary with 'line' and 'cell' subdicts. magic_kind : str Kind of magic to be stored. magic_name : str Key to store the magic as. func : function Callable object to store.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'status') and self.status is not None: _dict['status'] = self.status if hasattr(self, 'message') and self.message is not None: _dict['message'] = self.message return _dict
Return a json dictionary representing this model.
def bonds(self):
    """Iterate over all bonds, yielding each (atom, atom, bond) once."""
    visited = set()
    for atom, neighbours in self._adj.items():
        visited.add(atom)
        for other, bond in neighbours.items():
            # The adjacency is symmetric; skip the reverse direction.
            if other not in visited:
                yield atom, other, bond
iterate other all bonds
def token_of_request(
        self, method, host, url, qheaders, content_type=None, body=None):
    """Sign a request in Qiniu's canonical form:

    <Method> <PathWithRawQuery>
    Host: <Host>
    Content-Type: <ContentType>
    [<X-Qiniu-*> Headers]

    [<Body>]

    The <Body> is only signed when <ContentType> is present and is not
    application/octet-stream.
    """
    parsed = urlparse(url)
    if not host:
        # Fall back to the host embedded in the URL.
        host = parsed.netloc
    path_with_query = parsed.path
    if parsed.query != '':
        path_with_query = '?'.join((path_with_query, parsed.query))
    data = ''.join(["%s %s" % (method, path_with_query), "\n",
                    "Host: %s" % host, "\n"])
    if content_type:
        data += "Content-Type: %s" % (content_type) + "\n"
    data += qheaders
    data += "\n"
    if content_type and content_type != "application/octet-stream" and body:
        data += body.decode(encoding='UTF-8')
    return '{0}:{1}'.format(self.__access_key, self.__token(data))
<Method> <PathWithRawQuery> Host: <Host> Content-Type: <ContentType> [<X-Qiniu-*> Headers] [<Body>] #这里的 <Body> 只有在 <ContentType> 存在且不为 application/octet-stream 时才签进去。
def _do_multivalued_field_facets(self, results, field_facets): """ Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199) """ facet_dict = {} for field in field_facets: facet_list = {} if not self._multi_value_field(field): continue for result in results: field_value = getattr(result, field) for item in field_value: # Facet each item in a MultiValueField facet_list[item] = facet_list.get(item, 0) + 1 facet_dict[field] = list(facet_list.items()) return facet_dict
Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199)
def maybe_replace_any_if_equal(name, expected, actual):
    """Return the type given in `expected`.

    Raise ValueError if `expected` isn't equal to `actual`.  If --replace-any
    is used, the Any type in `actual` is considered equal.

    The implementation is naively checking if the string representation of
    `actual` is one of "Any", "typing.Any", or "t.Any".  This is done for two
    reasons:

    1. I'm lazy.
    2. We want people to be able to explicitly state that they want Any
       without it being replaced.  This way they can use an alias.
    """
    if expected == actual:
        return expected or actual
    if Config.replace_any:
        stripped = minimize_whitespace(str(actual))
        # Strip one level of quoting from string-literal annotations.
        if stripped and stripped[0] in {'"', "'"}:
            stripped = stripped[1:-1]
        if stripped in {'Any', 'typing.Any', 't.Any'}:
            return expected or actual
    expected_annotation = minimize_whitespace(str(expected))
    actual_annotation = minimize_whitespace(str(actual))
    raise ValueError(
        f"incompatible existing {name}. "
        + f"Expected: {expected_annotation!r}, actual: {actual_annotation!r}"
    )
Return the type given in `expected`. Raise ValueError if `expected` isn't equal to `actual`. If --replace-any is used, the Any type in `actual` is considered equal. The implementation is naively checking if the string representation of `actual` is one of "Any", "typing.Any", or "t.Any". This is done for two reasons: 1. I'm lazy. 2. We want people to be able to explicitly state that they want Any without it being replaced. This way they can use an alias.
def information_content(values):
    "Number of bits to represent the probability distribution in values."
    # Zero-probability entries carry no information; drop them before
    # normalizing, then compute Shannon entropy.
    probabilities = normalize(removeall(0, values))
    total = 0
    for p in probabilities:
        total += -p * log2(p)
    return total
Number of bits to represent the probability distribution in values.
def repr(self, changed_widgets=None):
    """It is used to automatically represent the object to HTML format
    packs all the attributes, children and so on.

    Args:
        changed_widgets (dict): A dictionary containing a collection of
            tags that have to be updated. The tag that have to be updated
            is the key, and the value is its textual repr.
    """
    if changed_widgets is None:
        changed_widgets = {}
    local_changed_widgets = {}
    self._set_updated()
    inner = self.innerHTML(local_changed_widgets)
    return '<%s>\n%s\n</%s>' % (self.type, inner, self.type)
It is used to automatically represent the object to HTML format packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of tags that have to be updated. The tag that have to be updated is the key, and the value is its textual repr.
def _identify(self, dataframe): """ Returns a list of indexes containing only the points that pass the filter. Parameters ---------- dataframe : DataFrame """ ## # TODO Fix this implementation. (i.e., why not support just 'left') # At the moment this implementation won't work at all. # The logic here can be simplified. id1 = dataframe[self.channels[0]] >= self.vert[0] id2 = dataframe[self.channels[1]] >= self.vert[1] if 'left' in self.region: id1 = ~id1 if 'bottom' in self.region: id2 = ~id2 idx = id1 & id2 if 'out' in self.region: idx = ~idx return idx
Returns a list of indexes containing only the points that pass the filter. Parameters ---------- dataframe : DataFrame
def _make_request(self, opener, request, timeout=None): """Make the API call and return the response. This is separated into it's own function, so we can mock it easily for testing. :param opener: :type opener: :param request: url payload to request :type request: urllib.Request object :param timeout: timeout value or None :type timeout: float :return: urllib response """ timeout = timeout or self.timeout try: return opener.open(request, timeout=timeout) except HTTPError as err: exc = handle_error(err) exc.__cause__ = None raise exc
Make the API call and return the response. This is separated into it's own function, so we can mock it easily for testing. :param opener: :type opener: :param request: url payload to request :type request: urllib.Request object :param timeout: timeout value or None :type timeout: float :return: urllib response
def verify_checksum(*lines):
    """Verify the checksum of one or more TLE lines.

    Raises `ValueError` if any of the lines fails its checksum, and
    includes the failing line in the error message.
    """
    for line in lines:
        digit = line[68:69]
        if not digit.isdigit():
            # No checksum digit present (short line); nothing to verify.
            continue
        expected = int(digit)
        actual = compute_checksum(line)
        if expected != actual:
            complaint = ('TLE line gives its checksum as {}'
                         ' but in fact tallies to {}:\n{}')
            raise ValueError(complaint.format(expected, actual, line))
Verify the checksum of one or more TLE lines. Raises `ValueError` if any of the lines fails its checksum, and includes the failing line in the error message.
def processRequest(self, request: Request, frm: str):
    """
    Handle a REQUEST from the client.
    If the request has already been executed, the node re-sends the reply to
    the client. Otherwise, the node acknowledges the client request, adds it
    to its list of client requests, and sends a PROPAGATE to the
    remaining nodes.

    :param request: the REQUEST from the client
    :param frm: the name of the client that sent this REQUEST
    """
    logger.debug("{} received client request: {} from {}".
                 format(self.name, request, frm))
    self.nodeRequestSpikeMonitorData['accum'] += 1

    # TODO: What if client sends requests with same request id quickly so
    # before reply for one is generated, the other comes. In that
    # case we need to keep track of what requests ids node has seen
    # in-memory and once request with a particular request id is processed,
    # it should be removed from that in-memory DS.

    # If request is already processed(there is a reply for the request in
    # the node's transaction store then return the reply from the
    # transaction store)
    # TODO: What if the reply was a REQNACK? Its not gonna be found in the
    # replies.

    # Dispatch on the transaction type: actions, reads/queries and writes
    # take separate paths.
    txn_type = request.operation[TXN_TYPE]
    if self.is_action(txn_type):
        self.process_action(request, frm)

    elif txn_type == GET_TXN:
        self.handle_get_txn_req(request, frm)
        self.total_read_request_number += 1

    elif self.is_query(txn_type):
        self.process_query(request, frm)
        self.total_read_request_number += 1

    elif self.can_write_txn(txn_type):
        reply = self.getReplyFromLedgerForRequest(request)
        if reply:
            logger.debug("{} returning reply from already processed "
                         "REQUEST: {}".format(self, request))
            self.transmitToClient(reply, frm)
            return

        # If the node is not already processing the request
        if not self.isProcessingReq(request.key):
            self.startedProcessingReq(request.key, frm)
            # forced request should be processed before consensus
            self.handle_request_if_forced(request)
            # If not already got the propagate request(PROPAGATE) for the
            # corresponding client request(REQUEST)
            self.recordAndPropagate(request, frm)
            # NOTE(review): the ack appears to be sent only when this node
            # newly starts processing the request — confirm that duplicate
            # REQUESTs are intentionally left unacknowledged here.
            self.send_ack_to_client((request.identifier, request.reqId), frm)

    else:
        raise InvalidClientRequest(
            request.identifier,
            request.reqId,
            'Pool is in readonly mode, try again in 60 seconds')
Handle a REQUEST from the client. If the request has already been executed, the node re-sends the reply to the client. Otherwise, the node acknowledges the client request, adds it to its list of client requests, and sends a PROPAGATE to the remaining nodes. :param request: the REQUEST from the client :param frm: the name of the client that sent this REQUEST
def bytes_to_ustr(self, b):
    """Decode the bytes array *b* into a unicode string using this
    object's charset (mapped through ``charset_map`` when present)."""
    codec = charset_map.get(self.charset, self.charset)
    return b.decode(codec)
convert bytes array to unicode string
def get_base_url(self, force_http=False):
    """ Creates base URL path

    :param force_http: `True` if HTTP base URL should be used and `False` otherwise
    :type force_http: str
    :return: base url string (HTTP metadata endpoint or ``s3://`` prefix)
    :rtype: str
    """
    # With force_http, use the configured AWS metadata HTTP endpoint;
    # otherwise build an s3:// URI ('s3:/' + '/' from the format below).
    base_url = SHConfig().aws_metadata_url.rstrip('/') if force_http else 's3:/'
    # Bucket depends on the processing level (L1C vs L2A).
    aws_bucket = SHConfig().aws_s3_l1c_bucket if self.data_source is DataSource.SENTINEL2_L1C else \
        SHConfig().aws_s3_l2a_bucket
    return '{}/{}/'.format(base_url, aws_bucket)
Creates base URL path :param force_http: `True` if HTTP base URL should be used and `False` otherwise :type force_http: str :return: base url string :rtype: str
def search(self, start_ts, end_ts):
    """Query Mongo for documents whose `_ts` lies in [start_ts, end_ts].

    Yields every matching timestamp/namespace document from each meta
    collection.
    """
    ts_filter = {"_ts": {"$lte": end_ts, "$gte": start_ts}}
    for collection_name in self._meta_collections():
        collection = self.meta_database[collection_name]
        for document in collection.find(ts_filter):
            yield document
Called to query Mongo for documents in a time range.
def postprocess_result(morphresult, trim_phonetic, trim_compound):
    """Postprocess vabamorf wrapper output into a plain dict with the
    decoded surface text and its postprocessed analyses."""
    word, analyses = morphresult
    processed = [
        postprocess_analysis(one_analysis, trim_phonetic, trim_compound)
        for one_analysis in analyses
    ]
    return {
        'text': deconvert(word),
        'analysis': processed,
    }
Postprocess vabamorf wrapper output.
def __create(self, account_id, name, short_description, amount, period,
             **kwargs):
    """Call documentation: `/subscription_plan/create
    <https://www.wepay.com/developer/reference/subscription_plan#create>`_,
    plus extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
       ``access_token``, with ``batch_mode=True`` will set `authorization`
       param to it's value.

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`
    """
    # Collect the required API parameters; extra keywords are forwarded
    # unchanged to make_call.
    params = {
        'account_id': account_id,
        'name': name,
        'short_description': short_description,
        'amount': amount,
        'period': period
    }
    return self.make_call(self.__create, params, kwargs)
Call documentation: `/subscription_plan/create <https://www.wepay.com/developer/reference/subscription_plan#create>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to it's value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
def _init_multi_count_metrics(self, pplan_helper): """Initializes the default values for a necessary set of MultiCountMetrics""" # inputs to_in_init = [self.metrics[i] for i in self.inputs_init if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)] for in_stream in pplan_helper.get_my_bolt().inputs: stream_id = in_stream.stream.id global_stream_id = in_stream.stream.component_name + "/" + stream_id for metric in to_in_init: metric.add_key(stream_id) metric.add_key(global_stream_id) # outputs to_out_init = [self.metrics[i] for i in self.outputs_init if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)] for out_stream in pplan_helper.get_my_bolt().outputs: stream_id = out_stream.stream.id for metric in to_out_init: metric.add_key(stream_id)
Initializes the default values for a necessary set of MultiCountMetrics
def to_java_doubles(m):
    '''
    to_java_doubles(m) yields a java array object for the vector or matrix m.

    Only 1D and 2D inputs are supported; anything of higher rank raises
    ValueError.
    '''
    global _java
    # Lazily initialize the JVM bridge on first use.
    if _java is None:
        _init_registration()
    m = np.asarray(m)
    dims = len(m.shape)
    if dims > 2:
        raise ValueError('1D and 2D arrays supported only')
    # Serialize as IEEE doubles ('d'); the JVM helper reconstructs either a
    # double[][] (2D) or double[] (1D) from the byte payload.
    bindat = serialize_numpy(m, 'd')
    return (_java.jvm.nben.util.Numpy.double2FromBytes(bindat) if dims == 2
            else _java.jvm.nben.util.Numpy.double1FromBytes(bindat))
to_java_doubles(m) yields a java array object for the vector or matrix m.
def unset(entity, *types):
    """Unset the TypedFields on the input `entity`.

    Args:
        entity: A mixbox.Entity object.
        *types: A variable-length list of TypedField subclasses. If not
            provided, defaults to TypedField.
    """
    match_types = types or (TypedField,)
    # Snapshot the matching keys first so the dict is not mutated while
    # being iterated.
    doomed = [field for field in entity._fields if isinstance(field, match_types)]
    for field in doomed:
        del entity._fields[field]
Unset the TypedFields on the input `entity`. Args: entity: A mixbox.Entity object. *types: A variable-length list of TypedField subclasses. If not provided, defaults to TypedField.
def monitor_session_span_command_direction(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for a Brocade SPAN monitor session's
    span-command direction and submits it via the callback.

    :keyword session_number: monitor session identifier (required)
    :keyword direction: mirror direction value (required)
    :keyword callback: optional override for ``self._callback``
    """
    config = ET.Element("config")
    monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span")
    session = ET.SubElement(monitor, "session")
    session_number_key = ET.SubElement(session, "session-number")
    session_number_key.text = kwargs.pop('session_number')
    span_command = ET.SubElement(session, "span-command")
    direction = ET.SubElement(span_command, "direction")
    direction.text = kwargs.pop('direction')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO:
    """
    Creates a cube VAO with normals and texture coordinates

    Args:
        width (float): Width of the cube
        height (float): Height of the cube
        depth (float): Depth of the cube

    Keyword Args:
        center: center of the cube as a 3-component tuple
        normals: (bool) Include normals
        uvs: (bool) include uv coordinates

    Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance
    """
    # Halve the extents so vertices become offsets from the center point.
    width, height, depth = width / 2.0, height / 2.0, depth / 2.0
    # 36 vertices: 6 faces x 2 triangles x 3 vertices, interleaved x/y/z.
    pos = numpy.array([
        # +Z face
        center[0] + width, center[1] - height, center[2] + depth,
        center[0] + width, center[1] + height, center[2] + depth,
        center[0] - width, center[1] - height, center[2] + depth,
        center[0] + width, center[1] + height, center[2] + depth,
        center[0] - width, center[1] + height, center[2] + depth,
        center[0] - width, center[1] - height, center[2] + depth,
        # +X face
        center[0] + width, center[1] - height, center[2] - depth,
        center[0] + width, center[1] + height, center[2] - depth,
        center[0] + width, center[1] - height, center[2] + depth,
        center[0] + width, center[1] + height, center[2] - depth,
        center[0] + width, center[1] + height, center[2] + depth,
        center[0] + width, center[1] - height, center[2] + depth,
        # -Y face
        center[0] + width, center[1] - height, center[2] - depth,
        center[0] + width, center[1] - height, center[2] + depth,
        center[0] - width, center[1] - height, center[2] + depth,
        center[0] + width, center[1] - height, center[2] - depth,
        center[0] - width, center[1] - height, center[2] + depth,
        center[0] - width, center[1] - height, center[2] - depth,
        # -X face
        center[0] - width, center[1] - height, center[2] + depth,
        center[0] - width, center[1] + height, center[2] + depth,
        center[0] - width, center[1] + height, center[2] - depth,
        center[0] - width, center[1] - height, center[2] + depth,
        center[0] - width, center[1] + height, center[2] - depth,
        center[0] - width, center[1] - height, center[2] - depth,
        # -Z face
        center[0] + width, center[1] + height, center[2] - depth,
        center[0] + width, center[1] - height, center[2] - depth,
        center[0] - width, center[1] - height, center[2] - depth,
        center[0] + width, center[1] + height, center[2] - depth,
        center[0] - width, center[1] - height, center[2] - depth,
        center[0] - width, center[1] + height, center[2] - depth,
        # +Y face
        center[0] + width, center[1] + height, center[2] - depth,
        center[0] - width, center[1] + height, center[2] - depth,
        center[0] + width, center[1] + height, center[2] + depth,
        center[0] - width, center[1] + height, center[2] - depth,
        center[0] - width, center[1] + height, center[2] + depth,
        center[0] + width, center[1] + height, center[2] + depth,
    ], dtype=numpy.float32)

    if normals:
        # Per-vertex face normals, 6 entries per face, same order as `pos`.
        normal_data = numpy.array([
            -0, 0, 1,
            -0, 0, 1,
            -0, 0, 1,
            0, 0, 1,
            0, 0, 1,
            0, 0, 1,
            1, 0, 0,
            1, 0, 0,
            1, 0, 0,
            1, 0, 0,
            1, 0, 0,
            1, 0, 0,
            0, -1, 0,
            0, -1, 0,
            0, -1, 0,
            0, -1, 0,
            0, -1, 0,
            0, -1, 0,
            -1, -0, 0,
            -1, -0, 0,
            -1, -0, 0,
            -1, -0, 0,
            -1, -0, 0,
            -1, -0, 0,
            0, 0, -1,
            0, 0, -1,
            0, 0, -1,
            0, 0, -1,
            0, 0, -1,
            0, 0, -1,
            0, 1, 0,
            0, 1, 0,
            0, 1, 0,
            0, 1, 0,
            0, 1, 0,
            0, 1, 0,
        ], dtype=numpy.float32)

    if uvs:
        # Per-vertex texture coordinates, same vertex order as `pos`.
        uvs_data = numpy.array([
            1, 0, 1, 1, 0, 0,
            1, 1, 0, 1, 0, 0,
            1, 0, 1, 1, 0, 0,
            1, 1, 0, 1, 0, 0,
            1, 1, 0, 1, 0, 0,
            1, 1, 0, 0, 1, 0,
            0, 1, 0, 0, 1, 0,
            0, 1, 1, 0, 1, 1,
            1, 0, 1, 1, 0, 1,
            1, 0, 0, 1, 0, 0,
            1, 1, 0, 1, 1, 0,
            0, 1, 0, 0, 1, 0
        ], dtype=numpy.float32)

    vao = VAO("geometry:cube")

    # Add buffers
    vao.buffer(pos, '3f', ['in_position'])
    if normals:
        vao.buffer(normal_data, '3f', ['in_normal'])
    if uvs:
        vao.buffer(uvs_data, '2f', ['in_uv'])

    return vao
Creates a cube VAO with normals and texture coordinates Args: width (float): Width of the cube height (float): Height of the cube depth (float): Depth of the cube Keyword Args: center: center of the cube as a 3-component tuple normals: (bool) Include normals uvs: (bool) include uv coordinates Returns: A :py:class:`demosys.opengl.vao.VAO` instance
def nps_survey_response_show(self, survey_id, id, **kwargs):
    """https://developer.zendesk.com/rest_api/docs/nps-api/responses#show-response"""
    path = "/api/v2/nps/surveys/{survey_id}/responses/{id}.json".format(
        survey_id=survey_id, id=id)
    return self.call(path, **kwargs)
https://developer.zendesk.com/rest_api/docs/nps-api/responses#show-response
def _get_transitions(self,
                     expression: Any,
                     expected_type: PredicateType) -> Tuple[List[str], PredicateType]:
    """
    This is used when converting a logical form into an action sequence. This piece
    recursively translates a lisp expression into an action sequence, making sure we
    match the expected type (or using the expected type to get the right type for
    constant expressions).

    Returns the list of production-rule strings for ``expression`` together
    with the expression's resolved type. Raises ``ParsingError`` on arity
    mismatches, unknown constants or type mismatches.
    """
    if isinstance(expression, (list, tuple)):
        # Function application: first element is the function, the rest are
        # its arguments, each translated against the declared argument type.
        function_transitions, return_type, argument_types = self._get_function_transitions(expression[0],
                                                                                           expected_type)
        if len(argument_types) != len(expression[1:]):
            raise ParsingError(f'Wrong number of arguments for function in {expression}')
        argument_transitions = []
        for argument_type, subexpression in zip(argument_types, expression[1:]):
            argument_transitions.extend(self._get_transitions(subexpression, argument_type)[0])
        return function_transitions + argument_transitions, return_type
    elif isinstance(expression, str):
        if expression not in self._functions:
            raise ParsingError(f"Unrecognized constant: {expression}")
        constant_types = self._function_types[expression]
        if len(constant_types) == 1:
            constant_type = constant_types[0]
            # This constant had only one type; that's the easy case.
            if expected_type and expected_type != constant_type:
                raise ParsingError(f'{expression} did not have expected type {expected_type} '
                                   f'(found {constant_type})')
            return [f'{constant_type} -> {expression}'], constant_type
        else:
            # Ambiguous constant: the expected type is required to pick one.
            if not expected_type:
                raise ParsingError('With no expected type and multiple types to pick from '
                                   f"I don't know what type to use (constant was {expression})")
            if expected_type not in constant_types:
                raise ParsingError(f'{expression} did not have expected type {expected_type} '
                                   f'(found these options: {constant_types}; none matched)')
            return [f'{expected_type} -> {expression}'], expected_type
    else:
        raise ParsingError('Not sure how you got here. Please open an issue on github with details.')
This is used when converting a logical form into an action sequence. This piece recursively translates a lisp expression into an action sequence, making sure we match the expected type (or using the expected type to get the right type for constant expressions).
def draw_data_value_rect(cairo_context, color, value_size, name_size, pos, port_side):
    """This method draws the containing rect for the data port value, depending on the side and size of the label.

    :param cairo_context: Draw Context
    :param color: Background color of value part
    :param value_size: Size (width, height) of label holding the value
    :param name_size: Size (width, height) of label holding the name
    :param pos: Position of name label start point (upper left corner of label)
    :param port_side: Side on which the value part should be drawn
    :return: Rotation Angle (to rotate value accordingly), X-Position of value label start point,
             Y-Position of value label start point
    """
    c = cairo_context
    rot_angle = .0
    move_x = 0.
    move_y = 0.

    # For BOTTOM/TOP the label is drawn rotated by +-90 degrees, so the
    # rectangle's width/height arguments are swapped accordingly.
    if port_side is SnappedSide.RIGHT:
        move_x = pos[0] + name_size[0]
        move_y = pos[1]
        c.rectangle(move_x, move_y, value_size[0], value_size[1])
    elif port_side is SnappedSide.BOTTOM:
        move_x = pos[0] - value_size[1]
        move_y = pos[1] + name_size[0]
        rot_angle = pi / 2.
        c.rectangle(move_x, move_y, value_size[1], value_size[0])
    elif port_side is SnappedSide.LEFT:
        move_x = pos[0] - value_size[0]
        move_y = pos[1]
        c.rectangle(move_x, move_y, value_size[0], value_size[1])
    elif port_side is SnappedSide.TOP:
        move_x = pos[0] - value_size[1]
        move_y = pos[1] - value_size[0]
        rot_angle = -pi / 2.
        c.rectangle(move_x, move_y, value_size[1], value_size[0])

    # Fill with the background color, then stroke a black outline.
    c.set_source_rgba(*color)
    c.fill_preserve()
    c.set_source_rgb(*gui_config.gtk_colors['BLACK'].to_floats())
    c.stroke()

    return rot_angle, move_x, move_y
This method draws the containing rect for the data port value, depending on the side and size of the label. :param cairo_context: Draw Context :param color: Background color of value part :param value_size: Size (width, height) of label holding the value :param name_size: Size (width, height) of label holding the name :param pos: Position of name label start point (upper left corner of label) :param port_side: Side on which the value part should be drawn :return: Rotation Angle (to rotate value accordingly), X-Position of value label start point, Y-Position of value label start point
def from_pb(cls, app_profile_pb, instance):
    """Creates an instance app_profile from a protobuf.

    :type app_profile_pb: :class:`instance_pb2.app_profile_pb`
    :param app_profile_pb: An instance protobuf object.

    :type instance: :class:`google.cloud.bigtable.instance.Instance`
    :param instance: The instance that owns the cluster.

    :rtype: :class:`AppProfile`
    :returns: The AppProfile parsed from the protobuf response.

    :raises: :class:`ValueError <exceptions.ValueError>` if the AppProfile
             name does not match
             ``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}``
             or if the parsed instance ID does not match the instance ID
             on the client.
             or if the parsed project ID does not match the project ID
             on the client.
    """
    # Parse the fully-qualified resource name and validate each component
    # against the owning instance/client before constructing the object.
    match_app_profile_name = _APP_PROFILE_NAME_RE.match(app_profile_pb.name)
    if match_app_profile_name is None:
        raise ValueError(
            "AppProfile protobuf name was not in the " "expected format.",
            app_profile_pb.name,
        )
    if match_app_profile_name.group("instance") != instance.instance_id:
        raise ValueError(
            "Instance ID on app_profile does not match the "
            "instance ID on the client"
        )
    if match_app_profile_name.group("project") != instance._client.project:
        raise ValueError(
            "Project ID on app_profile does not match the "
            "project ID on the client"
        )
    app_profile_id = match_app_profile_name.group("app_profile_id")

    result = cls(app_profile_id, instance)
    result._update_from_pb(app_profile_pb)
    return result
Creates an instance app_profile from a protobuf. :type app_profile_pb: :class:`instance_pb2.app_profile_pb` :param app_profile_pb: An instance protobuf object. :type instance: :class:`google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the cluster. :rtype: :class:`AppProfile` :returns: The AppProfile parsed from the protobuf response. :raises: :class:`ValueError <exceptions.ValueError>` if the AppProfile name does not match ``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}`` or if the parsed instance ID does not match the instance ID on the client. or if the parsed project ID does not match the project ID on the client.
def search(self, terms):
    """Search the registry for *terms*.

    Returns a dict mapping each image's name to its result dict; empty
    when the response fails validation.
    """
    found = {}
    response = self._request_builder('GET', 'search', params={'q': terms})
    if self._validate_response(response):
        results = json.loads(response.content.decode('utf-8'))['results']
        found = {entry['name']: entry for entry in results}
    return found
returns a dict {"name": "image_dict"}
def cash_table(self):
    '''现金的table

    Builds a two-column frame (cash, datetime) from ``self.cash``/
    ``self.time_index_max``, adds a date column and the account cookie,
    and indexes by (datetime, account_cookie).
    '''
    _cash = pd.DataFrame(
        data=[self.cash[1::], self.time_index_max],
        index=['cash', 'datetime']
    ).T
    _cash = _cash.assign(
        date=_cash.datetime.apply(lambda x: pd.to_datetime(str(x)[0:10]))
    ).assign(account_cookie=self.account_cookie)  # .sort_values('datetime')
    return _cash.set_index(['datetime', 'account_cookie'], drop=False)

"""
实验性质
@2018-06-09
# 对于账户持仓的分解

1. 真实持仓hold:

正常模式/TZero模式:
hold = 历史持仓(init_hold)+ 初始化账户后发生的所有交易导致的持仓(hold_available)

动态持仓(初始化账户后的持仓)hold_available:
self.history 计算而得

2. 账户的可卖额度(sell_available)

正常模式:
sell_available
结算前: init_hold+ 买卖交易(卖-)
结算后: init_hold+ 买卖交易(买+ 卖-)
TZero模式:
sell_available
结算前: init_hold - 买卖交易占用的额度(abs(买+ 卖-))
结算过程 是为了补平(等于让hold={})
结算后: init_hold
"""
现金的table
def drop_curie(self, name):
    """Removes a CURIE.

    The CURIE link with the given name is removed from the document.
    Handles both storage shapes: a single CURIE stored as a bare dict,
    or a list of CURIE dicts. Removing a name that is not present is a
    no-op for the list case.
    """
    curies = self.o[LINKS_KEY][self.draft.curies_rel]
    # Single CURIE stored as a bare dict rather than a list.
    if isinstance(curies, dict) and curies['name'] == name:
        del self.o[LINKS_KEY][self.draft.curies_rel]
        return
    # List of CURIEs: delete the first one with a matching name.
    # (Fixed: the original had an unreachable `continue` after `break`.)
    for index, curie in enumerate(curies):
        if curie['name'] == name:
            del curies[index]
            break
Removes a CURIE. The CURIE link with the given name is removed from the document.
def reconstruct_emds(edm, Om, all_points, method=None, **kwargs):
    """ Reconstruct point set using E(dge)-MDS.

    :param edm: Euclidean distance matrix of the point set.
    :param Om: edge/angle matrix used by the (E)MDS solvers.
    :param all_points: reference points; shape (N, d). Row 0 anchors the
        reconstruction, and the full set is used for the final Procrustes
        alignment.
    :param method: None for superMDS, or 'iterative'/'relaxed' for the
        constrained solvers (these require kwargs C and b).
    :return: reconstructed points aligned to ``all_points`` (no scaling).
    """
    from .point_set import dm_from_edm
    N = all_points.shape[0]
    d = all_points.shape[1]
    dm = dm_from_edm(edm)
    if method is None:
        from .mds import superMDS
        Xhat, __ = superMDS(all_points[0, :], N, d, Om=Om, dm=dm)
    else:
        C = kwargs.get('C', None)
        b = kwargs.get('b', None)
        if C is None or b is None:
            raise NameError(
                'Need constraints C and b for reconstruct_emds in iterative mode.')
        # Noisy kernel from the outer product of distances, masked by Om.
        KE_noisy = np.multiply(np.outer(dm, dm), Om)
        if method == 'iterative':
            from .mds import iterativeEMDS
            Xhat, __ = iterativeEMDS(
                all_points[0, :], N, d, KE=KE_noisy, C=C, b=b)
        elif method == 'relaxed':
            from .mds import relaxedEMDS
            Xhat, __ = relaxedEMDS(
                all_points[0, :], N, d, KE=KE_noisy, C=C, b=b)
        else:
            raise NameError('Undefined method', method)
    # Rigidly align the estimate to the reference points (no scaling).
    Y, R, t, c = procrustes(all_points, Xhat, scale=False)
    return Y
Reconstruct point set using E(dge)-MDS.
def get_asn_verbose_dns(self, asn=None):
    """
    The function for retrieving the information for an ASN from
    Cymru via port 53 (DNS). This is needed since IP to ASN mapping via
    Cymru DNS does not return the ASN Description like Cymru Whois does.

    Args:
        asn (:obj:`str`): The AS number (required). May be given with or
            without the leading 'AS' prefix.

    Returns:
        str: The raw ASN data.

    Raises:
        ASNLookupError: The ASN lookup failed.
    """
    # Normalize to the 'ASnnnn' form expected by the Cymru zone name.
    if asn[0:2] != 'AS':
        asn = 'AS{0}'.format(asn)

    zone = '{0}.asn.cymru.com'.format(asn)

    try:
        log.debug('ASN verbose query for {0}'.format(zone))
        data = self.dns_resolver.query(zone, 'TXT')
        return str(data[0])

    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers,
            dns.resolver.NoAnswer, dns.exception.Timeout) as e:
        raise ASNLookupError(
            'ASN lookup failed (DNS {0}) for {1}.'.format(
                e.__class__.__name__, asn)
        )

    except:  # pragma: no cover
        # Deliberate catch-all: any other failure is re-raised as a
        # library-specific lookup error.
        raise ASNLookupError(
            'ASN lookup failed for {0}.'.format(asn)
        )
The function for retrieving the information for an ASN from Cymru via port 53 (DNS). This is needed since IP to ASN mapping via Cymru DNS does not return the ASN Description like Cymru Whois does. Args: asn (:obj:`str`): The AS number (required). Returns: str: The raw ASN data. Raises: ASNLookupError: The ASN lookup failed.
def update_domain(self, domain, emailAddress=None, ttl=None, comment=None):
    """
    Provides a way to modify the following attributes of a domain
    record:
        - email address
        - ttl setting
        - comment
    """
    # Truthiness check matches the API's notion of "nothing provided".
    if not any((emailAddress, ttl, comment)):
        raise exc.MissingDNSSettings(
            "No settings provided to update_domain().")
    uri = "/domains/%s" % utils.get_id(domain)
    candidates = (("comment", comment), ("ttl", ttl),
                  ("emailAddress", emailAddress))
    body = {key: val for key, val in candidates if val is not None}
    resp, resp_body = self._async_call(uri, method="PUT", body=body,
                                       error_class=exc.DomainUpdateFailed,
                                       has_response=False)
    return resp_body
Provides a way to modify the following attributes of a domain record: - email address - ttl setting - comment
def to_triangulation(self):
    """
    Returns the mesh as a matplotlib.tri.Triangulation instance. (2D only)
    """
    from matplotlib.tri import Triangulation
    conn = self.split("simplices").unstack()
    coords = self.nodes.coords.copy()
    # Remap arbitrary node labels to the contiguous 0..N-1 indices that
    # matplotlib's Triangulation requires.
    node_map = pd.Series(data=np.arange(len(coords)), index=coords.index)
    conn = node_map.loc[conn.values.flatten()].values.reshape(*conn.shape)
    return Triangulation(coords.x.values, coords.y.values, conn)
Returns the mesh as a matplotlib.tri.Triangulation instance. (2D only)
def maybe_cythonize_extensions(top_path, config):
    """Tweaks for building extensions between release and development mode.

    Release tarballs (detected via PKG-INFO) ship pre-generated C/C++
    sources; development checkouts regenerate them with Cython.
    """
    is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))

    if is_release:
        build_from_c_and_cpp_files(config.ext_modules)
    else:
        message = ('Please install cython with a version >= {0} in order '
                   'to build a scikit-survival development version.').format(
                       CYTHON_MIN_VERSION)
        try:
            import Cython
            if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
                message += ' Your version of Cython was {0}.'.format(
                    Cython.__version__)
                raise ValueError(message)
            from Cython.Build import cythonize
        except ImportError as exc:
            exc.args += (message,)
            raise

        # http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments
        directives = {'language_level': '3'}
        cy_cov = os.environ.get('CYTHON_COVERAGE', False)
        if cy_cov:
            # Line tracing is required for coverage of Cython code.
            directives['linetrace'] = True
            macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
        else:
            macros = []

        config.ext_modules = cythonize(
            config.ext_modules,
            compiler_directives=directives)

        for e in config.ext_modules:
            e.define_macros.extend(macros)
Tweaks for building extensions between release and development mode.
def get_target_extraction_context(self, build_file_path: str) -> dict:
    """Return a build file parser target extraction context.

    The target extraction context is a build-file-specific mapping from
    builder-name to target extraction function, for every registered
    builder.
    """
    return {
        name: extractor(name, builder, build_file_path, self)
        for name, builder in Plugin.builders.items()
    }
Return a build file parser target extraction context. The target extraction context is a build-file-specific mapping from builder-name to target extraction function, for every registered builder.
def getAdaptedTraveltime(self, edgeID, time):
    """getAdaptedTraveltime(string, double) -> double

    Returns the travel time value (in s) used for (re-)routing
    which is valid on the edge at the given time.
    """
    # Manually framed TraCI message: one extra integer parameter (time).
    self._connection._beginMessage(tc.CMD_GET_EDGE_VARIABLE,
                                   tc.VAR_EDGE_TRAVELTIME, edgeID, 1 + 4)
    self._connection._string += struct.pack(
        "!Bi", tc.TYPE_INTEGER, time)
    return self._connection._checkResult(tc.CMD_GET_EDGE_VARIABLE,
                                         tc.VAR_EDGE_TRAVELTIME,
                                         edgeID).readDouble()
getAdaptedTraveltime(string, double) -> double Returns the travel time value (in s) used for (re-)routing which is valid on the edge at the given time.
def setup_logging(verbosity, filename=None):
    """Configure root logging for this tool.

    verbosity 0 selects WARNING, 1 INFO, 2 or more DEBUG. Always logs to
    stderr; when *filename* is given, also appends to that file.
    """
    level_by_verbosity = (logging.WARNING, logging.INFO, logging.DEBUG)
    chosen = level_by_verbosity[min(verbosity, len(level_by_verbosity) - 1)]
    logging.root.setLevel(chosen)
    formatter = logging.Formatter('%(asctime)s %(levelname)-12s %(message)-100s '
                                  '[%(filename)s:%(lineno)d]')
    handlers = [logging.StreamHandler()]  # stderr
    if filename:
        handlers.append(logging.FileHandler(filename, 'a'))
    for handler in handlers:
        handler.setFormatter(formatter)
        logging.root.addHandler(handler)
Configure logging for this tool.
def register_intent_parser(self, intent_parser, domain=0):
    """Register a intent parser with a domain.

    Args:
        intent_parser(intent): The intent parser you wish to register.

        domain(str): a string representing the domain you wish register the intent
        parser to.
    """
    # Lazily create the domain on first use.
    if domain not in self.domains:
        self.register_domain(domain=domain)
    target_domain = self.domains[domain]
    target_domain.register_intent_parser(intent_parser=intent_parser)
Register a intent parser with a domain. Args: intent_parser(intent): The intent parser you wish to register. domain(str): a string representing the domain you wish register the intent parser to.
def device_destroy(self, id, **kwargs):  # noqa: E501
    """Delete a device. # noqa: E501

    Delete device. Only available for devices with a developer certificate.
    Attempts to delete a device with a production certificate will return a
    400 response. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_destroy(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the synchronous and asynchronous paths delegate to the same
    # generated *_with_http_info helper with identical arguments, so the
    # call is made once and its result returned either way.
    kwargs['_return_http_data_only'] = True
    outcome = self.device_destroy_with_http_info(id, **kwargs)  # noqa: E501
    return outcome
Delete a device. # noqa: E501 Delete device. Only available for devices with a developer certificate. Attempts to delete a device with a production certificate will return a 400 response. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_destroy(id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str id: (required) :return: None If the method is called asynchronously, returns the request thread.
def get(self, wheel=False):
    """Downloads the package from PyPI.

    :param wheel: when True, download the wheel into a fresh temporary
        directory (stored on ``self.temp_dir``); otherwise download the
        sdist into ``self.save_dir``.

    Returns:
        Full path of the downloaded file.
    Raises:
        PermissionError if the save_dir is not writable.
        SystemExit if no download URL could be resolved.
    """
    try:
        url = get_url(self.client, self.name, self.version, wheel,
                      hashed_format=True)[0]
    except exceptions.MissingUrlException as e:
        raise SystemExit(e)

    if wheel:
        # Wheels are staged in a throwaway directory for later inspection.
        self.temp_dir = tempfile.mkdtemp()
        save_dir = self.temp_dir
    else:
        save_dir = self.save_dir

    save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
    request.urlretrieve(url, save_file)
    logger.info('Downloaded package from PyPI: {0}.'.format(save_file))

    return save_file
Downloads the package from PyPI. Returns: Full path of the downloaded file. Raises: PermissionError if the save_dir is not writable.
def precompute_begin_state(self):
    """
    Caches the summation calculation and available choices for BEGIN * state_size.
    Significantly speeds up chain generation on large corpuses. Thanks, @schollz!
    """
    begin_state = (BEGIN,) * self.state_size
    choices, weights = zip(*self.model[begin_state].items())
    self.begin_cumdist = list(accumulate(weights))
    self.begin_choices = choices
Caches the summation calculation and available choices for BEGIN * state_size. Significantly speeds up chain generation on large corpuses. Thanks, @schollz!
def ndim(self):
    """If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays."""
    if not self.is_raster():
        return None
    return {
        FeatureType.DATA: 4,
        FeatureType.MASK: 4,
        FeatureType.SCALAR: 2,
        FeatureType.LABEL: 2,
        FeatureType.DATA_TIMELESS: 3,
        FeatureType.MASK_TIMELESS: 3,
        FeatureType.SCALAR_TIMELESS: 1,
        FeatureType.LABEL_TIMELESS: 1,
    }[self]
If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays.
def get_victoria_day(self, year):
    """
    Return Victoria Day for Edinburgh.

    Set to the Monday strictly before May 24th. It means that if May 24th
    is a Monday, it's shifted to the week before.
    """
    anchor = date(year, 5, 24)
    # weekday() is 0 for Monday, so a Monday anchor shifts a full week back.
    days_back = anchor.weekday() or 7
    return (anchor - timedelta(days=days_back), "Victoria Day")
Return Victoria Day for Edinburgh. Set to the Monday strictly before May 24th. It means that if May 24th is a Monday, it's shifted to the week before.
def _event_duration(vevent): """unify dtend and duration to the duration of the given vevent""" if hasattr(vevent, 'dtend'): return vevent.dtend.value - vevent.dtstart.value elif hasattr(vevent, 'duration') and vevent.duration.value: return vevent.duration.value return timedelta(0)
unify dtend and duration to the duration of the given vevent
def initialize(self, store):
    """Common initialization of handlers happens here.

    If additional initialization is required, this method must either be
    called with ``super`` or the child class must assign the ``store``
    attribute and register itself with the store.
    """
    assert isinstance(store, stores.BaseStore)
    # Per-handler message queue; the store delivers into it.
    self.messages = Queue()
    self.store = store
    self.store.register(self)
Common initialization of handlers happens here. If additional initialization is required, this method must either be called with ``super`` or the child class must assign the ``store`` attribute and register itself with the store.
def flatten(suitable_for_isinstance):
    """Flatten an isinstance() type specification into a tuple of types.

    isinstance() accepts a single type, a tuple of types, or an arbitrarily
    nested tree of tuples; return a flat, duplicate-free tuple of every
    type found in the argument (order is unspecified).
    """
    if not isinstance(suitable_for_isinstance, tuple):
        suitable_for_isinstance = (suitable_for_isinstance,)
    collected = set()
    for item in suitable_for_isinstance:
        # Recurse on nested tuples; leaves are added directly.
        collected.update(flatten(item) if isinstance(item, tuple) else (item,))
    return tuple(collected)
isinstance() can accept a bunch of really annoying different types: * a single type * a tuple of types * an arbitrary nested tree of tuples Return a flattened tuple of the given argument.
def DbPutClassAttributeProperty2(self, argin):
    """ This command adds support for array properties compared to the
    previous one called DbPutClassAttributeProperty. The old command is
    still there for compatibility reasons.

    :param argin: Str[0] = Tango class name
        Str[1] = Attribute number
        Str[2] = Attribute name
        Str[3] = Property number
        Str[4] = Property name
        Str[5] = Property value number (array case)
        Str[5] = Property value 1
        Str[n] = Property value n (array case)
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid """
    self._log.debug("In DbPutClassAttributeProperty2()")
    # Header: class name and attribute count; the remainder is the
    # attribute/property payload handed to the database layer verbatim.
    class_name = argin[0]
    nb_attributes = int(argin[1])
    self.db.put_class_attribute_property2(class_name, nb_attributes, argin[2:])
This command adds support for array properties compared to the previous one called DbPutClassAttributeProperty. The old command is still there for compatibility reasons :param argin: Str[0] = Tango class name Str[1] = Attribute number Str[2] = Attribute name Str[3] = Property number Str[4] = Property name Str[5] = Property value number (array case) Str[5] = Property value 1 Str[n] = Property value n (array case) ..... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
def creds_display(creds: dict, filt: dict = None, filt_dflt_incl: bool = False) -> dict:
    """Summarize indy-sdk creds matching an input filter.

    Scan the creds structure (json-loaded, as returned via
    HolderProver.get_creds()) and return a human-legible dict mapping
    credential referents to cred_info synopses -- not proper indy-sdk creds
    structures (each as per HolderProver.get_creds_display_coarse()).

    :param creds: creds structure returned by HolderProver.get_creds()
    :param filt: dict (None or empty for no filter, matching all) mapping
        cred def identifiers to {attribute: value} dicts to match; values
        are stringified before comparison
    :param filt_dflt_incl: whether to include (True) credentials whose cred
        def id the filter does not mention, or to exclude (False) them
    :return: dict mapping credential referents to human-readable cred
        synopses for creds matching the input filter
    """
    filt = filt or {}
    result = {}
    for briefs in creds.get('attrs', {}).values():
        for brief in briefs:  # each entry wraps a cred_info dict
            info = brief['cred_info']
            referent = info['referent']
            if referent in result:
                continue  # same credential seen under another attr uuid
            cd_id = info['cred_def_id']
            if not filt or (filt_dflt_incl and cd_id not in filt):
                result[referent] = info
            elif cd_id in filt:
                # Stringify filter values, then require subset match on attrs.
                wanted = {attr: str(val) for attr, val in filt[cd_id].items()}
                if wanted.items() <= info['attrs'].items():
                    result[referent] = info
    return result
Find indy-sdk creds matching input filter from within input creds structure, json-loaded as returned via HolderProver.get_creds(), and return human-legible summary. :param creds: creds structure returned by HolderProver.get_creds(); e.g., :: { "attrs": { "attr0_uuid": [ { "interval": null, "cred_info": { "attrs": { "attr0": "2", "attr1": "Hello", "attr2": "World" }, "referent": "00000000-0000-0000-0000-000000000000", "schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0", "cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0", "cred_rev_id": null, "rev_reg_id": null } }, { "interval": null, "cred_info": { "attrs": { "attr0": "1", "attr1": "Nice", "attr2": "Tractor" }, "referent": "00000000-0000-0000-0000-111111111111", "schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0", "cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0", "cred_rev_id": null, "rev_reg_id": null } } ], "attr1_uuid": [ { "interval": null, "cred_info": { "attrs": { "attr0": "2", "attr1": "Hello", "attr2": "World" }, "referent": "00000000-0000-0000-0000-000000000000", "schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0", "cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0", "cred_rev_id": null, "rev_reg_id": null } }, { "interval": null, "cred_info": { "attrs": { "attr0": "1", "attr1": "Nice", "attr2": "Tractor" }, "referent": "00000000-0000-0000-0000-111111111111", "schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0", "cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0", "cred_rev_id": null, "rev_reg_id": null } } ], "attr2_uuid": [ ... ] } } :param filt: filter for matching attributes and values; dict (None or empty for no filter, matching all) mapping each cred def identifier to dict mapping attributes to values to match; e.g., :: { 'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0': { 'attr0': 1, # operation stringifies en passant 'attr1': 'Nice' }, ... 
} :param: filt_dflt_incl: whether to include (True) all attributes for schema that filter does not identify or to exclude (False) all such attributes :return: human-legible dict mapping credential identifiers to human-readable creds synopses -- not proper indy-sdk creds structures (each as per HolderProver.get_creds_display_coarse()) -- for creds matching input filter
def get_averaged_bias_matrix(bias_sequences, dtrajs, nstates=None):
    r"""
    Computes a bias matrix via an exponential average of the observed frame wise bias energies.

    Parameters
    ----------
    bias_sequences : list of numpy.ndarray(T_i, num_therm_states)
        A single reduced bias energy trajectory or a list of reduced bias
        energy trajectories. For every simulation frame seen in trajectory i
        and time step t, btrajs[i][t, k] is the reduced bias energy of that
        frame evaluated in the k'th thermodynamic state (i.e. at the k'th
        Umbrella/Hamiltonian/temperature)
    dtrajs : list of numpy.ndarray(T_i) of int
        A single discrete trajectory or a list of discrete trajectories. The
        integers are indexes in 0,...,num_conf_states-1 enumerating the
        num_conf_states Markov states or the bins the trajectory is in at any
        time.
    nstates : int, optional, default=None
        Number of configuration states.

    Returns
    -------
    bias_matrix : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
        bias_energies_full[j, i] is the bias energy in units of kT for each
        discrete state i at thermodynamic state j.

    Raises
    ------
    ValueError
        If ``nstates`` is given but smaller than the number of microstates
        actually observed in ``dtrajs``.
    """
    from pyemma.thermo.extensions.util import (logsumexp as _logsumexp,
        logsumexp_pair as _logsumexp_pair)
    # Highest microstate index actually observed across all trajectories.
    nmax = int(_np.max([dtraj.max() for dtraj in dtrajs]))
    if nstates is None:
        nstates = nmax + 1
    elif nstates < nmax + 1:
        raise ValueError("nstates is smaller than the number of observed microstates")
    nthermo = bias_sequences[0].shape[1]
    # Accumulator holds log-sum-exp of -bias per (thermo state, microstate);
    # initialized to log(0) = -inf so never-visited states stay empty.
    bias_matrix = -_np.ones(shape=(nthermo, nstates), dtype=_np.float64) * _np.inf
    counts = _np.zeros(shape=(nstates,), dtype=_np.intc)
    for s in range(len(bias_sequences)):
        for i in range(nstates):
            # Frames of trajectory s assigned to microstate i.
            idx = (dtrajs[s] == i)
            nidx = idx.sum()
            if nidx == 0:
                continue
            counts[i] += nidx
            selected_bias_sequence = bias_sequences[s][idx, :]
            for k in range(nthermo):
                # Numerically stable accumulation:
                # log(exp(acc) + sum_t exp(-bias[t, k])).
                bias_matrix[k, i] = _logsumexp_pair(
                    bias_matrix[k, i],
                    _logsumexp(
                        _np.ascontiguousarray(-selected_bias_sequence[:, k]),
                        inplace=False))
    idx = counts.nonzero()
    log_counts = _np.log(counts[idx])
    # Turn the accumulated log-mean-exp of -bias into an averaged bias:
    # negate, then add log(count) for every visited microstate (the mean's
    # 1/N normalization in log space).
    bias_matrix *= -1.0
    bias_matrix[:, idx] += log_counts[_np.newaxis, :]
    return bias_matrix
r""" Computes a bias matrix via an exponential average of the observed frame wise bias energies. Parameters ---------- bias_sequences : list of numpy.ndarray(T_i, num_therm_states) A single reduced bias energy trajectory or a list of reduced bias energy trajectories. For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at the k'th Umbrella/Hamiltonian/temperature) dtrajs : list of numpy.ndarray(T_i) of int A single discrete trajectory or a list of discrete trajectories. The integers are indexes in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the trajectory is in at any time. nstates : int, optional, default=None Number of configuration states. Returns ------- bias_matrix : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i at thermodynamic state j.
def download_apcor(self, uri):
    """
    Downloads apcor data.

    Args:
      uri: The URI of the apcor data file.

    Returns:
      apcor: ossos.downloads.core.ApcorData
    """
    local_file = os.path.basename(uri)
    # Prefer a locally cached copy; otherwise fetch through VOSpace.
    if os.access(local_file, os.F_OK):
        fobj = open(local_file)
    else:
        fobj = storage.vofile(uri, view='data')
    fobj.seek(0)
    # FIX: renamed from `str`, which shadowed the builtin of the same name;
    # also dropped the redundant intermediate assignment.
    apcor_str = fobj.read()
    fobj.close()
    return ApcorData.from_string(apcor_str)
Downloads apcor data. Args: uri: The URI of the apcor data file. Returns: apcor: ossos.downloads.core.ApcorData
def iterate_analogy_datasets(args):
    """Generator over all analogy evaluation datasets.

    Yields (dataset_name, creation_kwargs, dataset) triples for every
    combination of keyword arguments each requested dataset supports.
    """
    for name in args.analogy_datasets:
        parameters = nlp.data.list_datasets(name)
        keys = list(parameters.keys())
        # Cartesian product over all parameter value lists, in key order.
        for values in itertools.product(*(parameters[key] for key in keys)):
            kwargs = dict(zip(keys, values))
            yield name, kwargs, nlp.data.create(name, **kwargs)
Generator over all analogy evaluation datasets. Iterates over dataset names, keyword arguments for their creation and the created dataset.
def _get_role(rolename): """Reads and parses a file containing a role""" path = os.path.join('roles', rolename + '.json') if not os.path.exists(path): abort("Couldn't read role file {0}".format(path)) with open(path, 'r') as f: try: role = json.loads(f.read()) except ValueError as e: msg = "Little Chef found the following error in your" msg += " {0}.json file:\n {1}".format(rolename, str(e)) abort(msg) role['fullname'] = rolename return role
Reads and parses a file containing a role
def _symmetrize_correlograms(correlograms): """Return the symmetrized version of the CCG arrays.""" n_clusters, _, n_bins = correlograms.shape assert n_clusters == _ # We symmetrize c[i, j, 0]. # This is necessary because the algorithm in correlograms() # is sensitive to the order of identical spikes. correlograms[..., 0] = np.maximum(correlograms[..., 0], correlograms[..., 0].T) sym = correlograms[..., 1:][..., ::-1] sym = np.transpose(sym, (1, 0, 2)) return np.dstack((sym, correlograms))
Return the symmetrized version of the CCG arrays.
def get_status_badge(self, project, definition, branch_name=None, stage_name=None, job_name=None, configuration=None, label=None):
    """GetStatusBadge.
    [Preview API] <p>Gets the build status for a definition, optionally scoped to a specific branch, stage, job, and configuration.</p><p>If there are more than one, then it is required to pass in a stageName value when specifying a jobName, and the same rule then applies for both if passing a configuration parameter.</p>
    :param str project: Project ID or project name
    :param str definition: Either the definition name with optional leading folder path, or the definition id.
    :param str branch_name: Only consider the most recent build for this branch.
    :param str stage_name: Use this stage within the pipeline to render the status.
    :param str job_name: Use this job within a stage of the pipeline to render the status.
    :param str configuration: Use this job configuration to render the status
    :param str label: Replaces the default text on the left side of the badge.
    :rtype: str
    """
    # Serialize only the route/query values the caller actually supplied.
    route_values = {}
    for route_key, route_value in (('project', project), ('definition', definition)):
        if route_value is not None:
            route_values[route_key] = self._serialize.url(route_key, route_value, 'str')
    query_parameters = {}
    optional_params = (('branchName', 'branch_name', branch_name),
                       ('stageName', 'stage_name', stage_name),
                       ('jobName', 'job_name', job_name),
                       ('configuration', 'configuration', configuration),
                       ('label', 'label', label))
    for wire_name, param_name, value in optional_params:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(param_name, value, 'str')
    response = self._send(http_method='GET',
                          location_id='07acfdce-4757-4439-b422-ddd13a2fcc10',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('str', response)
GetStatusBadge. [Preview API] <p>Gets the build status for a definition, optionally scoped to a specific branch, stage, job, and configuration.</p> <p>If there are more than one, then it is required to pass in a stageName value when specifying a jobName, and the same rule then applies for both if passing a configuration parameter.</p> :param str project: Project ID or project name :param str definition: Either the definition name with optional leading folder path, or the definition id. :param str branch_name: Only consider the most recent build for this branch. :param str stage_name: Use this stage within the pipeline to render the status. :param str job_name: Use this job within a stage of the pipeline to render the status. :param str configuration: Use this job configuration to render the status :param str label: Replaces the default text on the left side of the badge. :rtype: str
def enable_service_freshness_checks(self):
    """Enable service freshness checks (globally)

    Format of the line that triggers function call::

    ENABLE_SERVICE_FRESHNESS_CHECKS

    :return: None
    """
    # Already enabled: nothing to record or broadcast.
    if self.my_conf.check_service_freshness:
        return
    # Record which global attribute changed before flipping the flag.
    self.my_conf.modified_attributes |= \
        DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
    self.my_conf.check_service_freshness = True
    self.my_conf.explode_global_conf()
    self.daemon.update_program_status()
Enable service freshness checks (globally) Format of the line that triggers function call:: ENABLE_SERVICE_FRESHNESS_CHECKS :return: None
def detect_with_url(
        self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, custom_headers=None, raw=False, **operation_config):
    """Detect human faces in an image, return face rectangles, and
    optionally with faceIds, landmarks, and attributes.

    The extracted face feature, instead of the actual image, is stored on
    the server; the returned faceId identifies it for follow-up calls
    (Face - Identify / Verify / Find Similar) and expires 24 hours after
    detection. JPEG, PNG, GIF (first frame) and BMP are supported, 1KB-6MB,
    with detectable faces from 36x36 up to 4096x4096 pixels; up to 64 faces
    are returned, ranked by face rectangle size from large to small.
    HeadPose's pitch value is a reserved field and will always return 0.
    If follow-up operations are needed, specify the recognition model with
    the 'recognitionModel' parameter; once specified, detected faceIds are
    associated with that model. See: [How to specify a recognition model]
    (https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model).

    :param url: Publicly reachable URL of an image
    :type url: str
    :param return_face_id: A value indicating whether the operation should
     return faceIds of detected faces.
    :type return_face_id: bool
    :param return_face_landmarks: A value indicating whether the operation
     should return landmarks of the detected faces.
    :type return_face_landmarks: bool
    :param return_face_attributes: Analyze and return the one or more
     specified face attributes in the comma-separated string like
     "returnFaceAttributes=age,gender". Note that each face attribute
     analysis has additional computational and time cost.
    :type return_face_attributes: list[str or
     ~azure.cognitiveservices.vision.face.models.FaceAttributeType]
    :param recognition_model: Name of recognition model used when face
     features are extracted and associated with detected faceIds. Possible
     values include: 'recognition_01', 'recognition_02'
    :type recognition_model: str or
     ~azure.cognitiveservices.vision.face.models.RecognitionModel
    :param return_recognition_model: A value indicating whether the
     operation should return 'recognitionModel' in response.
    :type return_recognition_model: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace]
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
    """
    image_url = models.ImageUrl(url=url)

    # Construct URL (the `url` parameter is deliberately rebound here:
    # its value is now carried in the request body via `image_url`).
    url = self.detect_with_url.metadata['url']
    path_format_arguments = {
        'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (only non-None options are sent on the wire)
    query_parameters = {}
    if return_face_id is not None:
        query_parameters['returnFaceId'] = self._serialize.query("return_face_id", return_face_id, 'bool')
    if return_face_landmarks is not None:
        query_parameters['returnFaceLandmarks'] = self._serialize.query("return_face_landmarks", return_face_landmarks, 'bool')
    if return_face_attributes is not None:
        query_parameters['returnFaceAttributes'] = self._serialize.query("return_face_attributes", return_face_attributes, '[FaceAttributeType]', div=',')
    if recognition_model is not None:
        query_parameters['recognitionModel'] = self._serialize.query("recognition_model", recognition_model, 'str')
    if return_recognition_model is not None:
        query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct body
    body_content = self._serialize.body(image_url, 'ImageUrl')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[DetectedFace]', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.<br /> * Optional parameters including faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. * The extracted face feature, instead of the actual image, will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). It will expire 24 hours after the detection call. * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. * Faces are detectable when its size is 36x36 to 4096x4096 pixels. If need to detect very small but clear faces, please try to enlarge the input image. * Up to 64 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. * Face detector prefer frontal and near-frontal faces. There are cases that faces may not be detected, e.g. exceptionally large face angles (head-pose) or being occluded, or wrong image orientation. * Attributes (age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise) may not be perfectly accurate. HeadPose's pitch value is a reserved field and will always return 0. * Different 'recognitionModel' values are provided. 
If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) . :param url: Publicly reachable URL of an image :type url: str :param return_face_id: A value indicating whether the operation should return faceIds of detected faces. :type return_face_id: bool :param return_face_landmarks: A value indicating whether the operation should return landmarks of the detected faces. :type return_face_landmarks: bool :param return_face_attributes: Analyze and return the one or more specified face attributes in the comma-separated string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost. :type return_face_attributes: list[str or ~azure.cognitiveservices.vision.face.models.FaceAttributeType] :param recognition_model: Name of recognition model. Recognition model is used when the face features are extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The default value is 'recognition_01', if latest model needed, please explicitly specify the model you need. 
Possible values include: 'recognition_01', 'recognition_02' :type recognition_model: str or ~azure.cognitiveservices.vision.face.models.RecognitionModel :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: list or ClientRawResponse if raw=true :rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace] or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
def fnmatch_multiple(candidates, pattern):
    '''
    Convenience function which runs fnmatch.fnmatch() on each element of
    passed iterable. The first matching candidate is returned, or None if
    there is no matching candidate.
    '''
    # Non-iterable input is treated as "no match" rather than an error.
    try:
        iterator = iter(candidates)
    except TypeError:
        return None
    for item in iterator:
        try:
            matched = fnmatch.fnmatch(item, pattern)
        except TypeError:
            # Candidates that fnmatch cannot handle are skipped silently.
            continue
        if matched:
            return item
    return None
Convenience function which runs fnmatch.fnmatch() on each element of passed iterable. The first matching candidate is returned, or None if there is no matching candidate.
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip small classes -- 25 lines or less fits on a typical terminal, so
  # anything that small can be taken in at a glance.  Also skip the class's
  # first line, which covers one-liners like "class Foo { public: ... };".
  # If the end of the class was never found, last_line is zero and the size
  # test below skips the check.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return

  # No warning when the previous line is blank, declares a class/struct
  # (start of the class, or a forward-declared inner class that is
  # semantically private but public for implementation reasons), or ends
  # with a backslash, as is common when defining classes in C macros.
  prev_line = clean_lines.lines[linenum - 1]
  if (IsBlankLine(prev_line) or
      Search(r'\b(class|struct)\b', prev_line) or
      Search(r'\\$', prev_line)):
    return

  # Locate the end of the class head to account for multi-line
  # base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found.
def visit_and_update_expressions(self, visitor_fn):
    """Create an updated version (if needed) of the ConstructResult via the visitor pattern."""
    replacements = {}
    for field_name, expression in six.iteritems(self.fields):
        updated = expression.visit_and_update(visitor_fn)
        # Identity comparison: only record fields the visitor replaced.
        if updated is not expression:
            replacements[field_name] = updated
    if not replacements:
        # Nothing changed; reuse this instance instead of allocating a copy.
        return self
    return ConstructResult(dict(self.fields, **replacements))
Create an updated version (if needed) of the ConstructResult via the visitor pattern.
def count_emails(self, conditions=None):
    """
    Count all certified emails

    :param conditions: optional dict of query filters appended to the URL;
        the 'ids' key may hold a list, which is serialized as a
        comma-separated string.
    :return: result of the GET request issued through Connection
    """
    # FIX: avoid mutable default argument.
    if conditions is None:
        conditions = {}
    url = self.EMAILS_COUNT_URL + "?"
    for key, value in conditions.items():
        # FIX: compare strings by equality; `key is 'ids'` relied on
        # CPython string interning and is not guaranteed to work.
        if key == 'ids':
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    connection = Connection(self.token)
    # FIX: removed the duplicated set_url() call.
    connection.set_url(self.production, url)
    return connection.get_request()
Count all certified emails
def autofix_codeblock(codeblock, max_line_len=80, aggressive=False,
                      very_aggressive=False, experimental=False):
    r"""
    Uses autopep8 to format a block of code

    Args:
        codeblock (str): source code to reformat
        max_line_len (int): maximum allowed line length passed to autopep8
        aggressive (bool): enable autopep8 level-1 aggressive fixes
        very_aggressive (bool): enable autopep8 level-2 aggressive fixes
        experimental (bool): enable autopep8 experimental fixes

    Returns:
        str: the reformatted code block

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> codeblock = ut.codeblock(
            '''
            def func( with , some = 'Problems' ):
             syntax ='Ok'
             but = 'Its very messy'
             if None:
                    # syntax might not be perfect due to being cut off
                    ommiting_this_line_still_works=            True
            ''')
        >>> fixed_codeblock = ut.autofix_codeblock(codeblock)
        >>> print(fixed_codeblock)
    """
    # FIXME idk how to remove the blank line following the function with
    # autopep8. It seems to not be supported by them, but it looks bad.
    import autopep8
    # BUG FIX: honor the max_line_len parameter; the value '80' was
    # previously hard-coded, so the parameter was silently ignored.
    arglist = ['--max-line-length', str(max_line_len)]
    if aggressive:
        arglist.append('-a')
    if very_aggressive:
        arglist.extend(['-a', '-a'])
    if experimental:
        arglist.append('--experimental')
    # autopep8.parse_args expects a trailing (dummy) filename argument.
    arglist.append('')
    autopep8_options = autopep8.parse_args(arglist)
    return autopep8.fix_code(codeblock, options=autopep8_options)
r""" Uses autopep8 to format a block of code Example: >>> # DISABLE_DOCTEST >>> import utool as ut >>> codeblock = ut.codeblock( ''' def func( with , some = 'Problems' ): syntax ='Ok' but = 'Its very messy' if None: # syntax might not be perfect due to being cut off ommiting_this_line_still_works= True ''') >>> fixed_codeblock = ut.autofix_codeblock(codeblock) >>> print(fixed_codeblock)
def samples(dataset='imagenet', index=0, batchsize=1, shape=(224, 224),
            data_format='channels_last'):
    """Return a batch of example images and the corresponding labels.

    Parameters
    ----------
    dataset : string
        The data set to load (options: imagenet, mnist, cifar10,
        cifar100, fashionMNIST).
    index : int
        For each data set 20 example images exist. The returned batch
        contains the images with index [index, index + 1, index + 2, ...].
    batchsize : int
        Size of batch.
    shape : list of integers
        The shape of the returned image (only relevant for Imagenet).
    data_format : str
        "channels_first" or "channels_last".

    Returns
    -------
    images : array_like
        The batch of example images.
    labels : array of int
        The labels associated with the images.
    """
    from PIL import Image

    basepath = os.path.dirname(__file__)
    samplepath = os.path.join(basepath, 'data')
    files = os.listdir(samplepath)

    images = []
    labels = []
    for idx in range(index, index + batchsize):
        # Only 20 example images exist per data set; wrap around.
        i = idx % 20

        # The label is encoded in the file name: <dataset>_<i>_<label>.<ext>
        prefix = '{}_{:02d}_'.format(dataset, i)
        filename = next(n for n in files if prefix in n)
        label = int(filename.split('.')[0].split('_')[-1])

        image = Image.open(os.path.join(samplepath, filename))
        if dataset == 'imagenet':
            image = image.resize(shape)
        image = np.asarray(image, dtype=np.float32)

        # MNIST images are grayscale and stay 2-D, so no transpose applies.
        if data_format == 'channels_first' and dataset != 'mnist':
            image = np.transpose(image, (2, 0, 1))

        images.append(image)
        labels.append(label)

    return np.stack(images), np.array(labels)
Returns a batch of example images and the corresponding labels Parameters ---------- dataset : string The data set to load (options: imagenet, mnist, cifar10, cifar100, fashionMNIST) index : int For each data set 20 example images exist. The returned batch contains the images with index [index, index + 1, index + 2, ...] batchsize : int Size of batch. shape : list of integers The shape of the returned image (only relevant for Imagenet). data_format : str "channels_first" or "channels_last" Returns ------- images : array_like The batch of example images labels : array of int The labels associated with the images.
def _routes_updated(self, ri):
    """Update the state of routes in the router.

    Compares the current routes with the (configured) existing routes
    and detects what was removed or added. Then configures the logical
    router in the hosting device accordingly.

    :param ri: RouterInfo corresponding to the router.
    :return: None
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        DriverException if the configuration operation fails.
    """
    new_routes = ri.router['routes']
    old_routes = ri.routes
    adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
                                                      new_routes)
    for route in adds:
        LOG.debug("Added route entry is '%s'", route)
        # A destination present in both lists is a replacement, not an
        # add + delete pair, so drop it from the delete list.
        # Fixed: iterate over a copy -- removing items from the list
        # being iterated silently skips the element after each removal.
        for del_route in list(removes):
            if route['destination'] == del_route['destination']:
                removes.remove(del_route)
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'replace', route)
    for route in removes:
        LOG.debug("Removed route entry is '%s'", route)
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'delete', route)
    ri.routes = new_routes
Update the state of routes in the router. Compares the current routes with the (configured) existing routes and detect what was removed or added. Then configure the logical router in the hosting device accordingly. :param ri: RouterInfo corresponding to the router. :return: None :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions. DriverException if the configuration operation fails.
def check_len_in(self, min_len, max_len, item):
    """Check that the length of item is in range(min_len, max_len+1)."""
    length_expr = "_coconut.len(" + item + ")"
    if max_len is None:
        # Unbounded above: only a lower bound is needed, and only
        # when it is nonzero (len() is never negative).
        if min_len:
            self.add_check(length_expr + " >= " + str(min_len))
    elif min_len == max_len:
        # Exact-length match.
        self.add_check(length_expr + " == " + str(min_len))
    elif not min_len:
        # Lower bound of zero is vacuous; only the upper bound matters.
        self.add_check(length_expr + " <= " + str(max_len))
    else:
        self.add_check(
            str(min_len) + " <= " + length_expr + " <= " + str(max_len))
Checks that the length of item is in range(min_len, max_len+1).
def __densify_border(self):
    """Densify the border of a polygon.

    The border is densified by a given factor (by default: 0.5). The
    complexity of the polygon's geometry is evaluated in order to
    densify the borders of its interior rings as well.

    Returns:
        list: a list of points where each point is represented by
            a list of its reduced coordinates

    Example:
        [[X1, Y1], [X2, Y2], ..., [Xn, Yn]]
    """
    # Normalize to a list of polygons so single- and multi-polygon
    # inputs are handled by the same loop.
    if isinstance(self._input_geom, MultiPolygon):
        polygons = [polygon for polygon in self._input_geom]
    else:
        polygons = [self._input_geom]

    points = []
    for polygon in polygons:
        # Fixed duplication: the exterior ring is always densified, so
        # the previous branch on len(polygon.interiors) was redundant.
        points += self.__fixed_interpolation(LineString(polygon.exterior))
        for interior_ring in polygon.interiors:
            points += self.__fixed_interpolation(LineString(interior_ring))

    return points
Densify the border of a polygon. The border is densified by a given factor (by default: 0.5). The complexity of the polygon's geometry is evaluated in order to densify the borders of its interior rings as well. Returns: list: a list of points where each point is represented by a list of its reduced coordinates Example: [[X1, Y1], [X2, Y2], ..., [Xn, Yn]]
def as_float_array(a):
    """View the quaternion array as an array of floats

    This function is fast (of order 1 microsecond) because no data is
    copied; the returned quantity is just a "view" of the original.  The
    output view has one more dimension (of size 4) than the input array,
    but is otherwise the same shape.
    """
    quaternions = np.asarray(a, dtype=np.quaternion)
    # Reinterpret each quaternion's four components as a trailing
    # axis of doubles, without copying the underlying data.
    return quaternions.view((np.double, 4))
View the quaternion array as an array of floats This function is fast (of order 1 microsecond) because no data is copied; the returned quantity is just a "view" of the original. The output view has one more dimension (of size 4) than the input array, but is otherwise the same shape.
def update(self, name=None, description=None, privacy_policy=None,
           subscription_policy=None, is_managed=None):
    """Update group.

    :param name: Name of group.
    :param description: Description of group.
    :param privacy_policy: PrivacyPolicy
    :param subscription_policy: SubscriptionPolicy
    :returns: Updated group
    """
    with db.session.begin_nested():
        # Each attribute is updated only when a value was supplied;
        # policies must additionally pass their validators.
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if privacy_policy is not None and PrivacyPolicy.validate(
                privacy_policy):
            self.privacy_policy = privacy_policy
        if subscription_policy is not None and SubscriptionPolicy.validate(
                subscription_policy):
            self.subscription_policy = subscription_policy
        if is_managed is not None:
            self.is_managed = is_managed

        db.session.merge(self)

    return self
Update group. :param name: Name of group. :param description: Description of group. :param privacy_policy: PrivacyPolicy :param subscription_policy: SubscriptionPolicy :returns: Updated group
def record_to_fs(self):
    """Create a filesystem file from a File"""
    record = self.record
    if not record.contents:
        # Nothing to write for an empty record.
        return

    # Python 2 needs binary mode; Python 3 needs text mode with
    # universal-newline translation disabled.
    if six.PY2:
        open_kwargs = {'mode': 'wb'}
    else:
        open_kwargs = {'mode': 'w', 'newline': ''}

    with self._fs.open(self.file_name, **open_kwargs) as f:
        self.record_to_fh(f)
Create a filesystem file from a File
def get_alpha_value(self):
    '''
    Getter for the learning rate.

    Returns:
        float: the learning rate.

    Raises:
        TypeError: if the stored value is not a float.
    '''
    if not isinstance(self.__alpha_value, float):
        raise TypeError("The type of __alpha_value must be float.")
    return self.__alpha_value
getter Learning rate.
def IsDesktopLocked() -> bool:
    """
    Check if desktop is locked.
    Return bool.
    Desktop is locked if press Win+L, Ctrl+Alt+Del or in remote desktop mode.
    """
    DESKTOP_SWITCHDESKTOP = 0x0100
    hDesktop = ctypes.windll.user32.OpenDesktopW(
        ctypes.c_wchar_p('Default'), 0, 0, DESKTOP_SWITCHDESKTOP)
    if not hDesktop:
        # Could not open the desktop; report "not locked" as before.
        return False
    try:
        # SwitchDesktop fails (returns 0) when the workstation is locked.
        return not ctypes.windll.user32.SwitchDesktop(hDesktop)
    finally:
        ctypes.windll.user32.CloseDesktop(hDesktop)
Check if desktop is locked. Return bool. Desktop is locked if press Win+L, Ctrl+Alt+Del or in remote desktop mode.
def post(self):
    """Make the HTTP POST to the url, sending post_data as JSON."""
    self._construct_post_data()
    # The payload goes in as the 'json' keyword argument for the call.
    self.http_method_args.update({"json": self.post_data})
    return self.http_method("POST")
Makes the HTTP POST to the url sending post_data.
def zone_compare(timezone):
    '''
    Compares the given timezone name with the system timezone name.
    Checks the hash sum between the given timezone, and the one set in
    /etc/localtime. Returns True if names and hash sums match, and False if not.
    Mostly useful for running state checks.

    .. versionchanged:: 2016.3.0

    .. note::

        On Solaris-like operating systems only a string comparison is done.

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems only a string comparison is done.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    os_family = __grains__['os_family']

    # Solaris-like and AIX systems: fall back to a name comparison.
    if 'Solaris' in os_family or 'AIX' in os_family:
        return timezone == get_zone()

    # FreeBSD may have no localtime file at all; compare names then too.
    if 'FreeBSD' in os_family:
        if not os.path.isfile(_get_localtime_path()):
            return timezone == get_zone()

    tzfile = _get_localtime_path()
    zonepath = _get_zone_file(timezone)

    try:
        # Byte-for-byte comparison (shallow=False) of the two zone files.
        return filecmp.cmp(tzfile, zonepath, shallow=False)
    except OSError as exc:
        # Translate the low-level error into the appropriate salt error
        # depending on which file could not be read.
        problematic_file = exc.filename
        if problematic_file == zonepath:
            raise SaltInvocationError(
                'Can\'t find a local timezone "{0}"'.format(timezone))
        elif problematic_file == tzfile:
            raise CommandExecutionError(
                'Failed to read {0} to determine current timezone: {1}'
                .format(tzfile, exc.strerror))
        raise
Compares the given timezone name with the system timezone name. Checks the hash sum between the given timezone, and the one set in /etc/localtime. Returns True if names and hash sums match, and False if not. Mostly useful for running state checks. .. versionchanged:: 2016.3.0 .. note:: On Solaris-like operating systems only a string comparison is done. .. versionchanged:: 2016.11.4 .. note:: On AIX operating systems only a string comparison is done. CLI Example: .. code-block:: bash salt '*' timezone.zone_compare 'America/Denver'
def job_exists(name=None):
    '''
    Check whether the job exists in configured Jenkins jobs.

    :param name: The name of the job to check for existence.
    :return: True if the job exists, False if it does not.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.job_exists jobname
    '''
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')

    server = _connect()
    # Normalize whatever the client returns to a strict True/False.
    return bool(server.job_exists(name))
Check whether the job exists in configured Jenkins jobs. :param name: The name of the job to check for existence. :return: True if job exists, False if job does not exist. CLI Example: .. code-block:: bash salt '*' jenkins.job_exists jobname
def record_schemas(
        fn, wrapper, location, request_schema=None, response_schema=None):
    """Support extracting the schema from the decorated function.

    Stores the request/response schemas (and their location) on the
    wrapper, and mirrors them onto fn's acceptable metadata when fn was
    already decorated by an acceptable api call.

    :param fn: the decorated function.
    :param wrapper: the wrapper function the schemas are attached to.
    :param location: where the schema applies (e.g. request body).
    :param request_schema: optional schema for the request.
    :param response_schema: optional schema for the response.
    """
    # Has fn already been decorated by an acceptable api call?
    has_acceptable = hasattr(fn, '_acceptable_metadata')

    if request_schema is not None:
        # Preserve schema for later use.
        # Fixed: the original assigned wrapper._request_schema twice in
        # a redundant chained self-assignment.
        wrapper._request_schema = request_schema
        wrapper._request_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._request_schema = request_schema
            fn._acceptable_metadata._request_schema_location = location

    if response_schema is not None:
        # Preserve schema for later use (same fix as above).
        wrapper._response_schema = response_schema
        wrapper._response_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._response_schema = response_schema
            fn._acceptable_metadata._response_schema_location = location
Support extracting the schema from the decorated function.
def get(self, request, format=None):
    """
    Get list of bots
    ---
    serializer: BotSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Only the bots owned by the requesting user are listed.
    owned_bots = Bot.objects.filter(owner=request.user)
    return Response(BotSerializer(owned_bots, many=True).data)
Get list of bots --- serializer: BotSerializer responseMessages: - code: 401 message: Not authenticated
def patch_conf(settings_patch=None, settings_file=None):
    """
    Reload the configuration from scratch. Only the default config is loaded,
    not the environment-specified config. Then the specified patch is applied.

    This is for unit tests only!

    :param settings_patch: Custom configuration values to insert
    :param settings_file: Custom settings file to read
    """
    # NOTE(review): this function is a generator (it yields once at the
    # end), so it is presumably wrapped with @contextmanager at the
    # decoration site -- confirm against the decorator, outside this view.
    if settings_patch is None:
        settings_patch = {}

    # Drop any previously loaded configuration so defaults are re-read.
    reload_config()
    # Point the loader at the custom settings file ('' means none).
    os.environ[ENVIRONMENT_VARIABLE] = settings_file if settings_file else ''
    from bernard.conf import settings as l_settings

    # noinspection PyProtectedMember
    r_settings = l_settings._settings  # mutable dict backing the settings proxy
    r_settings.update(settings_patch)

    # If i18n was already imported, its caches were built against the old
    # configuration and must be regenerated.
    if 'bernard.i18n' in modules:
        from bernard.i18n import translate, intents

        translate._regenerate_word_dict()
        intents._refresh_intents_db()

    yield
Reload the configuration from scratch. Only the default config is loaded, not the environment-specified config. Then the specified patch is applied. This is for unit tests only! :param settings_patch: Custom configuration values to insert :param settings_file: Custom settings file to read
def param_title(param_values, slug):
    """
    Return the title of a product parameter.

    Usage example::

        {% param_title item.paramvalue_set.all "producer" %}

    :param param_values: iterable of parameter values
    :param slug: the symbolic code (slug) of the parameter to look up
    :return: the title of the first matching parameter, or None
    """
    return next(
        (value.param.title
         for value in param_values
         if value.param.slug == slug),
        None,
    )
Отображает наименование параметра товара Пример использования:: {% param_title item.paramvalue_set.all "producer" %} :param param_values: список значений параметров :param slug: символьный код параметра :return:
def cluster_elongate():
    """Not so applicable for this sample."""
    initial_centers = [[1.0, 4.5], [3.1, 2.7]]
    sample = SIMPLE_SAMPLES.SAMPLE_ELONGATE
    # Run the same clustering under both splitting criteria for comparison.
    for criterion in (splitting_type.BAYESIAN_INFORMATION_CRITERION,
                      splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH):
        template_clustering(initial_centers, sample, criterion=criterion)
Not so applicable for this sample