code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def matches(self, other):
    """A disjunctive list matches a phoneme if any of its members matches it.

    If ``other`` is itself a disjunctive list, any pairwise match between
    the two lists counts as a match.
    """
    if other is None:
        return False
    if isinstance(other, PhonemeDisjunction):
        # match if any member of this list matches the other disjunction
        return any(member.matches(other) for member in self)
    if isinstance(other, (list, PhonologicalFeature)):
        # normalize raw feature specs into a phoneme before comparing
        other = phoneme(other)
    return any(member <= other for member in self)
A disjunctive list matches a phoneme if any of its members matches the phoneme. If other is also a disjunctive list, any match between this list and the other returns true.
def load_rules(self):
    """Load the column-mapping rules from ``self.col_file``.

    Each line of the file becomes one ``MapColumn`` rule appended to
    ``self.col_maps`` (any previous rules are discarded).
    """
    self.col_maps = []
    with open(self.col_file, 'r') as handle:
        for raw_line in handle:
            self.col_maps.append(MapColumn(raw_line))
load the rules from file
def is_indexed(self, dataset):
    """Return True if ``dataset`` is already indexed, otherwise False.

    Checks for a matching ``vid`` row in the ``dataset_index`` table.
    """
    query = text(""" SELECT vid FROM dataset_index WHERE vid = :vid; """)
    rows = self.backend.library.database.connection \
        .execute(query, vid=dataset.vid) \
        .fetchall()
    return bool(rows)
Returns True if dataset is already indexed. Otherwise returns False.
def _buildStaticFiles(self):
    """Copy the static support files into the output directory so that
    relative imports work.

    Directories are copied recursively (replacing any stale copy); plain
    files are copied as-is.  ``.zip`` files are additionally extracted in
    place, after which the archive and any macOS ``__MACOSX`` residue are
    removed.

    The destination defaults to ``<output_path>/static`` unless
    ``self.output_path_static`` was already set (allowed only in special
    applications like KompleteVizMultiModel).
    """
    if not self.output_path_static:
        self.output_path_static = os.path.join(self.output_path, "static")
    if not os.path.exists(self.output_path_static):
        os.makedirs(self.output_path_static)
    for x in self.static_files:
        source_f = os.path.join(self.static_root, x)
        dest_f = os.path.join(self.output_path_static, x)
        if os.path.isdir(source_f):
            # copytree refuses to overwrite, so clear any stale copy first
            if os.path.exists(dest_f):
                shutil.rmtree(dest_f)
            shutil.copytree(source_f, dest_f)
        else:
            shutil.copyfile(source_f, dest_f)
            if x.endswith('.zip'):
                printDebug("..unzipping")
                # context manager guarantees the archive handle is closed
                # even if extraction fails
                with zipfile.ZipFile(dest_f, 'r') as zip_ref:
                    zip_ref.extractall(self.output_path_static)
                printDebug("..cleaning up")
                os.remove(dest_f)
                # macOS zips may ship a metadata folder; remove it only when
                # present (the original rmtree'd it unconditionally and
                # crashed on zips without one)
                # http://superuser.com/questions/104500/what-is-macosx-folder
                macosx_dir = os.path.join(self.output_path_static, "__MACOSX")
                if os.path.exists(macosx_dir):
                    shutil.rmtree(macosx_dir)
move over static files so that relative imports work Note: if a dir is passed, it is copied with all of its contents If the file is a zip, it is copied and extracted too # By default folder name is 'static', unless *output_path_static* is passed (now allowed only in special applications like KompleteVizMultiModel)
def parse_sgtin_96(sgtin_96):
    """Parse a SGTIN-96 hex string into its EPC segments.

    Args:
        sgtin_96: 24-character hex string whose EPC header byte is 0x30.

    Returns:
        dict with ``header``, ``filter``, ``partition``, ``company_prefix``,
        ``item_reference`` and ``serial`` keys.

    Raises:
        Exception: on empty input, a non-SGTIN-96 header, or a company
            prefix value that exceeds its digit allowance.
    """
    if not sgtin_96:
        raise Exception('Pass in a value.')
    if not sgtin_96.startswith("30"):
        # EPC header 0x30 identifies SGTIN-96; anything else is not handled
        raise Exception('Not SGTIN-96.')

    binary = "{0:020b}".format(int(sgtin_96, 16)).zfill(96)
    header = int(binary[:8], 2)
    tag_filter = int(binary[8:11], 2)
    partition = binary[11:14]
    partition_value = int(partition, 2)
    # m/n are bit widths, l/k are digit counts for the company prefix and
    # item reference respectively
    m, l, n, k = SGTIN_96_PARTITION_MAP[partition_value]

    company_start = 8 + 3 + 3
    company_end = company_start + m
    company_data = int(binary[company_start:company_end], 2)
    # An l-digit prefix holds at most 10**l - 1.  The original check used
    # '>' which wrongly accepted a value of exactly 10**l (l+1 digits).
    if company_data >= pow(10, l):
        raise Exception('Company value is too large')
    company_prefix = str(company_data).zfill(l)

    item_start = company_end
    item_end = item_start + n
    item_data = binary[item_start:item_end]
    item_number = int(item_data, 2)
    item_reference = str(item_number).zfill(k)

    serial = int(binary[-38:], 2)
    return {
        "header": header,
        "filter": tag_filter,
        "partition": partition,
        "company_prefix": company_prefix,
        "item_reference": item_reference,
        "serial": serial,
    }
Given a SGTIN-96 hex string, parse each segment. Returns a dictionary of the segments.
def compare_params(defined, existing, return_old_value=False):
    '''
    .. versionadded:: 2017.7

    Compares Zabbix object definition against existing Zabbix object.

    :param defined: Zabbix object definition taken from sls file.
    :param existing: Existing Zabbix object taken from result of an API call.
    :param return_old_value: Default False. If True, returns
        dict("old"=old_val, "new"=new_val) for rollback purpose.
    :return: Params that are different from existing object. Result extended
        by object ID can be passed directly to Zabbix API update method.
    '''
    # Comparison of data types
    if not isinstance(defined, type(existing)):
        raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. '
                            'Existing value: "{2}", defined value: "{3}").'.format(type(existing),
                                                                                  type(defined),
                                                                                  existing,
                                                                                  defined))

    # Comparison of scalar values
    if not salt.utils.data.is_iter(defined):
        if six.text_type(defined) != six.text_type(existing) and return_old_value:
            return {'new': six.text_type(defined), 'old': six.text_type(existing)}
        elif six.text_type(defined) != six.text_type(existing) and not return_old_value:
            return six.text_type(defined)

    # Comparison of lists of values or lists of dicts
    if isinstance(defined, list):
        if len(defined) != len(existing):
            log.info('Different list length!')
            return {'new': defined, 'old': existing} if return_old_value else defined
        else:
            differences = []
            for def_item in defined:
                per_existing = []
                for exist_item in existing:
                    comp = compare_params(def_item, exist_item, return_old_value)
                    per_existing.append(comp['new'] if return_old_value else comp)
                # def_item differs from every existing item -> list changed
                if all(per_existing):
                    differences.append(def_item)
            # Any difference means the whole defined list must be returned
            # and provided for update
            if any(differences) and return_old_value:
                return {'new': defined, 'old': existing}
            elif any(differences) and not return_old_value:
                return defined

    # Comparison of dicts
    if isinstance(defined, dict):
        try:
            # defined must be a subset of existing to be compared
            if set(defined) <= set(existing):
                intersection = set(defined) & set(existing)
                diffdict = {'new': {}, 'old': {}} if return_old_value else {}
                for key in intersection:
                    comp = compare_params(defined[key], existing[key], return_old_value)
                    # an empty list still counts as a reportable difference
                    if return_old_value:
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict['new'].update({key: defined[key]})
                            diffdict['old'].update({key: existing[key]})
                    else:
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict.update({key: defined[key]})
                return diffdict
            return {'new': defined, 'old': existing} if return_old_value else defined
        except TypeError:
            raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. '
                                'Existing value: "{2}", defined value: "{3}").'.format(type(existing),
                                                                                      type(defined),
                                                                                      existing,
                                                                                      defined))
.. versionadded:: 2017.7 Compares Zabbix object definition against existing Zabbix object. :param defined: Zabbix object definition taken from sls file. :param existing: Existing Zabbix object taken from result of an API call. :param return_old_value: Default False. If True, returns dict("old"=old_val, "new"=new_val) for rollback purpose. :return: Params that are different from existing object. Result extended by object ID can be passed directly to Zabbix API update method.
def global_state_code(self):
    """Return the module-level code required by the function generated from
    ``func_code``: imports plus any compiled regular expressions.
    """
    self._generate_func_code()

    if not self._compile_regexps:
        # no regexes: only the exception import is needed
        return '\n'.join([
            'from fastjsonschema import JsonSchemaException',
            '',
            '',
        ])

    regexs = [
        '"{}": re.compile(r"{}")'.format(key, value.pattern)
        for key, value in self._compile_regexps.items()
    ]
    return '\n'.join([
        'import re',
        'from fastjsonschema import JsonSchemaException',
        '',
        '',
        'REGEX_PATTERNS = {',
        ' ' + ',\n '.join(regexs),
        '}',
        '',
    ])
Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports.
def save(self, filename):
    """Save waypoints to *filename* in QGroundControl WPL-110 format.

    Each waypoint becomes one tab-separated line; a waypoint's optional
    ``comment`` attribute is emitted as a '#' line immediately before it.
    """
    # context manager guarantees the file is closed even if a write fails
    # (the original leaked the handle on error)
    with open(filename, mode='w') as f:
        f.write("QGC WPL 110\n")
        for w in self.wpoints:
            if getattr(w, 'comment', None):
                f.write("# %s\n" % w.comment)
            f.write("%u\t%u\t%u\t%u\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%u\n" % (
                w.seq, w.current, w.frame, w.command,
                w.param1, w.param2, w.param3, w.param4,
                w.x, w.y, w.z, w.autocontinue))
save waypoints to a file
def parse_xml_node(self, node):
    """Populate this object from an xml.dom node describing a preceding
    condition and return self.
    """
    super(Preceding, self).parse_xml_node(node)
    candidates = node.getElementsByTagNameNS(RTS_NS, 'Preceding')
    # exactly one <Preceding> element is required
    if candidates.length != 1:
        raise InvalidParticipantNodeError
    p_node = candidates[0]
    if p_node.hasAttributeNS(RTS_NS, 'timeout'):
        self.timeout = int(p_node.getAttributeNS(RTS_NS, 'timeout'))
    else:
        self.timeout = 0
    if p_node.hasAttributeNS(RTS_NS, 'sendingTiming'):
        self.sending_timing = p_node.getAttributeNS(RTS_NS, 'sendingTiming')
    else:
        self.sending_timing = 'ASYNC'
    self._preceding_components = []
    for child in p_node.getElementsByTagNameNS(RTS_NS, 'PrecedingComponents'):
        self._preceding_components.append(
            TargetExecutionContext().parse_xml_node(child))
    return self
Parse an xml.dom Node object representing a preceding condition into this object.
def connect():
    """Connect to the FTP server, log in and return an ftplib.FTP instance.

    When the module-level SSL flag is set, an FTP_TLS client is used and the
    data channel is switched to TLS as well.
    """
    cls = ftplib.FTP_TLS if SSL else ftplib.FTP
    ftp = cls(timeout=TIMEOUT)
    ftp.connect(HOST, PORT)
    ftp.login(USER, PASSWORD)
    if SSL:
        # protect the data connection, not just the control channel
        ftp.prot_p()
    return ftp
Connect to FTP server, login and return an ftplib.FTP instance.
def add_argument(self, parser, bootstrap=False):
    """Register this dict-style item's children on ``parser``.

    Each child adds itself as its own CLI argument, namespaced under this
    item's name.  For example a non-nested dict item named 'db' with
    children 'host' and 'port' yields the valid CLI args
    ['--db-host', 'localhost', '--db-port', '1234'].

    Args:
        parser (argparse.ArgumentParser): The parser to add this item to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            this item as required or not.
    """
    if not self.cli_expose:
        return
    for child in self.children.values():
        child.add_argument(parser, bootstrap)
Add dict-style item as an argument to the given parser. The dict item will take all the nested items in the dictionary and namespace them with the dict name, adding each child item as their own CLI argument. Examples: A non-nested dict item with the name 'db' and children named 'port' and 'host' will result in the following being valid CLI args: ['--db-host', 'localhost', '--db-port', '1234'] Args: parser (argparse.ArgumentParser): The parser to add this item to. bootstrap (bool): Flag to indicate whether you only want to mark this item as required or not.
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
    """
    Sort ``values`` and reorder corresponding ``labels``.

    ``values`` should be unique if ``labels`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``labels`` is not None.
    labels : list_like
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``na_sentinel``.
    na_sentinel : int, default -1
        Value in ``labels`` to mark "not found".
        Ignored when ``labels`` is None.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``labels`` is None.

    Returns
    -------
    ordered : ndarray
        Sorted ``values``
    new_labels : ndarray
        Reordered ``labels``; returned when ``labels`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``labels`` is neither None
          nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``labels`` is not None and ``values`` contain duplicates.
    """
    if not is_list_like(values):
        raise TypeError("Only list-like objects are allowed to be passed to"
                        "safe_sort as values")

    if not isinstance(values, np.ndarray):
        # don't convert to string types
        dtype, _ = infer_dtype_from_array(values)
        values = np.asarray(values, dtype=dtype)

    def sort_mixed(values):
        # order ints before strings, safe in py3
        str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
        nums = np.sort(values[~str_pos])
        strs = np.sort(values[str_pos])
        return np.concatenate([nums, np.asarray(strs, dtype=object)])

    sorter = None
    if lib.infer_dtype(values, skipna=False) == 'mixed-integer':
        # unorderable in py3 if mixed str/int
        ordered = sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except TypeError:
            # try this anyway
            ordered = sort_mixed(values)

    # labels:
    if labels is None:
        return ordered

    if not is_list_like(labels):
        raise TypeError("Only list-like objects or None are allowed to be"
                        "passed to safe_sort as labels")
    labels = ensure_platform_int(np.asarray(labels))

    from pandas import Index
    if not assume_unique and not Index(values).is_unique:
        raise ValueError("values should be unique if labels is not None")

    if sorter is None:
        # mixed types
        (hash_klass, _), values = algorithms._get_data_algo(
            values, algorithms._hashtables)
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    reverse_indexer = np.empty(len(sorter), dtype=np.int_)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = (labels < -len(values)) | (labels >= len(values)) | \
        (labels == na_sentinel)

    # (Out of bound indices will be masked with `na_sentinel` next, so we may
    # deal with them here without performance loss using `mode='wrap'`.)
    new_labels = reverse_indexer.take(labels, mode='wrap')
    np.putmask(new_labels, mask, na_sentinel)

    return ordered, ensure_platform_int(new_labels)
Sort ``values`` and reorder corresponding ``labels``. ``values`` should be unique if ``labels`` is not None. Safe for use with mixed types (int, str), orders ints before strs. .. versionadded:: 0.19.0 Parameters ---------- values : list-like Sequence; must be unique if ``labels`` is not None. labels : list_like Indices to ``values``. All out of bound indices are treated as "not found" and will be masked with ``na_sentinel``. na_sentinel : int, default -1 Value in ``labels`` to mark "not found". Ignored when ``labels`` is None. assume_unique : bool, default False When True, ``values`` are assumed to be unique, which can speed up the calculation. Ignored when ``labels`` is None. Returns ------- ordered : ndarray Sorted ``values`` new_labels : ndarray Reordered ``labels``; returned when ``labels`` is not None. Raises ------ TypeError * If ``values`` is not list-like or if ``labels`` is neither None nor list-like * If ``values`` cannot be sorted ValueError * If ``labels`` is not None and ``values`` contain duplicates.
def set_file_atrificat_of_project(self, doc, symbol, value):
    """Set a file artifact (name, uri or home) on the current file.

    Raises OrderError if no package or file has been defined yet.
    """
    # guard clause: both a package and a file must already exist
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::Artificat')
    self.file(doc).add_artifact(symbol, value)
Sets a file name, uri or home artificat. Raises OrderError if no package or file defined.
def config_args(self):
    """Define the program's command line options and parse them into
    ``self.args``.
    """
    add = self.arg_parser.add_argument
    add('--version', action='version',
        version='%(prog)s ' + str(__version__))
    add('--verbose', action='store_true', dest='verbosemode',
        help=_('set verbose terminal output'))
    add('-s', action='store_true', dest='silentmode',
        help=_('silence terminal output'))
    add('--list-parsers', action='store_true', dest='list_parsers',
        help=_('return a list of available parsers'))
    add('-p', action='store', dest='parser', default='syslog',
        help=_('select a parser (default: syslog)'))
    add('-z', '--unzip', action='store_true', dest='unzip',
        help=_('include files compressed with gzip'))
    add('-t', action='store', dest='tzone',
        help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
    # nargs must be * (not +) so --list-parsers etc. work with no file args
    add('files', metavar='file', nargs='*',
        help=_('specify input files'))

    self.arg_parser.add_argument_group(self.filter_args)
    self.arg_parser.add_argument_group(self.output_args)

    self.args = self.arg_parser.parse_args()
Set config options
def _rest_request_to_json(self, address, object_path, service_name, requests_config, tags, *args, **kwargs): """ Query the given URL and return the JSON response """ response = self._rest_request(address, object_path, service_name, requests_config, tags, *args, **kwargs) try: response_json = response.json() except JSONDecodeError as e: self.service_check( service_name, AgentCheck.CRITICAL, tags=['url:%s' % self._get_url_base(address)] + tags, message='JSON Parse failed: {0}'.format(e), ) raise return response_json
Query the given URL and return the JSON response
def main():
    """Main function when running as a program."""
    global args
    args = parse_args()
    if not args:
        return 1
    state = MyState(args)
    for path in args.paths:
        if os.path.isdir(path):
            walk_dir(path, args, state)
        else:
            # single file: process it within its parent directory
            safe_process_files(os.path.dirname(path),
                               [os.path.basename(path)],
                               args, state)
        if state.should_quit():
            break
    if state.failed_files:
        sys.stderr.write("error: %i/%i AEADs failed\n"
                         % (len(state.failed_files), state.file_count))
        return 1
    if args.debug:
        sys.stderr.write("Successfully processed %i AEADs\n"
                         % (state.file_count))
Main function when running as a program.
def calc_crc16(buf): """ Drop in pure python replacement for ekmcrc.c extension. Args: buf (bytes): String or byte array (implicit Python 2.7 cast) Returns: str: 16 bit CRC per EKM Omnimeters formatted as hex string. """ crc_table = [0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241, 0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440, 0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40, 0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841, 0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40, 0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41, 0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641, 0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040, 0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240, 0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441, 0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41, 0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840, 0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41, 0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40, 0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640, 0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041, 0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240, 0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441, 0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41, 0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840, 0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41, 0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40, 0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640, 0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041, 0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241, 0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440, 0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40, 0x5a00, 0x9ac1, 0x9b81, 0x5b40, 
0x9901, 0x59c0, 0x5880, 0x9841, 0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40, 0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41, 0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641, 0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040] crc = 0xffff for c in buf: index = (crc ^ ord(c)) & 0xff crct = crc_table[index] crc = (crc >> 8) ^ crct crc = (crc << 8) | (crc >> 8) crc &= 0x7F7F return "%04x" % crc
Drop in pure python replacement for ekmcrc.c extension. Args: buf (bytes): String or byte array (implicit Python 2.7 cast) Returns: str: 16 bit CRC per EKM Omnimeters formatted as hex string.
def flush_template(context, declaration=None, reconstruct=True):
    """Emit the code needed to flush the buffer.

    Will only emit the yield and clear if the buffer is known to be dirty.
    """
    if declaration is None:
        declaration = Line(0, '')

    if {'text', 'dirty'}.issubset(context.flag):
        yield declaration.clone(line='yield "".join(_buffer)')
        context.flag.remove('text')
        # This will force a new buffer to be constructed.
        context.flag.remove('dirty')
        if reconstruct:
            for line in ensure_buffer(context):
                yield line

    # a bare 'yield' declaration is passed through as-is
    if declaration.stripped == 'yield':
        yield declaration
Emit the code needed to flush the buffer. Will only emit the yield and clear if the buffer is known to be dirty.
def evaluate_with_predictions(data_file, predictions):
    """Evaluate the dataset in ``data_file`` against ``predictions`` and
    return the exact-match score.

    Warns on stderr (but proceeds) when the dataset version is not the
    expected one.
    """
    expected_version = '1.1'
    with open(data_file) as dataset_file:
        dataset_json = json.load(dataset_file)
    if dataset_json['version'] != expected_version:
        print('Evaluation expects v-' + expected_version +
              ', but got dataset with v-' + dataset_json['version'],
              file=sys.stderr)
    dataset = dataset_json['data']
    result = _evaluate(dataset, predictions)
    return result['exact_match']
Evalutate with predictions/
def permission_required(perm, queryset=None, login_url=None, raise_exception=False):
    """Permission check decorator for function-base generic view

    This decorator works as function decorator

    Parameters
    ----------
    perm : string
        A permission string
    queryset : queryset or model
        A queryset or model for finding object.
        With classbased generic view, ``None`` for using view default
        queryset.  When the view does not define ``get_queryset``,
        ``queryset``, ``get_object``, or ``object`` then ``obj=None`` is
        used to check permission.
        With functional generic view, ``None`` for using passed queryset.
        When non queryset was passed then ``obj=None`` is used to check
        permission.

    Examples
    --------
    >>> @permission_required('auth.change_user')
    >>> def update_auth_user(request, *args, **kwargs):
    ...     pass
    """
    def wrapper(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def inner(request, *args, **kwargs):
            _kwargs = copy.copy(kwargs)
            # overwrite queryset if explicitly specified
            if queryset:
                _kwargs['queryset'] = queryset
            # pick the object getter matching the view flavour
            if 'date_field' in _kwargs:
                fn = get_object_from_date_based_view
            else:
                fn = get_object_from_list_detail_view
            if fn.validate(request, *args, **_kwargs):
                obj = fn(request, *args, **_kwargs)
            else:
                # required arguments were not passed; check without object
                obj = None
            if not request.user.has_perm(perm, obj=obj):
                if raise_exception:
                    raise PermissionDenied
                return redirect_to_login(request, login_url)
            return view_func(request, *args, **_kwargs)
        return inner
    return wrapper
Permission check decorator for function-base generic view This decorator works as function decorator Parameters ---------- perm : string A permission string queryset : queryset or model A queryset or model for finding object. With classbased generic view, ``None`` for using view default queryset. When the view does not define ``get_queryset``, ``queryset``, ``get_object``, or ``object`` then ``obj=None`` is used to check permission. With functional generic view, ``None`` for using passed queryset. When non queryset was passed then ``obj=None`` is used to check permission. Examples -------- >>> @permission_required('auth.change_user') >>> def update_auth_user(request, *args, **kwargs): ... pass
def header(heading_text, header_level, style="atx"):
    """Return a header of specified level.

    Keyword arguments:
    style -- Specifies the header style (default atx).  The "atx" style
        uses hash signs and has 6 levels.  The "setext" style uses dashes
        or equals signs for headers of levels 1 and 2 respectively, and is
        limited to those two levels.  Specifying a level outside of the
        style's range results in a ValueError.

    >>> header("Main Title", 1)
    '# Main Title'
    >>> header("Smaller subtitle", 4)
    '#### Smaller subtitle'
    >>> header("Setext style", 2, style="setext")
    'Setext style\\n---'
    """
    if not isinstance(header_level, int):
        raise TypeError("header_level must be int")
    if style not in ("atx", "setext"):
        raise ValueError("Invalid style %s (choose 'atx' or 'setext')" % style)
    if style == "atx":
        if not 1 <= header_level <= 6:
            raise ValueError("Invalid level %d for atx" % header_level)
        return "#" * header_level + " " + esc_format(heading_text)
    # setext: only levels 1 and 2 exist
    if not 0 < header_level < 3:
        raise ValueError("Invalid level %d for setext" % header_level)
    underline = "=" if header_level == 1 else "-"
    return esc_format(heading_text) + ("\n%s" % (underline * 3))
Return a header of specified level. Keyword arguments: style -- Specifies the header style (default atx). The "atx" style uses hash signs, and has 6 levels. The "setext" style uses dashes or equals signs for headers of levels 1 and 2 respectively, and is limited to those two levels. Specifying a level outside of the style's range results in a ValueError. >>> header("Main Title", 1) '# Main Title' >>> header("Smaller subtitle", 4) '#### Smaller subtitle' >>> header("Setext style", 2, style="setext") 'Setext style\\n---'
def get_alert(self, id, **kwargs):  # noqa: E501
    """Get a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # async: hand back the request thread directly
        return self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    return data
Get a specific alert # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_alert(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :return: ResponseContainerAlert If the method is called asynchronously, returns the request thread.
def _shutdown_transport(self): """Unwrap a Python 2.6 SSL socket, so we can call shutdown()""" if self.sock is not None: try: unwrap = self.sock.unwrap except AttributeError: return try: self.sock = unwrap() except ValueError: # Failure within SSL might mean unwrap exists but socket is not # deemed wrapped pass
Unwrap a Python 2.6 SSL socket, so we can call shutdown()
def format_response_data_type(self, response_data):
    """Coerce numeric string fields in an API response to int/float.

    Field names matching the configured ``response_format`` regex patterns
    are converted in place; values that fail to parse are left untouched.

    :param response_data: list of dicts returned by the API; anything else
        is returned unchanged.
    """
    # Only list responses carry formattable records.  NOTE: the original
    # guard was inverted (`isinstance(response_data, list)`) and returned
    # lists untouched, so the conversion loop below could never run on its
    # intended input.
    if not isinstance(response_data, list):
        return response_data

    int_match_str = "|".join(self.config["response_format"]["int"])
    float_match_str = "|".join(self.config["response_format"]["float"])
    for item in response_data:
        for key in item:
            try:
                if re.search(int_match_str, key) is not None:
                    item[key] = helpers.str2num(item[key], "int")
                elif re.search(float_match_str, key) is not None:
                    item[key] = helpers.str2num(item[key], "float")
            except ValueError:
                # unparseable value: keep the original string
                continue
    return response_data
格式化返回的值为正确的类型 :param response_data: 返回的数据
def getclosurevars(func):
    """Map the free variables of *func* to their current values.

    Returns a dict with keys 'nonlocal', 'global' and 'builtin' (each
    mapping names to values as seen by the function body) plus 'unbound',
    a set of names that could not be resolved.

    Note: Modified function from the Python 3.5 inspect standard library
    module.  Copyright (c) 2001-2017 Python Software Foundation; All Rights
    Reserved.  See also py-cloud-compute-cannon/NOTICES.
    """
    if inspect.ismethod(func):
        func = func.__func__
    elif not inspect.isroutine(func):
        raise TypeError("'{!r}' is not a Python function".format(func))

    # AMVMOD: deal with python 2 builtins that don't define these
    code = getattr(func, '__code__', None)
    closure = getattr(func, '__closure__', None)
    co_names = getattr(code, 'co_names', ())
    glb = getattr(func, '__globals__', {})

    # Nonlocal references live in co_freevars and are resolved positionally
    # against the closure cells.
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {
            var: cell.cell_contents
            for var, cell in zip(code.co_freevars, func.__closure__)
        }

    # Globals and builtins are both named in co_names: try the module
    # globals first, then the builtin namespace.
    builtin_ns = glb.get("__builtins__", builtins.__dict__)
    if inspect.ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = glb[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)

    return {'nonlocal': nonlocal_vars,
            'global': global_vars,
            'builtin': builtin_vars,
            'unbound': unbound_names}
Get the mapping of free variables to their current values. Returns a named tuple of dicts mapping the current nonlocal, global and builtin references as seen by the body of the function. A final set of unbound names that could not be resolved is also provided. Note: Modified function from the Python 3.5 inspect standard library module Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" See also py-cloud-compute-cannon/NOTICES.
def _partition(episodes):
    """Divides metrics data into true rollouts vs off-policy estimates."""
    from ray.rllib.evaluation.sampler import RolloutMetrics

    rollouts, estimates = [], []
    for episode in episodes:
        if isinstance(episode, RolloutMetrics):
            rollouts.append(episode)
        elif isinstance(episode, OffPolicyEstimate):
            estimates.append(episode)
        else:
            raise ValueError("Unknown metric type: {}".format(episode))
    return rollouts, estimates
Divides metrics data into true rollouts vs off-policy estimates.
def qteMakeAppletActive(self, applet: (QtmacsApplet, str)):
    """
    Make ``applet`` visible and give it the focus.

    If ``applet`` is not yet visible it will replace the currently
    active applet, otherwise only the focus will shift.

    The ``applet`` parameter can either be an instance of
    ``QtmacsApplet`` or a string denoting an applet ID. In the
    latter case the ``qteGetAppletHandle`` method is used to fetch
    the respective applet instance.

    |Args|

    * ``applet`` (**QtmacsApplet**, **str**): the applet to activate.

    |Returns|

    * **bool**: whether or not an applet was activated.

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # If ``applet`` was specified by its ID (ie. a string) then
    # fetch the associated ``QtmacsApplet`` instance. If
    # ``applet`` is already an instance of ``QtmacsApplet`` then
    # use it directly.
    if isinstance(applet, str):
        appletObj = self.qteGetAppletHandle(applet)
    else:
        appletObj = applet

    # Sanity check: return if the applet does not exist.
    if appletObj not in self._qteAppletList:
        return False

    # If ``appletObj`` is a mini applet then double check that it
    # is actually installed and visible. If it is a conventional
    # applet then insert it into the layout.
    if self.qteIsMiniApplet(appletObj):
        if appletObj is not self._qteMiniApplet:
            # Fix: removed stray debug print() statements that leaked the
            # applet objects to stdout alongside this warning.
            self.qteLogger.warning('Wrong mini applet. Not activated.')
            return False
        if not appletObj.qteIsVisible():
            appletObj.show(True)
    else:
        if not appletObj.qteIsVisible():
            # Add the applet to the layout by replacing the
            # currently active applet.
            self.qteReplaceAppletInLayout(appletObj)

    # Update the qteActiveApplet pointer. Note that the actual
    # focusing is done exclusively in the focus manager.
    self._qteActiveApplet = appletObj
    return True
Make ``applet`` visible and give it the focus. If ``applet`` is not yet visible it will replace the currently active applet, otherwise only the focus will shift. The ``applet`` parameter can either be an instance of ``QtmacsApplet`` or a string denoting an applet ID. In the latter case the ``qteGetAppletHandle`` method is used to fetch the respective applet instance. |Args| * ``applet`` (**QtmacsApplet**, **str**): the applet to activate. |Returns| * **bool**: whether or not an applet was activated. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
def delete(
        self, resource_group_name, if_match, provisioning_service_name,
        certificate_name, certificatename=None, certificateraw_bytes=None,
        certificateis_verified=None, certificatepurpose=None,
        certificatecreated=None, certificatelast_updated=None,
        certificatehas_private_key=None, certificatenonce=None,
        custom_headers=None, raw=False, **operation_config):
    """Delete the Provisioning Service Certificate.

    Deletes the specified certificate associated with the Provisioning
    Service.

    :param resource_group_name: Resource group identifier.
    :type resource_group_name: str
    :param if_match: ETag of the certificate
    :type if_match: str
    :param provisioning_service_name: The name of the provisioning service.
    :type provisioning_service_name: str
    :param certificate_name: This is a mandatory field, and is the logical
     name of the certificate that the provisioning service will access by.
    :type certificate_name: str
    :param certificatename: This is optional, and it is the Common Name of
     the certificate.
    :type certificatename: str
    :param certificateraw_bytes: Raw data within the certificate.
    :type certificateraw_bytes: bytearray
    :param certificateis_verified: Indicates if certificate has been
     verified by owner of the private key.
    :type certificateis_verified: bool
    :param certificatepurpose: A description that mentions the purpose of
     the certificate. Possible values include: 'clientAuthentication',
     'serverAuthentication'
    :type certificatepurpose: str or
     ~azure.mgmt.iothubprovisioningservices.models.CertificatePurpose
    :param certificatecreated: Time the certificate is created.
    :type certificatecreated: datetime
    :param certificatelast_updated: Time the certificate is last updated.
    :type certificatelast_updated: datetime
    :param certificatehas_private_key: Indicates if the certificate
     contains a private key.
    :type certificatehas_private_key: bool
    :param certificatenonce: Random number generated to indicate Proof of
     Possession.
    :type certificatenonce: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
    """
    # Construct URL (template stored on the method's metadata attribute)
    url = self.delete.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str'),
        'certificateName': self._serialize.url("certificate_name", certificate_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters: every optional certificate.* field is passed
    # as a query parameter only when explicitly provided.
    query_parameters = {}
    if certificatename is not None:
        query_parameters['certificate.name'] = self._serialize.query("certificatename", certificatename, 'str')
    if certificateraw_bytes is not None:
        query_parameters['certificate.rawBytes'] = self._serialize.query("certificateraw_bytes", certificateraw_bytes, 'bytearray')
    if certificateis_verified is not None:
        query_parameters['certificate.isVerified'] = self._serialize.query("certificateis_verified", certificateis_verified, 'bool')
    if certificatepurpose is not None:
        query_parameters['certificate.purpose'] = self._serialize.query("certificatepurpose", certificatepurpose, 'str')
    if certificatecreated is not None:
        query_parameters['certificate.created'] = self._serialize.query("certificatecreated", certificatecreated, 'iso-8601')
    if certificatelast_updated is not None:
        query_parameters['certificate.lastUpdated'] = self._serialize.query("certificatelast_updated", certificatelast_updated, 'iso-8601')
    if certificatehas_private_key is not None:
        query_parameters['certificate.hasPrivateKey'] = self._serialize.query("certificatehas_private_key", certificatehas_private_key, 'bool')
    if certificatenonce is not None:
        query_parameters['certificate.nonce'] = self._serialize.query("certificatenonce", certificatenonce, 'str')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers (If-Match carries the mandatory ETag)
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    # 200 and 204 are the only documented success codes for this operation
    if response.status_code not in [200, 204]:
        raise models.ErrorDetailsException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
Delete the Provisioning Service Certificate. Deletes the specified certificate assosciated with the Provisioning Service. :param resource_group_name: Resource group identifier. :type resource_group_name: str :param if_match: ETag of the certificate :type if_match: str :param provisioning_service_name: The name of the provisioning service. :type provisioning_service_name: str :param certificate_name: This is a mandatory field, and is the logical name of the certificate that the provisioning service will access by. :type certificate_name: str :param certificatename: This is optional, and it is the Common Name of the certificate. :type certificatename: str :param certificateraw_bytes: Raw data within the certificate. :type certificateraw_bytes: bytearray :param certificateis_verified: Indicates if certificate has been verified by owner of the private key. :type certificateis_verified: bool :param certificatepurpose: A description that mentions the purpose of the certificate. Possible values include: 'clientAuthentication', 'serverAuthentication' :type certificatepurpose: str or ~azure.mgmt.iothubprovisioningservices.models.CertificatePurpose :param certificatecreated: Time the certificate is created. :type certificatecreated: datetime :param certificatelast_updated: Time the certificate is last updated. :type certificatelast_updated: datetime :param certificatehas_private_key: Indicates if the certificate contains a private key. :type certificatehas_private_key: bool :param certificatenonce: Random number generated to indicate Proof of Possession. :type certificatenonce: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
def source_file(pymux, variables):
    """Source a configuration file: feed it line by line to the command
    handler. Wraps any IOError in a CommandException."""
    filename = os.path.expanduser(variables['<filename>'])
    try:
        with open(filename, 'rb') as config:
            for raw_line in config:
                handle_command(pymux, raw_line.decode('utf-8'))
    except IOError as e:
        raise CommandException('IOError: %s' % (e, ))
Source configuration file.
def run(self): """Run flux analysis command.""" # Load compound information def compound_name(id): if id not in self._model.compounds: return id return self._model.compounds[id].properties.get('name', id) # Reaction genes information def reaction_genes_string(id): if id not in self._model.reactions: return '' return self._model.reactions[id].properties.get('genes', '') reaction = self._get_objective() if not self._mm.has_reaction(reaction): self.fail( 'Specified reaction is not in model: {}'.format(reaction)) loop_removal = self._get_loop_removal_option() if loop_removal == 'none': result = self.run_fba(reaction) elif loop_removal == 'l1min': result = self.run_fba_minimized(reaction) elif loop_removal == 'tfba': result = self.run_tfba(reaction) optimum = None total_reactions = 0 nonzero_reactions = 0 for reaction_id, flux in sorted(result): total_reactions += 1 if abs(flux) > self._args.epsilon: nonzero_reactions += 1 if abs(flux) > self._args.epsilon or self._args.all_reactions: rx = self._mm.get_reaction(reaction_id) rx_trans = rx.translated_compounds(compound_name) genes = reaction_genes_string(reaction_id) print('{}\t{}\t{}\t{}'.format( reaction_id, flux, rx_trans, genes)) # Remember flux of requested reaction if reaction_id == reaction: optimum = flux logger.info('Objective flux: {}'.format(optimum)) logger.info('Reactions at zero flux: {}/{}'.format( total_reactions - nonzero_reactions, total_reactions))
Run flux analysis command.
def Serialize(self, writer):
    """
    Serialize object to the given writer.

    Args:
        writer (neo.IO.BinaryWriter): target binary writer.

    Note:
        Failures while writing the hashes are logged, not propagated.
    """
    try:
        writer.WriteByte(self.Type)
        writer.WriteHashes(self.Hashes)
    except Exception as err:
        logger.error(f"COULD NOT WRITE INVENTORY HASHES ({self.Type} {self.Hashes}) {err}")
Serialize object. Raises: Exception: if hash writing fails. Args: writer (neo.IO.BinaryWriter):
def par_relax_AX(i):
    """Parallel implementation of relaxation if option ``RelaxParam`` != 1.0.

    Operates in place on the module-level shared arrays (``mp_*``) for
    variable group ``i``; group boundaries come from ``mp_grp``.
    """
    global mp_X
    global mp_Xnr
    global mp_DX
    global mp_DXnr
    # Snapshot the non-relaxed values before (possibly) overwriting mp_X/mp_DX.
    mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]]
    mp_DXnr[i] = mp_DX[i]
    # Only blend with mp_Y1/mp_Y0 when a relaxation parameter is in effect.
    if mp_rlx != 1.0:
        grpind = slice(mp_grp[i], mp_grp[i+1])
        mp_X[grpind] = mp_rlx * mp_X[grpind] + (1-mp_rlx)*mp_Y1[grpind]
        mp_DX[i] = mp_rlx*mp_DX[i] + (1-mp_rlx)*mp_Y0[i]
Parallel implementation of relaxation if option ``RelaxParam`` != 1.0.
def create_provider_directory(provider, redirect_uri):
    """Helper function for creating a provider directory.

    Creates a Stormpath directory named ``<application>-<provider>`` using
    the client id/secret from ``settings.STORMPATH_SOCIAL`` and maps it to
    the application as a non-default account store.
    """
    # Directory backed by the social login provider's credentials.
    dir = CLIENT.directories.create({
        'name': APPLICATION.name + '-' + provider,
        'provider': {
            'client_id': settings.STORMPATH_SOCIAL[provider.upper()]['client_id'],
            'client_secret': settings.STORMPATH_SOCIAL[provider.upper()]['client_secret'],
            'redirect_uri': redirect_uri,
            'provider_id': provider,
        },
    })

    # Attach the directory to the application with low priority (index 99)
    # so it never becomes the default account/group store.
    APPLICATION.account_store_mappings.create({
        'application': APPLICATION,
        'account_store': dir,
        'list_index': 99,
        'is_default_account_store': False,
        'is_default_group_store': False,
    })
Helper function for creating a provider directory
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied covariance model.

    Replicates the single shared covariance matrix once per component and
    delegates to the full-covariance implementation.
    """
    n_components = means.shape[0]
    tiled_covars = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, tiled_covars)
Compute Gaussian log-density at X for a tied model.
def get(self, request, uri):
    """
    Return published node or specified version.

    JSON Response:
        {uri: x, content: y}

    Raises Http404 when the node has no published content.
    """
    decoded_uri = self.decode_uri(uri)
    node = cio.get(decoded_uri, lazy=False)

    if node.content is None:
        raise Http404

    payload = {'uri': node.uri, 'content': node.content}
    return self.render_to_json(payload)
Return published node or specified version. JSON Response: {uri: x, content: y}
def sg_mse(tensor, opt):
    r"""Returns squared error between `tensor` and `target`.

    Args:
      tensor: A `Tensor`.
      opt:
        target: A `Tensor` with the same shape and dtype as `tensor`.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
      A `Tensor` of the same shape and dtype as `tensor`

    For example,

    ```
    tensor = [[34, 11, 40], [13, 30, 42]]
    target = [[34, 10, 41], [14, 31, 40]]
    tensor.sg_mse(target=target) => [[ 0. 1. 1.] [ 1. 1. 4.]]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'

    # element-wise squared error, named 'mse' for the graph
    squared_error = tf.square(tensor - opt.target)
    out = tf.identity(squared_error, 'mse')

    # add loss summary for tensorboard
    tf.sg_summary_loss(out, name=opt.name)

    return out
r"""Returns squared error between `tensor` and `target`. Args: tensor: A `Tensor`. opt: target: A `Tensor` with the same shape and dtype as `tensor`. name: A `string`. A name to display in the tensor board web UI. Returns: A `Tensor` of the same shape and dtype as `tensor` For example, ``` tensor = [[34, 11, 40], [13, 30, 42]] target = [[34, 10, 41], [14, 31, 40]] tensor.sg_mse(target=target) => [[ 0. 1. 1.] [ 1. 1. 4.]] ```
def create_channels(chan_name=None, n_chan=None):
    """Create instance of Channels with random xyz coordinates

    Parameters
    ----------
    chan_name : list of str
        names of the channels
    n_chan : int
        if chan_name is not specified, this defines the number of channels

    Returns
    -------
    instance of Channels
        where the location of the channels is random
    """
    if chan_name is None:
        if n_chan is None:
            raise TypeError('You need to specify either the channel names (chan_name) or the number of channels (n_chan)')
        # generate placeholder names for the requested number of channels
        chan_name = _make_chan_name(n_chan)
    else:
        n_chan = len(chan_name)

    # random coordinates scaled to +/- tens of units, 2 decimal places
    xyz = round(random.randn(n_chan, 3) * 10, decimals=2)
    return Channels(chan_name, xyz)
Create instance of Channels with random xyz coordinates Parameters ---------- chan_name : list of str names of the channels n_chan : int if chan_name is not specified, this defines the number of channels Returns ------- instance of Channels where the location of the channels is random
def relieve_state_machines(self, model, prop_name, info):
    """ The method relieves observed models before those get removed from the list of state_machines
        held by the observed StateMachineManagerModel. The method registers as an observer of the
        observable StateMachineManagerModel.state_machines."""
    if info['method_name'] == '__setitem__':
        # Insertion of a new state machine: nothing to relieve.
        pass
    elif info['method_name'] == '__delitem__':
        # Stop observing the state machine model that is about to be removed,
        # and its root state model (when present).
        self.relieve_model(self.state_machine_manager_model.state_machines[info['args'][0]])
        if self.state_machine_manager_model.state_machines[info['args'][0]].root_state:
            self.relieve_model(self.state_machine_manager_model.state_machines[info['args'][0]].root_state)
        # otherwise relieved by root_state assign notification
        # self.logger.info(NotificationOverview(info))
    else:
        # Unexpected mutation method on the observed list — surface it.
        self.logger.warning(NotificationOverview(info))
The method relieves observed models before those get removed from the list of state_machines held by the observed StateMachineManagerModel. The method registers as an observer of the observable StateMachineManagerModel.state_machines.
def comments(self):
    """The AST comments, collected lazily from the grammar's children."""
    if self._comments is None:
        collected = []
        for child in self.grammar.children:
            if child.is_type(TokenType.comment):
                collected.append(child)
        self._comments = collected
    return self._comments
The AST comments.
def log(self, cause=None, do_message=True, custom_msg=None):
    """
    Log data about the exception currently being handled — must be called
    inside the ``except`` block.

    A traceback is logged at most once: an MD5 of the formatted traceback
    is kept in ``self._db`` and repeated occurrences are skipped.

    :param cause: optional cause object appended to the log message.
    :param do_message: unused; kept for backward compatibility.
    :param custom_msg: optional custom message logged before the traceback.
    :return: None
    """
    # NOTE(review): the result is unused, but error_message() is an
    # external helper whose side effects are unknown, so the call is kept.
    message = error_message(self, cause=cause)
    # Fix: removed unused sys.exc_info() unpacking and the unused
    # traceback.extract_tb() result.
    traceback_formatted = traceback.format_exc()
    md5 = hashlib.md5(traceback_formatted.encode('utf-8')).hexdigest()

    if md5 in self._db:
        # This exact traceback was already logged once; stay quiet.
        return

    if custom_msg is not None and cause is not None:
        self.logger.debug('%s : %s' % (custom_msg, cause))
    elif custom_msg is not None:
        self.logger.debug(custom_msg)
    elif cause is not None:
        self.logger.debug('%s' % cause)

    self.logger.debug(traceback_formatted)
    self._db.add(md5)
Loads exception data from the current exception frame - should be called inside the except block :return:
def get_ip_address():
    """Simple utility to get host IP address.

    Opens a UDP socket "connected" to a public address (no traffic is sent)
    and reads the local endpoint; falls back to DNS resolution of the FQDN
    when the network is unreachable.
    """
    # Fix: create the socket before the try block so that ``s`` is always
    # bound when the ``finally`` clause runs (previously a failure in
    # socket() raised NameError from the finally block).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        ip_address = s.getsockname()[0]
    except socket_error as sockerr:
        if sockerr.errno != errno.ENETUNREACH:
            # Fix: bare re-raise preserves the original traceback.
            raise
        ip_address = socket.gethostbyname(socket.getfqdn())
    finally:
        s.close()
    return ip_address
Simple utility to get host IP address.
def main(argString=None): """The main function of this module. :param argString: the options. :type argString: list of strings """ # Getting and checking the options args = parseArgs(argString) checkArgs(args) logger.info("Options used:") for key, value in vars(args).iteritems(): logger.info(" --{} {}".format(key.replace("_", "-"), value)) # Checking if the output directory exists, creating it otherwise if not os.path.isdir(args.out_dir): os.mkdir(args.out_dir) # Ordering the directories according to their name qc_dir = order_qc_dir(args.qc_dir) # First, we want to merge the required files merge_required_files(qc_dir, args.out_dir) # Then, we want to copy the initial_files file copy_initial_files(os.path.join(qc_dir[0], "initial_files.txt"), args.out_dir) # Get the final number of markers and samples final_nb_markers, final_nb_samples = get_final_numbers( os.path.join(qc_dir[-1], "final_files.txt"), args.out_dir, ) # Getting the steps summary file (TeX) summary_files = get_summary_files(qc_dir) # Generating the report generate_report(args.out_dir, summary_files, final_nb_markers, final_nb_samples, args)
The main function of this module. :param argString: the options. :type argString: list of strings
def tasks_from_nids(self, nids):
    """
    Return the list of tasks associated to the given list of node
    identifiers (nids).

    .. note::

        Invalid ids are ignored
    """
    # Accept a single id as well as any iterable of ids.
    if not isinstance(nids, collections.abc.Iterable):
        nids = [nids]

    id2task = {task.node_id: task for task in self.iflat_tasks()}

    found = []
    for nid in nids:
        task = id2task.get(nid)
        if task is not None:
            found.append(task)
    return found
Return the list of tasks associated to the given list of node identifiers (nids). .. note:: Invalid ids are ignored
def depends_on(self, dependency):
    """
    List of packages that depend on dependency

    :param dependency: package name, e.g. 'vext' or 'Pillow'
    :return: list of package-info dicts whose 'requires' mentions it
    """
    dependents = []
    for package in self.package_info():
        if dependency in package.get("requires", ""):
            dependents.append(package)
    return dependents
List of packages that depend on dependency :param dependency: package name, e.g. 'vext' or 'Pillow'
def addresses_from_address_families(address_mapper, specs): """Given an AddressMapper and list of Specs, return matching BuildFileAddresses. :raises: :class:`ResolveError` if: - there were no matching AddressFamilies, or - the Spec matches no addresses for SingleAddresses. :raises: :class:`AddressLookupError` if no targets are matched for non-SingleAddress specs. """ # Capture a Snapshot covering all paths for these Specs, then group by directory. snapshot = yield Get(Snapshot, PathGlobs, _spec_to_globs(address_mapper, specs)) dirnames = {dirname(f) for f in snapshot.files} address_families = yield [Get(AddressFamily, Dir(d)) for d in dirnames] address_family_by_directory = {af.namespace: af for af in address_families} matched_addresses = OrderedSet() for spec in specs: # NB: if a spec is provided which expands to some number of targets, but those targets match # --exclude-target-regexp, we do NOT fail! This is why we wait to apply the tag and exclude # patterns until we gather all the targets the spec would have matched without them. try: addr_families_for_spec = spec.matching_address_families(address_family_by_directory) except Spec.AddressFamilyResolutionError as e: raise raise_from(ResolveError(e), e) try: all_addr_tgt_pairs = spec.address_target_pairs_from_address_families(addr_families_for_spec) except Spec.AddressResolutionError as e: raise raise_from(AddressLookupError(e), e) except SingleAddress._SingleAddressResolutionError as e: _raise_did_you_mean(e.single_address_family, e.name, source=e) matched_addresses.update( addr for (addr, tgt) in all_addr_tgt_pairs if specs.matcher.matches_target_address_pair(addr, tgt) ) # NB: This may be empty, as the result of filtering by tag and exclude patterns! yield BuildFileAddresses(tuple(matched_addresses))
Given an AddressMapper and list of Specs, return matching BuildFileAddresses. :raises: :class:`ResolveError` if: - there were no matching AddressFamilies, or - the Spec matches no addresses for SingleAddresses. :raises: :class:`AddressLookupError` if no targets are matched for non-SingleAddress specs.
def _matcher(self, other):
    """
    QueryCGRContainer < CGRContainer
    QueryContainer < QueryCGRContainer[more general]

    Build a GraphMatcher for subgraph matching of self against other.
    """
    # NOTE: the comparison lambdas are intentionally asymmetric:
    # against a plain CGRContainer the query side (y) drives __eq__
    # (``y == x``), while between two query containers plain ordering
    # (``x == y``) is used.
    if isinstance(other, CGRContainer):
        return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x)
    elif isinstance(other, QueryCGRContainer):
        return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
    raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general]
def decode(self, bytes, raw=False):
    """decode(bytearray, raw=False) -> value

    Decodes the given bytearray according to this PrimitiveType
    definition.

    NOTE: The parameter ``raw`` is present to adhere to the
    ``decode()`` inteface, but has no effect for PrimitiveType
    definitions.
    """
    # NOTE: buffer() is Python 2 only — this module targets Python 2.
    return struct.unpack(self.format, buffer(bytes))[0]
decode(bytearray, raw=False) -> value Decodes the given bytearray according to this PrimitiveType definition. NOTE: The parameter ``raw`` is present to adhere to the ``decode()`` inteface, but has no effect for PrimitiveType definitions.
def get_seq(self, obj, default=None):
    """Return ``obj`` as a sequence.

    Sequences pass through unchanged; a number is wrapped in a list;
    ``None`` falls back to ``default`` (with a warning) when one is given.
    Anything else raises ValueError.
    """
    if is_sequence(obj):
        return obj
    elif is_number(obj):
        return [obj]
    elif obj is None and default is not None:
        log.warning('using default value (%s)'%(default))
        return self.get_seq(default)
    raise ValueError('expected sequence|number but got %s'%(type(obj)))
Return sequence.
def obfn_f(self, X=None):
    r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
    \mathbf{s} \|_2^2` (defaults to the current iterate ``self.X``).
    """
    if X is None:
        X = self.X
    residual = self.D.dot(X) - self.S
    return 0.5 * np.linalg.norm(residual.ravel()) ** 2
r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2`.
def get_new_author(self, api_author):
    """
    Instantiate a new Author from api data.

    :param api_author: the api data for the Author
    :return: the new Author
    """
    extra_fields = self.api_object_data("author", api_author)
    return Author(site_id=self.site_id,
                  wp_id=api_author["ID"],
                  **extra_fields)
Instantiate a new Author from api data. :param api_author: the api data for the Author :return: the new Author
def can_create(self):
    """
    Return True only when key_name, value_name and value_type are all
    present (truthy) in the data, i.e. the Registry Key can be created.
    """
    required_fields = ('key_name', 'value_name', 'value_type')
    return all(self.data.get(field) for field in required_fields)
If the key_name, value_name, and value_type has been provided returns that the Registry Key can be created, otherwise returns that the Registry Key cannot be created. Returns:
def _aligned_series(*many_series):
    """
    Return a new list of series containing the data in the input series, but
    with their indices aligned. NaNs will be filled in for missing values.

    Parameters
    ----------
    *many_series
        The series to align.

    Returns
    -------
    aligned_series : iterable[array-like]
        A new list of series containing the data in the input series, but
        with their indices aligned. NaNs will be filled in for missing
        values.

    Notes
    -----
    On the fast path the input tuple is returned as-is; otherwise a
    generator of aligned columns is returned.
    """
    head = many_series[0]
    tail = many_series[1:]
    n = len(head)
    if (isinstance(head, np.ndarray) and
            all(len(s) == n and isinstance(s, np.ndarray) for s in tail)):
        # optimization: ndarrays of the same length are already aligned
        return many_series

    # dataframe has no ``itervalues`` — concat aligns on the index, then
    # each column is yielded back out.
    return (
        v
        for _, v in iteritems(pd.concat(map(_to_pandas, many_series), axis=1))
    )
Return a new list of series containing the data in the input series, but with their indices aligned. NaNs will be filled in for missing values. Parameters ---------- *many_series The series to align. Returns ------- aligned_series : iterable[array-like] A new list of series containing the data in the input series, but with their indices aligned. NaNs will be filled in for missing values.
def key_from_protobuf(pb):
    """Factory method for creating a key based on a protobuf.

    The protobuf should be one returned from the Cloud Datastore
    Protobuf API.

    :type pb: :class:`.entity_pb2.Key`
    :param pb: The Protobuf representing the key.

    :rtype: :class:`google.cloud.datastore.key.Key`
    :returns: a new `Key` instance
    """
    path_args = []
    for element in pb.path:
        path_args.append(element.kind)
        # This is safe: we expect proto objects returned will only have
        # one of `name` (string) or `id` (int64) set.
        if element.id:
            path_args.append(element.id)
        if element.name:
            path_args.append(element.name)

    # Empty proto strings become None.
    project = pb.partition_id.project_id or None
    namespace = pb.partition_id.namespace_id or None

    return Key(*path_args, namespace=namespace, project=project)
Factory method for creating a key based on a protobuf. The protobuf should be one returned from the Cloud Datastore Protobuf API. :type pb: :class:`.entity_pb2.Key` :param pb: The Protobuf representing the key. :rtype: :class:`google.cloud.datastore.key.Key` :returns: a new `Key` instance
def root():
    """Home page: welcome message plus a table of links to the API."""
    base = request.url
    # (label, path suffix) pairs, in display order; duplicates intentional.
    link_specs = [
        ("Health", "health"),
        ("Version", "version"),
        ("Allowed target states", "allowed_target_sdp_states"),
        ("SDP state", "state"),
        ("SDP target state", "state/target"),
        ("SDP target state", "target_state"),
        ("SDP current state", "state/current"),
        ("Scheduling Block Instances", "scheduling_block_instances"),
        ("Processing Blocks", "processing_blocks"),
        ("Resource Availability", "resource_availability"),
        ("Configure SBI", "configure_sbi"),
    ]
    return {
        "message": "Welcome to the SIP Master Controller (flask variant)",
        "_links": {
            "items": [
                {"Link": label, "href": base + suffix}
                for label, suffix in link_specs
            ]
        }
    }
Home page.
def security_rules_list(security_group, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    List security rules within a network security group.

    :param security_group: The network security group to query.

    :param resource_group: The resource group name assigned to the
        network security group.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.security_rules_list testnsg testgroup

    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        rules = netconn.security_rules.list(
            resource_group_name=resource_group,
            network_security_group_name=security_group
        )
        return __utils__['azurearm.paged_object_to_list'](rules)
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        return {'error': str(exc)}
.. versionadded:: 2019.2.0 List security rules within a network security group. :param security_group: The network security group to query. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rules_list testnsg testgroup
def validate_args(**args):
    """
    Check that the input query is present (exit otherwise) and fill in
    any missing arguments from DEFAULTS.
    """
    if not args['query']:
        print("\nMissing required query argument.")
        sys.exit()

    for key, value in DEFAULTS.items():
        args.setdefault(key, value)

    return args
function to check if input query is not None and set missing arguments to default value
def delete_types(self, base_key, out_key, *types):
    """
    Delete type entries from a parameter documentation.

    Filters the given ``types`` out of the ``base_key`` item in the
    :attr:`params` dictionary and stores the filtered documentation under
    a new ``'<base_key>.<out_key>'`` item. Works for ``'Results'`` like
    sections.

    Parameters
    ----------
    base_key: str
        key in the :attr:`params` dictionary
    out_key: str
        Extension for the base key (the final key will be like
        ``'%s.%s' % (base_key, out_key)``
    ``*types``
        str. The type identifier of which the documentations shall be
        deleted

    See Also
    --------
    delete_params
    """
    target_key = '%s.%s' % (base_key, out_key)
    self.params[target_key] = self.delete_types_s(self.params[base_key], types)
Method to delete a parameter from a parameter documentation. This method deletes the given `param` from the `base_key` item in the :attr:`params` dictionary and creates a new item with the original documentation without the description of the param. This method works for ``'Results'`` like sections. See the :meth:`keep_types` method for an example. Parameters ---------- base_key: str key in the :attr:`params` dictionary out_key: str Extension for the base key (the final key will be like ``'%s.%s' % (base_key, out_key)`` ``*types`` str. The type identifier of which the documentations shall deleted See Also -------- delete_params
def serializeCorpus(self):
    """
    Build a fixture for the "django-tethne_corpus" model.

    Returns
    -------
    list with a single corpus fixture dict (JSON-serializable) which can
    be written to a file.
    """
    fields = {
        "source": self.source,
        "date_created": strftime("%Y-%m-%d %H:%M:%S", gmtime()),
        "length": len(self.corpus),
    }
    return [{
        "model": "django-tethne.corpus",
        "pk": self.corpus_id,
        "fields": fields,
    }]
This method creates a fixture for the "django-tethne_corpus" model. Returns ------- corpus_details in JSON format which can written to a file.
def Expand(self):
    """Reads the contents of the current node and the full subtree. It
    then makes the subtree available until the next xmlTextReaderRead()
    call.

    Raises treeError when the underlying libxml2 call fails.
    """
    raw = libxml2mod.xmlTextReaderExpand(self._o)
    if raw is None:
        raise treeError('xmlTextReaderExpand() failed')
    return xmlNode(_obj=raw)
Reads the contents of the current node and the full subtree. It then makes the subtree available until the next xmlTextReaderRead() call
def path_glob(pattern, current_dir=None):
    """Use pathlib for ant-like patterns, like: "**/*.py"

    :param pattern: File/directory pattern to use (as string).
    :param current_dir: Current working directory (as Path, pathlib.Path, str)
    :return: Resolved Path objects (as path.Path), yielded lazily.
    """
    base = current_dir
    if not base:
        base = pathlib.Path.cwd()
    elif not isinstance(base, pathlib.Path):
        # -- CASE: string or path.Path (string-like): normalize via str().
        base = pathlib.Path(str(base))
    for match in base.glob(pattern):
        yield Path(str(match))
Use pathlib for ant-like patterns, like: "**/*.py" :param pattern: File/directory pattern to use (as string). :param current_dir: Current working directory (as Path, pathlib.Path, str) :return Resolved Path (as path.Path).
def on_created(self, event):
    """ on_created handler """
    # Watchdog filesystem hook: a new path appeared under the watched tree.
    logger.debug("file created: %s", event.src_path)
    # Directories are ignored; only regular files are (re)indexed.
    if not event.is_directory:
        self.update_file(event.src_path)
on_created handler
def delete_topic(self, topic_name, fail_not_exist=False):
    """Delete a topic entity.

    :param topic_name: The name of the topic to delete.
    :type topic_name: str
    :param fail_not_exist: Whether to raise an exception if the named topic is
     not found. If set to True, a ServiceBusResourceNotFound will be raised.
     Default value is False.
    :type fail_not_exist: bool
    :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the
     namespace is not found.
    :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the
     topic is not found and `fail_not_exist` is set to True.
    """
    try:
        return self.mgmt_client.delete_topic(topic_name, fail_not_exist=fail_not_exist)
    except requests.exceptions.ConnectionError as e:
        raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
    except azure.common.AzureMissingResourceHttpError as e:
        # Fixed copy-paste bug: the original message said "Specificed queue",
        # but this is the topic API.
        raise ServiceBusResourceNotFound("Specified topic does not exist.", e)
Delete a topic entity. :param topic_name: The name of the topic to delete. :type topic_name: str :param fail_not_exist: Whether to raise an exception if the named topic is not found. If set to True, a ServiceBusResourceNotFound will be raised. Default value is False. :type fail_not_exist: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the topic is not found and `fail_not_exist` is set to True.
def render_layout(self, form, context):
    """
    Returns safe html of the rendering of the layout.

    Fields the layout did not render are appended afterwards, so every
    form field appears exactly once in the output.
    """
    form.rendered_fields = []
    html = self.layout.render(form, self.form_style, context)

    # Idiom fixes: `field not in` instead of `not field in`, and iterate
    # the dict directly instead of calling .keys().
    for field in form.fields:
        if field not in form.rendered_fields:
            html += render_field(field, form, self.form_style, context)
    return mark_safe(html)
Returns safe html of the rendering of the layout
def make_index_lookup(list_, dict_factory=dict):
    r"""
    Args:
        list_ (list): assumed to have unique items
        dict_factory (callable): mapping constructor fed (item, index) pairs

    Returns:
        dict: mapping from item to index

    CommandLine:
        python -m utool.util_list --exec-make_index_lookup

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> list_ = [5, 3, 8, 2]
        >>> idx2_item = ut.make_index_lookup(list_)
        >>> result = ut.repr2(idx2_item, nl=False)
        >>> assert ut.dict_take(idx2_item, list_) == list(range(len(list_)))
        >>> print(result)
        {2: 3, 3: 1, 5: 0, 8: 2}
    """
    # enumerate yields (index, item); flip each pair into (item, index).
    pairs = ((item, index) for index, item in enumerate(list_))
    return dict_factory(pairs)
r""" Args: list_ (list): assumed to have unique items Returns: dict: mapping from item to index CommandLine: python -m utool.util_list --exec-make_index_lookup Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> list_ = [5, 3, 8, 2] >>> idx2_item = ut.make_index_lookup(list_) >>> result = ut.repr2(idx2_item, nl=False) >>> assert ut.dict_take(idx2_item, list_) == list(range(len(list_))) >>> print(result) {2: 3, 3: 1, 5: 0, 8: 2}
def primary_key(self, hkey, rkey=None):
    """ Construct a primary key dictionary

    You can either pass in a (hash_key[, range_key]) as the arguments, or
    you may pass in an Item itself

    """
    if isinstance(hkey, dict):
        # Item-style input: pull the key fields out of the mapping,
        # converting any Decimal back to a plain float.
        def decode(val):
            """ Convert Decimals back to primitives """
            return float(val) if isinstance(val, Decimal) else val

        key = {self.hash_key.name: decode(hkey[self.hash_key.name])}
        if self.range_key is not None:
            key[self.range_key.name] = decode(hkey[self.range_key.name])
        return key

    # Positional input: a hash key value plus an optional range key value.
    key = {self.hash_key.name: hkey}
    if self.range_key is not None:
        if rkey is None:
            raise ValueError("Range key is missing!")
        key[self.range_key.name] = rkey
    return key
Construct a primary key dictionary You can either pass in a (hash_key[, range_key]) as the arguments, or you may pass in an Item itself
def build_highlight_objects(html, highlights, uniformize_html=True):
    '''converts a dict of pretty_name --> [tuple(string, score), ...] to
    `Highlight` objects as specified above.
    '''
    if uniformize_html:
        # Normalize the markup first; on failure fall back to xpath-less
        # highlighting by setting html to None.  (Python 2 except syntax.)
        try:
            html = uniform_html(html.encode('utf-8')).decode('utf-8')
        except Exception, exc:
            logger.info('failed to get uniform_html(%d bytes) --> %s',
                        len(html), exc, exc_info=True)
            html = None

    highlight_objects = []
    for category, phrase_scores in highlights.iteritems():
        for (phrase, score) in phrase_scores:
            hl = dict(
                score=score,
                category=category,
            )
            # Prefer xpath ranges, then exact-string match, then a
            # case-insensitive regex as the last resort.
            # NOTE(review): if html is None here, `phrase in html` raises
            # TypeError — presumably make_xpath_ranges returns falsy only
            # when html is a string; confirm.
            ranges = make_xpath_ranges(html, phrase)
            if ranges:
                hl['xranges'] = [{'range': r} for r in ranges]
            elif phrase in html:
                hl['strings'] = [phrase]
            else:
                hl['regexes'] = [{
                    'regex': phrase,
                    'flags': 'i',
                }]
            highlight_objects.append(hl)
    return highlight_objects
converts a dict of pretty_name --> [tuple(string, score), ...] to `Highlight` objects as specified above.
def interface_list(env, securitygroup_id, sortby):
    """List interfaces associated with security groups."""
    mgr = SoftLayer.NetworkManager(env.client)
    table = formatting.Table(COLUMNS)
    table.sortby = sortby

    # Object mask limits the API response to the binding fields we display.
    mask = (
        '''networkComponentBindings[
            networkComponentId,
            networkComponent[
                id,
                port,
                guest[
                    id,
                    hostname,
                    primaryBackendIpAddress,
                    primaryIpAddress
                ]
            ]
        ]'''
    )
    secgroup = mgr.get_securitygroup(securitygroup_id, mask=mask)
    for binding in secgroup.get('networkComponentBindings', []):
        interface_id = binding['networkComponentId']
        try:
            interface = binding['networkComponent']
            vsi = interface['guest']
            vsi_id = vsi['id']
            hostname = vsi['hostname']
            # Port 0 is the private interface; anything else is public.
            priv_pub = 'PRIVATE' if interface['port'] == 0 else 'PUBLIC'
            ip_address = (vsi['primaryBackendIpAddress']
                          if interface['port'] == 0
                          else vsi['primaryIpAddress'])
        except KeyError:
            # Missing keys mean the caller lacks permission for the guest.
            vsi_id = "N/A"
            hostname = "Not enough permission to view"
            priv_pub = "N/A"
            ip_address = "N/A"
        table.add_row([
            interface_id,
            vsi_id,
            hostname,
            priv_pub,
            ip_address
        ])
    env.fout(table)
List interfaces associated with security groups.
def get_trial_info(current_trial):
    """Get job information for current trial."""
    if current_trial.end_time and ("_" in current_trial.end_time):
        # end time is parsed from result.json and the format
        # is like: yyyy-mm-dd_hh-MM-ss, which will be converted
        # to yyyy-mm-dd hh:MM:ss here
        time_obj = datetime.datetime.strptime(current_trial.end_time,
                                              "%Y-%m-%d_%H-%M-%S")
        end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S")
    else:
        end_time = current_trial.end_time

    # SECURITY NOTE(review): eval() is used to deserialize the stored
    # metrics/params strings; if these fields can come from untrusted
    # input this is arbitrary code execution — consider ast.literal_eval.
    if current_trial.metrics:
        metrics = eval(current_trial.metrics)
    else:
        metrics = None

    trial_info = {
        "trial_id": current_trial.trial_id,
        "job_id": current_trial.job_id,
        "trial_status": current_trial.trial_status,
        "start_time": current_trial.start_time,
        "end_time": end_time,
        "params": eval(current_trial.params.encode("utf-8")),
        "metrics": metrics
    }
    return trial_info
Get job information for current trial.
def hkdf_extract(salt, input_key_material, hash=hashlib.sha512):
    '''
    Extract a pseudorandom key suitable for use with hkdf_expand from the
    input_key_material and a salt using HMAC with the provided hash (default
    SHA-512).

    salt should be a random, application-specific byte string. If salt is
    None or the empty string, an all-zeros string of the same length as the
    hash's output (digest) size will be used instead per the RFC.

    See the HKDF draft RFC and paper for usage notes.
    '''
    hash_len = hash().digest_size
    # Idiom fix: identity comparison for None instead of `== None`.
    # An empty salt is treated the same as a missing one (RFC 5869: a
    # zero-filled string of HashLen bytes).
    if salt is None or len(salt) == 0:
        salt = bytearray((0,) * hash_len)
    # NOTE: buffer() is a Python 2 builtin; this module targets Python 2.
    return hmac.new(bytes(salt), buffer(input_key_material), hash).digest()
Extract a pseudorandom key suitable for use with hkdf_expand from the input_key_material and a salt using HMAC with the provided hash (default SHA-512). salt should be a random, application-specific byte string. If salt is None or the empty string, an all-zeros string of the same length as the hash's block size will be used instead per the RFC. See the HKDF draft RFC and paper for usage notes.
def read(message):
    """Convert a parsed protobuf message into a histogram.

    Raises if the message declares an incompatible physt version.
    """
    require_compatible_version(message.physt_compatible)

    # Currently the only implementation
    a_dict = _dict_from_v0342(message)
    return create_from_dict(a_dict, "Message")
Convert a parsed protobuf message into a histogram.
def set_speed(self, aspirate=None, dispense=None):
    """
    Set the speed (mm/second) the :any:`Pipette` plunger will move
    during :meth:`aspirate` and :meth:`dispense`

    Parameters
    ----------
    aspirate: int
        The speed in millimeters-per-second, at which the plunger will
        move while performing an aspirate

    dispense: int
        The speed in millimeters-per-second, at which the plunger will
        move while performing a dispense
    """
    # Only truthy values update the stored speeds; None (and 0) are ignored.
    for mode, value in (('aspirate', aspirate), ('dispense', dispense)):
        if value:
            self.speeds[mode] = value
    return self
Set the speed (mm/second) the :any:`Pipette` plunger will move during :meth:`aspirate` and :meth:`dispense` Parameters ---------- aspirate: int The speed in millimeters-per-second, at which the plunger will move while performing an aspirate dispense: int The speed in millimeters-per-second, at which the plunger will move while performing a dispense
def WriteEventBody(self, event):
    """Writes the body of an event object to the output.

    Args:
      event (EventObject): event.

    Raises:
      NoFormatterFound: If no event formatter can be found to match the data
          type in the event object.
    """
    output_values = self._GetOutputValues(event)

    # Column 3 holds the MACB (modified/accessed/changed/birth) flags,
    # column 6 the timestamp description ('-' when absent).
    output_values[3] = self._output_mediator.GetMACBRepresentation(event)
    output_values[6] = event.timestamp_desc or '-'

    self._WriteOutputValues(output_values)
Writes the body of an event object to the output. Args: event (EventObject): event. Raises: NoFormatterFound: If no event formatter can be found to match the data type in the event object.
def find_censored_md5ext(post_id: int) -> Optional[str]:
    "Find MD5 for a censored post's ID, return None if can't find."
    # Refresh the batch files at most once per UTC day.
    try:
        last_pull_date = LAST_PULL_DATE_FILE.read_text().strip()
    except FileNotFoundError:
        last_pull_date = ""

    date = datetime.utcnow()
    date = f"{date.year}{date.month}{date.day}"

    if last_pull_date != date:
        update_batches()
        LAST_PULL_DATE_FILE.parent.mkdir(exist_ok=True, parents=True)
        LAST_PULL_DATE_FILE.write_text(date)

    # Faster than converting every ID in files to int
    post_id = str(post_id)

    # Each batch line has the form "<id>:<md5>.<ext>".
    for batch in BATCHES_DIR.iterdir():
        with open(batch, "r") as content:
            for line in content:
                an_id, its_md5_ext = line.split(":")

                if post_id == an_id:
                    # NOTE(review): despite the -> Optional[str] annotation,
                    # this returns the ["md5", "ext"] list produced by
                    # split(".") — confirm which form callers expect.
                    return its_md5_ext.rstrip().split(".")
    return None
Find MD5 for a censored post's ID, return None if can't find.
def get(self, user_id, lang='zh_CN'):
    """
    Get basic user information (including the UnionID mechanism).

    For details see
    https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140839

    :param user_id: The user's OpenID, unique for the current official
                    account
    :param lang: Language for the returned country/region fields; one of
                 ``zh_CN`` (Simplified Chinese), ``zh_TW`` (Traditional
                 Chinese) or ``en`` (English)
    :return: The returned JSON data

    Usage::

        from wechatpy import WeChatClient

        client = WeChatClient('appid', 'secret')
        user = client.user.get('openid')
    """
    assert lang in ('zh_CN', 'zh_TW', 'en'), 'lang can only be one of \
zh_CN, zh_TW, en language codes'
    return self._get(
        'user/info',
        params={
            'openid': user_id,
            'lang': lang
        }
    )
获取用户基本信息(包括UnionID机制) 详情请参考 https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140839 :param user_id: 普通用户的标识,对当前公众号唯一 :param lang: 返回国家地区语言版本,zh_CN 简体,zh_TW 繁体,en 英语 :return: 返回的 JSON 数据包 使用示例:: from wechatpy import WeChatClient client = WeChatClient('appid', 'secret') user = client.user.get('openid')
def dryRun(self, func, *args, **kwargs):
    """Instead of running function with `*args` and `**kwargs`,
    just print out the function call."""
    # Python 2 print-chevron syntax: write to self.out instead of stdout.
    # A per-function formatter is looked up first; otherwise the default
    # formatter renders the call.
    print >> self.out, \
        self.formatterDict.get(func, self.defaultFormatter)(func, *args, **kwargs)
Instead of running function with `*args` and `**kwargs`, just print out the function call.
def path_without_suffix(self):
    """The relative path to asset without suffix.

    Example::

        >>> attrs = AssetAttributes(environment, 'js/app.js')
        >>> attrs.path_without_suffix
        'js/app'
    """
    if not self.suffix:
        return self.path
    # The suffix is stored as a list of extensions; strip their combined
    # length from the end of the path.
    trailing = len(''.join(self.suffix))
    return self.path[:-trailing]
The relative path to asset without suffix. Example:: >>> attrs = AssetAttributes(environment, 'js/app.js') >>> attrs.path_without_suffix 'js/app'
def get_day_start_ut_span(self):
    """
    Return the first and last day_start_ut

    Returns
    -------
    first_day_start_ut: int
    last_day_start_ut: int
    """
    # Single aggregate query over the `days` table of the GTFS database.
    cur = self.conn.cursor()
    first_day_start_ut, last_day_start_ut = \
        cur.execute("SELECT min(day_start_ut), max(day_start_ut) FROM days;").fetchone()
    return first_day_start_ut, last_day_start_ut
Return the first and last day_start_ut Returns ------- first_day_start_ut: int last_day_start_ut: int
def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any emerge commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).

    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html

    Updates the passed package (emerge --update package)

    slot
        Restrict the update to a particular slot. It will update to the
        latest version within the slot.

    fromrepo
        Restrict the update to a particular repository. It will update to the
        latest version within the repository.

    binhost
        has two options try and force.
        try - tells emerge to try and install the package from a configured binhost.
        force - forces emerge to install the package from a binhost otherwise it fails out.

    Return a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.update <package name>
    '''
    if salt.utils.data.is_true(refresh):
        refresh_db()

    # Build the full portage atom: name[:slot][::repo].
    full_atom = pkg

    if slot is not None:
        full_atom = '{0}:{1}'.format(full_atom, slot)

    if fromrepo is not None:
        full_atom = '{0}::{1}'.format(full_atom, fromrepo)

    if binhost == 'try':
        bin_opts = ['-g']
    elif binhost == 'force':
        bin_opts = ['-G']
    else:
        bin_opts = []

    old = list_pkgs()
    cmd = []
    # Run inside a transient systemd scope so a salt-minion restart does not
    # kill the emerge process (see docstring).
    if salt.utils.systemd.has_scope(__context__) \
            and __salt__['config.get']('systemd.scope', True):
        cmd.extend(['systemd-run', '--scope'])
    cmd.extend(['emerge',
                '--ask', 'n',
                '--quiet',
                '--update',
                '--newuse',
                '--oneshot'])
    cmd.extend(bin_opts)
    cmd.append(full_atom)

    call = __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)

    if call['retcode'] != 0:
        needed_changes = _process_emerge_err(call['stdout'], call['stderr'])
    else:
        needed_changes = []

    # Invalidate the cached package list and diff old vs. new installs.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if needed_changes:
        raise CommandExecutionError(
            'Problem encountered updating package(s)',
            info={'needed_changes': needed_changes, 'changes': ret}
        )

    return ret
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any emerge commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Updates the passed package (emerge --update package) slot Restrict the update to a particular slot. It will update to the latest version within the slot. fromrepo Restrict the update to a particular repository. It will update to the latest version within the repository. binhost has two options try and force. try - tells emerge to try and install the package from a configured binhost. force - forces emerge to install the package from a binhost otherwise it fails out. Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.update <package name>
def get_field_analysis(self, field):
    """
    Get the FieldAnalysis for a given fieldname

    :param field: TODO
    :return: :class:`FieldClassAnalysis`
    """
    # Resolve the class owning the field first; without it there is no
    # field-level analysis to return.
    owner = self.get_class_analysis(field.get_class_name())
    return owner.get_field_analysis(field) if owner else None
Get the FieldAnalysis for a given fieldname :param field: TODO :return: :class:`FieldClassAnalysis`
def pause(self):
    """Pauses all the snippet clients under management.

    This clears the host port of a client because a new port will be
    allocated in `resume`.
    """
    for client in self._snippet_clients.values():
        self._device.log.debug('Clearing host port %d of SnippetClient<%s>.',
                               client.host_port, client.package)
        client.clear_host_port()
Pauses all the snippet clients under management. This clears the host port of a client because a new port will be allocated in `resume`.
def update(self, *args, **kwargs):
    """See `__setitem__`."""
    super(TAG_Compound, self).update(*args, **kwargs)
    # Newly inserted tags may be anonymous; give each one its dict key as
    # its name so the compound's children stay consistently named.
    for key, item in self.items():
        if item.name is None:
            item.name = key
See `__setitem__`.
def get_server_build_info(self):
    """ issues a buildinfo command """
    # Returns the server's buildinfo document, or None when the server is
    # offline or the command fails.  (Python 2 except syntax.)
    if self.is_online():
        try:
            return self.get_mongo_client().server_info()
        except OperationFailure, ofe:
            log_exception(ofe)
            if "there are no users authenticated" in str(ofe):
                # this is a pymongo 3.6.1 regression where the buildinfo command fails on non authenticated client
                # fall-back to an authenticated client
                admin_db = self.get_db("admin", no_auth=False)
                return admin_db.command("buildinfo")
        except Exception, e:
            log_exception(e)
    return None
issues a buildinfo command
def scale_sfs(s):
    """Scale a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        Scaled site frequency spectrum.

    """
    # Multiply each count by its allele-frequency index 0..n-1.
    return s * np.arange(s.size)
Scale a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) Scaled site frequency spectrum.
def parse_qualifier(parser, event, node):   #pylint: disable=unused-argument
    """Parse CIM/XML QUALIFIER element and return CIMQualifier"""
    name = _get_required_attribute(node, 'NAME')
    cim_type = _get_required_attribute(node, 'TYPE')
    # TODO 2/16 KS: Why is propagated not used?
    propagated = _get_attribute(node, 'PROPAGATED')

    # Advance the pull parser: an immediate end tag means a NULL-valued
    # qualifier; otherwise expect a scalar VALUE or a VALUE.ARRAY.
    (next_event, next_node) = six.next(parser)

    if _is_end(next_event, next_node, 'QUALIFIER'):
        return CIMQualifier(name, None, type=cim_type)

    if _is_start(next_event, next_node, 'VALUE'):
        value = parse_value(parser, next_event, next_node)
    elif _is_start(next_event, next_node, 'VALUE.ARRAY'):
        #pylint: disable=redefined-variable-type
        # redefined from str to list.
        value = parse_value_array(parser, next_event, next_node)
    else:
        raise ParseError('Expecting (VALUE | VALUE.ARRAY)')

    # Convert the raw string(s) into a typed CIM value.
    result = CIMQualifier(name, tocimobj(cim_type, value))

    # Consume the closing </QUALIFIER> tag.
    _get_end_event(parser, 'QUALIFIER')

    return result
Parse CIM/XML QUALIFIER element and return CIMQualifier
def configure_db(self, hostname, database, username, admin=False):
    """Configure access to database for username from hostname.

    Creates the database if needed, grants (admin or per-database) access
    for the user from the normalized remote address, and returns the
    user's password.
    """
    self.connect(password=self.get_mysql_root_password())
    if not self.database_exists(database):
        self.create_database(database)
    remote_ip = self.normalize_address(hostname)
    password = self.get_mysql_password(username)
    if not self.grant_exists(database, username, remote_ip):
        if not admin:
            self.create_grant(database, username, remote_ip, password)
        else:
            self.create_admin_grant(username, remote_ip, password)
        self.flush_priviledges()
    return password
Configure access to database for username from hostname.
def _load_torrents_directory(self):
    """
    Load torrents directory

    If it does not exist yet, this request will cause the system to
    create one
    """
    # The lixian (offline download) API returns the directory id (cid),
    # which is then resolved to a directory object.
    r = self._req_lixian_get_id(torrent=True)
    self._downloads_directory = self._load_directory(r['cid'])
Load torrents directory If it does not exist yet, this request will cause the system to create one
def add(self, arg1, arg2=None, arg3=None, bucket_type=None):
    """
    Start assembling a Map/Reduce operation. A shortcut for
    :func:`RiakMapReduce.add`.

    :param arg1: the object or bucket to add
    :type arg1: RiakObject, string
    :param arg2: a key or list of keys to add (if a bucket is
      given in arg1)
    :type arg2: string, list, None
    :param arg3: key data for this input (must be convertible to JSON)
    :type arg3: string, list, dict, None
    :param bucket_type: Optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`RiakMapReduce`
    """
    # Convenience wrapper: create a fresh MapReduce job bound to this
    # client and delegate the input handling to it.
    mr = RiakMapReduce(self)
    return mr.add(arg1, arg2, arg3, bucket_type)
Start assembling a Map/Reduce operation. A shortcut for :func:`RiakMapReduce.add`. :param arg1: the object or bucket to add :type arg1: RiakObject, string :param arg2: a key or list of keys to add (if a bucket is given in arg1) :type arg2: string, list, None :param arg3: key data for this input (must be convertible to JSON) :type arg3: string, list, dict, None :param bucket_type: Optional name of a bucket type :type bucket_type: string, None :rtype: :class:`RiakMapReduce`
def update_layers(self):
    """
    Update layers for a service.

    Dispatches to the type-specific updater.  The Layer post_save signal is
    disconnected for the duration of the bulk update and is always
    reconnected afterwards.
    """
    # Detach the per-layer signal handler so bulk updates don't trigger it.
    signals.post_save.disconnect(layer_post_save, sender=Layer)
    try:
        LOGGER.debug('Updating layers for service id %s' % self.id)
        if self.type == 'OGC:WMS':
            update_layers_wms(self)
        elif self.type == 'OGC:WMTS':
            update_layers_wmts(self)
        elif self.type == 'ESRI:ArcGIS:MapServer':
            update_layers_esri_mapserver(self)
        elif self.type == 'ESRI:ArcGIS:ImageServer':
            update_layers_esri_imageserver(self)
        elif self.type == 'Hypermap:WorldMapLegacy':
            update_layers_wm_legacy(self)
        elif self.type == 'Hypermap:WorldMap':
            update_layers_geonode_wm(self)
        elif self.type == 'Hypermap:WARPER':
            update_layers_warper(self)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt); failures are logged, not re-raised.
        LOGGER.error('Error updating layers for service %s' % self.uuid)
    finally:
        # Guarantee the handler is reconnected even if logging itself fails.
        signals.post_save.connect(layer_post_save, sender=Layer)
Update layers for a service.
def _extract_timeseries_list(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):
    """Partition the timeseries in tsvol according to the ROIs in roivol.
    If a mask is given, will use it to exclude any voxel outside of it.

    Parameters
    ----------
    tsvol: numpy.ndarray
        4D timeseries volume or a 3D volume to be partitioned

    roivol: numpy.ndarray
        3D ROIs volume

    maskvol: numpy.ndarray
        3D mask volume

    zeroe: bool
        If true will remove the null timeseries voxels. Only applied to
        timeseries (4D) data.

    roi_values: list of ROI values (int?)
        List of the values of the ROIs to indicate the order and which ROIs
        will be processed.

    Returns
    -------
    ts_list: list
        A list with the timeseries arrays as items
    """
    _check_for_partition(tsvol, roivol, maskvol)

    if roi_values is None:
        roi_values = get_unique_nonzeros(roivol)

    series = []
    for roi_val in roi_values:
        ts = _partition_data(tsvol, roivol, roi_val, maskvol, zeroe)
        # An ROI with no surviving voxels yields an all-zero timeseries of
        # the same length as the last axis of tsvol.
        if len(ts) == 0:
            ts = np.zeros(tsvol.shape[-1])
        series.append(ts)

    return series
Partition the timeseries in tsvol according to the ROIs in roivol. If a mask is given, will use it to exclude any voxel outside of it. Parameters ---------- tsvol: numpy.ndarray 4D timeseries volume or a 3D volume to be partitioned roivol: numpy.ndarray 3D ROIs volume maskvol: numpy.ndarray 3D mask volume zeroe: bool If true will remove the null timeseries voxels. Only applied to timeseries (4D) data. roi_values: list of ROI values (int?) List of the values of the ROIs to indicate the order and which ROIs will be processed. Returns ------- ts_list: list A list with the timeseries arrays as items
def progress(self) -> List[bool]:
    """A list of True/False for the number of games the player played in
    the mini series, indicating if the player won or lost."""
    # "W" = win, "L" = loss, "N" = not yet played (skipped).
    # Idiom fix: `p == "W"` replaces the redundant `True if p == "W" else False`.
    return [p == "W" for p in self._data[MiniSeriesData].progress if p != "N"]
A list of True/False for the number of games the player played in the mini series, indicating if the player won or lost.
def com_google_fonts_check_metadata_nameid_family_name(ttFont, font_metadata):
    """Checks METADATA.pb font.name field matches
       family name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    # Prefer the typographic family name; fall back to the legacy
    # FONT_FAMILY_NAME entries when it is absent.
    familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
    if not familynames:
        familynames = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
    if len(familynames) == 0:
        yield FAIL, Message("missing",
                            ("This font lacks a FONT_FAMILY_NAME entry"
                             " (nameID={}) in the name"
                             " table.").format(NameID.FONT_FAMILY_NAME))
    else:
        if font_metadata.name not in familynames:
            yield FAIL, Message("mismatch",
                                ("Unmatched family name in font:"
                                 " TTF has \"{}\" while METADATA.pb"
                                 " has \"{}\"").format(familynames[0],
                                                       font_metadata.name))
        else:
            yield PASS, ("Family name \"{}\" is identical"
                         " in METADATA.pb and on the"
                         " TTF file.").format(font_metadata.name)
Checks METADATA.pb font.name field matches family name declared on the name table.
def check_format(self, full_check=True):
    """Check whether the NDArray format is valid.

    Parameters
    ----------
    full_check : bool, optional
        If `True`, rigorous check, O(N) operations. Otherwise
        basic check, O(1) operations (default True).
    """
    # Delegates to the C API; check_call raises on a non-zero status.
    check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
Check whether the NDArray format is valid. Parameters ---------- full_check : bool, optional If `True`, rigorous check, O(N) operations. Otherwise basic check, O(1) operations (default True).
def offering(self):
    """
    Deprecated. Use course and run independently.

    Returns None when both course and run are unset; the course alone when
    run is unset; otherwise "course/run".
    """
    warnings.warn(
        "Offering is no longer a supported property of Locator. Please use the course and run properties.",
        DeprecationWarning,
        stacklevel=2
    )

    if not self.course and not self.run:
        return None
    elif not self.run and self.course:
        return self.course
    # NOTE(review): if `run` is set while `course` is None, the join below
    # raises TypeError — confirm whether that combination can occur.
    return "/".join([self.course, self.run])
Deprecated. Use course and run independently.
def replace_zeros(self, val, zero_thresh=0.0):
    """Replaces all zeros in the image with a specified value.

    Parameters
    ----------
    val : image dtype
        value to replace zeros (and near-zero pixels) with
    zero_thresh : float
        pixels with values <= this threshold are treated as zero

    Returns
    -------
    a new image of the same type, with zeros replaced
    """
    new_data = self.data.copy()
    new_data[new_data <= zero_thresh] = val
    # Rebuild via type(self) so subclasses return their own type, keeping
    # the original dtype and reference frame.
    return type(self)(new_data.astype(self.data.dtype), frame=self._frame)
Replaces all zeros in the image with a specified value. Parameters ---------- val : image dtype value to replace zeros with Returns ------- a new image with the zeros replaced by the given value
def nvmlUnitGetUnitInfo(unit):
    r"""
    /**
     * Retrieves the static information associated with a unit.
     *
     * For S-class products.
     *
     * See \ref nvmlUnitInfo_t for details on available unit info.
     *
     * @param unit                                 The identifier of the target unit
     * @param info                                 Reference in which to return the unit information
     *
     * @return
     *         - \ref NVML_SUCCESS                 if \a info has been populated
     *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
     *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a unit is invalid or \a info is NULL
     */
    nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo
    """
    # (Removed a second, identical copy of the C header comment that sat
    # here as a dead bare-string statement.)
    c_info = c_nvmlUnitInfo_t()
    fn = _nvmlGetFunctionPointer("nvmlUnitGetUnitInfo")
    ret = fn(unit, byref(c_info))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_info)
r""" /** * Retrieves the static information associated with a unit. * * For S-class products. * * See \ref nvmlUnitInfo_t for details on available unit info. * * @param unit The identifier of the target unit * @param info Reference in which to return the unit information * * @return * - \ref NVML_SUCCESS if \a info has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL */ nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo
def initialize(self):
    """Instantiates the cache area to be ready for updates"""
    # create_all is idempotent: existing tables are left untouched.
    self.Base.metadata.create_all(self.session.bind)
    logger.debug("initialized sqlalchemy orm tables")
Instantiates the cache area to be ready for updates
def extract_header_comment_key_value_tuples_from_file(file_descriptor):
    """ Extracts tuples representing comments and localization entries from strings file.

    Args:
        file_descriptor (file): The file to read the tuples from

    Returns:
        list : List of tuples representing the headers and localization entries.
    """
    file_data = file_descriptor.read()
    findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX,
                                file_data,
                                re.MULTILINE | re.DOTALL)

    returned_list = []
    for header_comment, _ignored, raw_comments, key, value in findall_result:
        # Raw string fixes the invalid "\*" escape sequences, which raise a
        # DeprecationWarning (and will eventually be an error) in Python 3.
        comments = re.findall(r"/\* (.*?) \*/", raw_comments)
        if not comments:
            comments = [u""]
        returned_list.append((header_comment, comments, key, value))

    return returned_list
Extracts tuples representing comments and localization entries from strings file. Args: file_descriptor (file): The file to read the tuples from Returns: list : List of tuples representing the headers and localization entries.
def newton_solver_comp(f, x0, lb, ub, infos=False, backsteps=10, maxit=50, numdiff=False):
    '''Solves many independent systems f(x)=0 simultaneously using a simple gradient descent.

    :param f: objective function to be solved with values p x N . The second
        output argument represents the derivative with values in (p x p x N)
    :param x0: initial value ( p x N )
    :param lb: lower bound for the last variable
    :param ub: upper bound for the last variable
    :return: [sol, nit] where sol solves f(sol) = 0 with the last row clamped
        to the bounds, and nit is the total number of Newton iterations.
    '''
    from numpy import row_stack

    ind = x0.shape[0] - 1

    def fun_lc(xx):
        # System with the last variable pinned at the lower bound.
        x = row_stack([xx, lb])
        res = f(x)
        return res[:ind, :]

    def fun_uc(xx):
        # System with the last variable pinned at the upper bound.
        x = row_stack([xx, ub])
        res = f(x)
        return res[:ind, :]

    # Unconstrained solve plus one solve at each bound.
    [sol_nc, nit0] = newton_solver(f, x0, numdiff=True, infos=True)
    [sol_lc, nit1] = newton_solver(fun_lc, x0[:-1, :], numdiff=True, infos=True)
    [sol_uc, nit2] = newton_solver(fun_uc, x0[:-1, :], numdiff=True, infos=True)

    # BUG FIX: `nit` was never assigned (the assignment was commented out),
    # so the original raised NameError at the return statement.
    nit = nit0 + nit1 + nit2

    sol_lc = row_stack([sol_lc, lb])
    sol_uc = row_stack([sol_uc, ub])

    # Pick, per column, the bound-clamped solution when the unconstrained
    # one violates a bound, otherwise the unconstrained solution.
    lower_constrained = sol_nc[-1, :] < lb
    upper_constrained = sol_nc[-1, :] > ub
    # BUG FIX: logical-not via `~`; unary minus on boolean arrays is an
    # error in modern NumPy.
    not_constrained = ~(lower_constrained | upper_constrained)

    sol = (sol_lc * lower_constrained
           + sol_uc * upper_constrained
           + sol_nc * not_constrained)

    return [sol, nit]
Solves many independent systems f(x)=0 simultaneously using a simple gradient descent. :param f: objective function to be solved with values p x N . The second output argument represents the derivative with values in (p x p x N) :param x0: initial value ( p x N ) :param lb: bounds for first variable :param ub: bounds for second variable :return: solution x such that f(x) = 0
def run_function(app_function, event, context):
    """
    Given a function and event context, detect signature and execute,
    returning any result.

    Zero-argument and one-argument functions are invoked with nothing or the
    event alone, respectively, unless they declare ``*args`` — in which case
    (or when they declare exactly two parameters) both event and context are
    passed. More than two named parameters is rejected.
    """
    # getargspec does not support python 3 method with type hints, so prefer
    # getfullargspec when available.
    # Related issue: https://github.com/Miserlou/Zappa/issues/1452
    introspect = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    spec = introspect(app_function)
    named_args, varargs = spec[0], spec[1]

    arity = len(named_args)
    if arity > 2:
        raise RuntimeError("Function signature is invalid. Expected a function that accepts at most "
                           "2 arguments or varargs.")
    if arity == 2 or varargs:
        # Two declared parameters, or a *args catch-all: pass both.
        return app_function(event, context)
    if arity == 1:
        return app_function(event)
    return app_function()
Given a function and event context, detect the function's signature and execute it, returning any result.
def symbol_scores(self, symbol):
    """Find matches for symbol.

    :param symbol: A . separated symbol. eg. 'os.path.basename'

    :returns: A list of tuples of (score, package, reference|None),
        ordered by score from highest to lowest.
    """
    scores = []
    path = []

    # Examples of how a matched module/variable pair maps to an import:
    # sys.path              sys             path            -> import sys
    # os.path.basename      os.path         basename        -> import os.path
    # basename              os.path         basename        -> from os.path import basename
    # path.basename         os.path         basename        -> from os import path
    def fixup(module, variable):
        # Shift any leading components of the matched module that the user
        # did not type into the "from" part, so the suggested import matches
        # how the symbol was written (see the table above).
        prefix = module.split('.')
        if variable is not None:
            prefix.append(variable)
        seeking = symbol.split('.')
        new_module = []
        while prefix and seeking[0] != prefix[0]:
            new_module.append(prefix.pop(0))
        if new_module:
            module, variable = '.'.join(new_module), prefix[0]
        else:
            variable = None
        return module, variable

    def score_walk(scope, scale):
        # Score this scope against the full dotted key; record a candidate
        # whenever the score clears the noise threshold.
        sub_path, score = self._score_key(scope, full_key)
        if score > 0.1:
            try:
                # A None element in sub_path marks the module/variable split;
                # everything after it becomes the "from ... import X" symbol.
                i = sub_path.index(None)
                sub_path, from_symbol = sub_path[:i], '.'.join(sub_path[i + 1:])
            except ValueError:
                from_symbol = None
            package_path = '.'.join(path + sub_path)
            package_path, from_symbol = fixup(package_path, from_symbol)
            scores.append((score * scale, package_path, from_symbol))

        # Recurse into child scopes (float entries are leaf scores, not
        # scopes). The shared `path` list is pushed/popped around each
        # descent; each level's scale decays so deeper matches rank lower.
        for key, subscope in scope._tree.items():
            if type(subscope) is not float:
                path.append(key)
                score_walk(subscope, subscope.score * scale - 0.1)
                path.pop()

    full_key = symbol.split('.')
    score_walk(self, 1.0)
    # Highest score first.
    scores.sort(reverse=True)
    return scores
Find matches for symbol. :param symbol: A . separated symbol. eg. 'os.path.basename' :returns: A list of tuples of (score, package, reference|None), ordered by score from highest to lowest.