content
stringlengths
22
815k
id
int64
0
4.91M
def phi(input):
    """Phi function (standard normal CDF).

    :param input: Float (scalar or array) value.
    :returns: phi(input).
    """
    # phi(x) = 0.5 * erfc(-x / sqrt(2)) is the Gaussian CDF expressed via erfc.
    scaled = -input / np.sqrt(2.0)
    return 0.5 * erfc(scaled)
34,800
def _is_correct_task(task: str, db: dict) -> bool: """ Check if the current data set is compatible with the specified task. Parameters ---------- task Regression or classification db OpenML data set dictionary Returns ------- bool True if the task and the data set are compatible """ if task == "classification": return db['NumberOfSymbolicFeatures'] == 1 and db['NumberOfClasses'] > 0 elif task == "regression": return True else: return False
34,801
def is_valid_semver(version: str) -> bool:
    """Return True if *version* is a valid semantic version string.

    Accepts MAJOR.MINOR.PATCH with an optional lowercase pre-release suffix
    (e.g. ``1.0.0-alpha.1``).
    """
    pattern = r'^[0-9]+\.[0-9]+\.[0-9]+(-([0-9a-z]+(\.[0-9a-z]+)*))?$'
    return bool(re.match(pattern, version))
34,802
def test_OutOfStock():
    """
    Tests if the stock status value is received correctly for two stock
    status conditions (Out of Stock, Error Occurred) on www.walmart.com.
    """
    scraper = WalmartScraper(OutStockUrl)
    stock_info, _cost = scraper.job()
    # Either status is acceptable: the page may be unreachable in CI.
    assert stock_info in ("Out of Stock", "Error Occurred")
34,803
def main():
    """Entry point: build the board layout and save it as matrix and figure."""
    options = get_options()
    layout = generate_matrix(options.board_grid, options.unit_grid,
                             options.unit_n, options.positions)
    make_dir(options.outdir)
    # Both outputs share the same base path.
    out_path = options.outdir + '/' + options.file_name
    save_matrix(layout, out_path)
    save_fig(layout, out_path)
34,804
async def role_assignments_for_team(
    name: str, project_name: Optional[str] = None
) -> List[RoleAssignment]:
    """Gets all role assignments for a team.

    :param name: Name of the team to look up.
    :param project_name: Optionally restrict the lookup to one project.
    :return: All role assignments attached to the team.
    :raises: the not-found error built by ``not_found`` when the zen store
        raises ``KeyError`` (team or project unknown).
    """
    try:
        return zen_store.get_role_assignments_for_team(
            team_name=name, project_name=project_name
        )
    except KeyError as error:
        # Translate the store-level KeyError into the API's not-found response.
        raise not_found(error) from error
34,805
def set_nested_dict_value(input_dict, key, val):
    """Uses '.' or '->'-splittable string as key and returns modified dict.

    Intermediate non-dict values along the path are replaced by fresh dicts.
    """
    if not isinstance(input_dict, dict):
        # dangerous, just replace with dict
        input_dict = {}
    normalized = key.replace("->", ".")  # make sure no -> left
    prefix, sep, suffix = normalized.partition('.')
    if sep:
        # Recurse into (or create) the child dict for the first path segment.
        input_dict[prefix] = set_nested_dict_value(
            input_dict.get(prefix, {}), suffix, val)
    else:
        # Leaf segment: assign directly.
        input_dict[normalized] = val
    return input_dict
34,806
def cube_1(cube_mesh):
    """ Viewable cube object shifted to 3 on x

    Builds a Mock standing in for a Blender object named 'cube_1' in OBJECT
    mode, whose mesh vertices are generated by ``cube_vertices(3)`` (the
    x-shift of 3).
    NOTE(review): despite the docstring saying "viewable", ``visible_get``
    is mocked to return False and ``hide_render`` is True — confirm intent.
    """
    obj = Mock()
    obj.name = 'cube_1'
    obj.mode = 'OBJECT'
    # Expose the mesh both directly and via to_mesh(), as Blender code may
    # use either access path.
    obj.mesh_mock = cube_mesh
    obj.to_mesh.return_value = cube_mesh
    obj.matrix_world = Matrix.Identity(4)
    obj.mesh_mock.vertices = cube_vertices(3)
    obj.update_from_editmode = Mock()
    # evaluated_get normally returns the depsgraph-evaluated copy; here it
    # just returns the argument unchanged.
    obj.evaluated_get = lambda s: s
    obj.visible_get.return_value = False
    obj.hide_viewport = False
    obj.hide_render = True
    obj.children = None
    return obj
34,807
def deserialize_model_fixture():
    """
    Returns a deserialized version of an instance of the Model class.
    This simulates the idea that a model instance would be serialized and
    loaded from disk.
    """
    class Model:
        """Minimal model double whose prediction is constant."""

        def predict(self, values):
            # Always predict class 1, regardless of input.
            return [1]

    return Model()
34,808
def _get_bfp_op(op, name, bfp_args):
    """
    Create the bfp version of the operation op
    This function is called when a bfp layer is defined. See BFPConv2d and
    BFPLinear below

    :param op: underlying (non-bfp) operation to wrap.
    :param name: base name of the operation.
    :param bfp_args: bfp configuration; part of the cache key via _get_op_name.
    :return: the cached bfp-wrapped operation.
    """
    op_name = _get_op_name(name, **bfp_args)
    if op_name not in _bfp_ops:
        # Bug fix: the cache was written/read with `name` while membership was
        # tested with `op_name`, so the memoization never took effect and
        # different bfp_args for the same layer name collided. Key everything
        # consistently by op_name.
        _bfp_ops[op_name] = _gen_bfp_op(op, name, bfp_args)
    return _bfp_ops[op_name]
34,809
def compute_dmdt(jd: Sequence, mag: Sequence, dmdt_ints_v: str = "v20200318"):
    """Compute dmdt matrix for time series (jd, mag)

    See arXiv:1709.06257

    :param jd: observation times (Julian dates).
    :param mag: magnitudes at those times.
    :param dmdt_ints_v: version key into DMDT_INTERVALS selecting the
        dt/dm histogram bin edges.
    :return: L2-normalized 2D dmdt histogram (transposed so dm is the row axis).
    """
    # Pairwise differences of times and magnitudes.
    jd_diff = pwd_for(jd)
    mag_diff = pwd_for(mag)
    dmdt, ex, ey = np.histogram2d(
        jd_diff,
        mag_diff,
        bins=[
            DMDT_INTERVALS[dmdt_ints_v]["dt_intervals"],
            DMDT_INTERVALS[dmdt_ints_v]["dm_intervals"],
        ],
    )
    dmdt = np.transpose(dmdt)

    norm = np.linalg.norm(dmdt)
    if norm != 0.0:
        # Fix: reuse the norm computed above instead of recomputing it.
        dmdt /= norm
    else:
        dmdt = np.zeros_like(dmdt)
    return dmdt
34,810
def all_bin_vecs(arr, v):
    """
    create an array which holds all 2^V binary vectors

    INPUT
      arr    positive integers, (2^V,) or (2^V, 1) numpy array
      v      number of variables V

    OUTPUT
      edgeconfs   all possible binary vectors, (2^V, V) numpy array
    """
    to_str_func = np.vectorize(lambda x: np.binary_repr(x).zfill(v))
    strs = to_str_func(arr)
    edgeconfs = np.zeros((arr.shape[0], v), dtype=np.int8)
    for bit_ix in range(v):
        fetch_bit_func = np.vectorize(lambda x: x[bit_ix] == '1')
        # Bug fix: the original `[:, 0]` indexing raised IndexError for the
        # documented 1-D input; ravel() accepts both (N,) and (N, 1) shapes.
        edgeconfs[:, bit_ix] = fetch_bit_func(strs).ravel()
    return edgeconfs
34,811
def unpack_domains(df):
    """Unpack domain codes to values.

    Parameters
    ----------
    df : DataFrame
        Input frame; a copy is returned, the original is left untouched.
    """
    result = df.copy()
    # Map each coded column present in the frame through its domain lookup.
    for field, domain in DOMAINS.items():
        if field in result.columns:
            result[field] = result[field].map(domain)
    return result
34,812
def validate_saml_response(html):
    """Parse HTML and verify that a SAML response was returned.

    Exits the process with status 1 when no SAMLResponse input field is found.
    """
    soup = BeautifulSoup(html, "html.parser")
    xml = None
    # If several SAMLResponse inputs exist, the last one wins.
    for field in soup.find_all("input", attrs={"name": "SAMLResponse"}):
        saml_base64 = field.get("value")
        xml = codecs.decode(saml_base64.encode("ascii"), "base64").decode("utf-8")
    if xml is None:
        logging.error(
            "Invalid data detected in SAML response."
            " View the response with the DEBUG loglevel."
        )
        logging.debug(html)
        sys.exit(1)
    return xml
34,813
def generate_region_info(region_params):
    """Generate the `region_params` list in the tiling parameter dict

    Args:
        region_params (dict):
            A `dict` mapping each region-specific parameter to a list of
            values per FOV (all lists have the same length)

    Returns:
        list:
            The complete set of `region_params` sorted by region
    """
    # One dict per FOV, picking the i-th value of every parameter list.
    num_fovs = len(region_params['region_start_row'])
    return [
        {param: values[i] for param, values in region_params.items()}
        for i in range(num_fovs)
    ]
34,814
def is_decorator(tree, fname):
    """Test tree whether it is the decorator ``fname``.

    ``fname`` may be ``str`` or a predicate, see ``isx``.

    References of the forms ``f``, ``foo.f`` and ``hq[f]`` are supported. We
    detect:

        - ``Name``, ``Attribute`` or ``Captured`` matching the given ``fname``
          (non-parametric decorator), and

        - ``Call`` whose ``.func`` matches the above rule (parametric decorator).
    """
    # Non-parametric decorator: the reference itself matches.
    if isx(tree, fname):
        return True
    # Parametric decorator: a call whose callee matches.
    return type(tree) is Call and isx(tree.func, fname)
34,815
def without_oaiset_signals(app):
    """Temporary disable oaiset signals.

    Generator-style fixture/context helper: signals are unregistered before
    the yield and re-registered afterwards. Note there is no try/finally, so
    an exception raised inside the with-block would skip re-registration.
    """
    from invenio_oaiserver import current_oaiserver
    current_oaiserver.unregister_signals_oaiset()
    yield
    # Restore the signal handlers once the caller is done.
    current_oaiserver.register_signals_oaiset()
34,816
def history(mac, hostname):
    """Read the history from the sensor.

    Polls a Mi Flora plant sensor at `mac` and appends one InfluxDB-style
    measurement dict per history entry to the module-level `json_body`
    buffer; `hostname` is recorded as the measurement tag and appended to
    `clear_hosts` once all entries were collected.
    NOTE(review): `backend`, `json_body` and `clear_hosts` are module-level
    globals defined elsewhere — confirm their setup before calling.
    """
    global json_body
    global clear_hosts
    temp = []
    poller = MiFloraPoller(mac, backend)
    history_list = poller.fetch_history()
    for entry in history_list:
        # One point per history entry; time is a unix timestamp (seconds).
        measurement = {
            "measurement": "monitor_reading",
            "tags": {
                "monitor": hostname
            },
            "time": int(entry.wall_time.timestamp()),
            "fields": {
                "temperature": entry.temperature,
                "moisture": entry.moisture,
                "light": entry.light,
                "conductivity": entry.conductivity
            }
        }
        temp.append(measurement)
    # Only commit to the shared buffer when every entry converted and the
    # history was non-empty; otherwise the batch is silently dropped.
    if len(history_list) == len(temp) and not len(history_list) == 0:
        for item in temp:
            json_body.append(item)
        clear_hosts.append(hostname)
34,817
def _validate_arguments(is_sequence, is_dataset, use_multiprocessing, workers,
                        steps_per_epoch, validation_data, validation_steps,
                        mode, kwargs):
  """Raises errors if arguments are invalid.

  Arguments:
    is_sequence: Boolean, whether data is a `keras.utils.data_utils.Sequence`
      instance.
    is_dataset: Boolean, whether data is a dataset instance.
    use_multiprocessing: Boolean. If `True`, use process-based threading. If
      unspecified, `use_multiprocessing` will default to `False`. Note that
      because this implementation relies on multiprocessing, you should not
      pass non-picklable arguments to the generator as they can't be passed
      easily to children processes.
    workers: Integer. Maximum number of processes to spin up when using
      process-based threading. If unspecified, `workers` will default to 1. If
      0, will execute the generator on the main thread.
    steps_per_epoch: Total number of steps (batches of samples) before
      declaring one epoch finished and starting the next epoch. Ignored with
      the default value of `None`.
    validation_data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or
      `(x, y)` or `(x, y, sample_weights)`) or a generator or
      `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset.
    validation_steps: Total number of steps (batches of samples) before
      declaring validation finished.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
    kwargs: Additional arguments for backwards compatibility.

  Raises:
    ValueError: If `steps_per_epoch` or `validation_steps` are not passed
      for data types that require them, or if unrecognized keyword
      arguments are passed.
  """
  # Plain generators are not index-addressable, so multiple worker processes
  # may each iterate the full stream and duplicate samples.
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
                    ' class.'))

  # Only datasets can infer their own cardinality; everything else needs an
  # explicit step count.
  if steps_per_epoch is None and not is_dataset:
    arg_name = 'steps_per_epoch' if mode == ModeKeys.TRAIN else 'steps'
    raise ValueError('Please specify the number of steps via the '
                     '`{}` argument.'.format(arg_name))

  val_gen = (
      data_utils.is_generator_or_sequence(validation_data) or
      isinstance(validation_data, iterator_ops.EagerIterator))
  # Generator-style validation data (except Sequence, which has a length)
  # likewise requires an explicit validation_steps.
  if (val_gen and not isinstance(validation_data, data_utils.Sequence) and
      not validation_steps):
    raise ValueError('Please specify the `validation_steps` argument.')

  # 'steps' is the only legacy kwarg tolerated here.
  if any(k != 'steps' for k in kwargs):
    raise ValueError('Invalid arguments passed: {}'.format(
        [k for k in kwargs if k != 'steps']))
34,818
def Nbspld1(t, x, k=3):
    """Same as :func:`Nbspl`, but returns the first derivative too.

    :param t: knot vector (length must satisfy k <= len(t) - 2).
    :param x: evaluation points.
    :param k: spline degree (default 3).
    :return: tuple ``(N, dN)`` of basis values and first derivatives, each of
        shape ``(len(x), len(t) - k - 1)``.
    :raises ValueError: if ``k > len(t) - 2``.
    """
    kmax = k
    if kmax > len(t) - 2:
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) with a message matching the actual condition.
        raise ValueError("Input error in Nbspl: require that k <= len(t)-2")
    t = np.array(t)
    x = np.array(x)[:, np.newaxis]
    # Degree-0 basis: indicator of the half-open knot interval.
    N = 1.0 * ((x > t[:-1]) & (x <= t[1:]))
    dN = np.zeros_like(N)
    # Cox-de Boor recursion, raising the degree one step at a time.
    # (Fixed: Python-2 `xrange`, and the loop variable no longer shadows
    # the parameter `k`.)
    for deg in range(1, kmax + 1):
        dt = t[deg:] - t[:-deg]
        _dt = dt.copy()
        _dt[dt != 0] = 1. / dt[dt != 0]  # guard against repeated knots
        dN = dN[:, :-1] * (x - t[:-deg - 1]) * _dt[:-1] \
            - dN[:, 1:] * (x - t[deg + 1:]) * _dt[1:]
        dN += N[:, :-1] * _dt[:-1] - N[:, 1:] * _dt[1:]
        N = N[:, :-1] * (x - t[:-deg - 1]) * _dt[:-1] \
            - N[:, 1:] * (x - t[deg + 1:]) * _dt[1:]
    return N, dN
34,819
def getStyleSheet():
    """Returns a stylesheet object with Normal, BodyText and Bold styles."""
    sheet = StyleSheet1()
    # Base paragraph style.
    sheet.add(ParagraphStyle(name='Normal',
                             fontName="Helvetica",
                             fontSize=10,
                             leading=12))
    # Body text: Normal plus extra space before each paragraph.
    sheet.add(ParagraphStyle(name='BodyText',
                             parent=sheet['Normal'],
                             spaceBefore=14))
    # Bold variant of body text.
    sheet.add(ParagraphStyle(name='Bold',
                             parent=sheet['BodyText'],
                             fontName="Helvetica-Bold"))
    return sheet
34,820
async def test_create_event_format_missing_mandatory_property(
    client: _TestClient,
    mocker: MockFixture,
    token: MockFixture,
    event: dict,
) -> None:
    """Should return 422 HTTPUnprocessableEntity.

    Posts an event-format body that carries only an id and an unrelated
    optional property; the service is expected to reject it because
    mandatory properties are missing.
    """
    EVENT_ID = "event_id_1"
    RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
    # Stub out the adapters/services so only request validation is exercised.
    mocker.patch(
        "event_service.adapters.events_adapter.EventsAdapter.get_event_by_id",  # noqa: B950
        return_value=event,
    )
    mocker.patch(
        "event_service.services.event_format_service.create_id",
        return_value=RACECLASS_ID,
    )
    mocker.patch(
        "event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
        return_value=RACECLASS_ID,
    )

    # Deliberately incomplete payload (mandatory properties omitted).
    request_body = {"id": RACECLASS_ID, "optional_property": "Optional_property"}
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }

    with aioresponses(passthrough=["http://127.0.0.1"]) as m:
        # The auth service call is mocked to succeed so the 422 comes from
        # payload validation, not authorization.
        m.post("http://example.com:8081/authorize", status=204)
        resp = await client.post(
            f"/events/{EVENT_ID}/format", headers=headers, json=request_body
        )
        assert resp.status == 422
34,821
def parse_faq_entries(entries):
    """
    Iterate through the condensed FAQ entries to expand all of the keywords
    and answers. Exits the process on duplicate keywords.
    """
    parsed_entries = {}
    for entry in entries:
        answer = entry["answer"]
        for keyword in entry["keywords"]:
            # Duplicate keywords are a configuration error: report and abort.
            if keyword in parsed_entries:
                print("Error: Found duplicate keyword '{}' in pre-configured FAQ entries.".format(keyword))
                exit(1)
            parsed_entries[keyword] = answer
    return parsed_entries
34,822
def make_dqn(statesize, actionsize):
    """
    Create a nn.Module instance for the q learning model.

    @param statesize: dimension of the input continuous state space.
    @param actionsize: dimension of the discrete action space.

    @return model: nn.Module instance
    """
    # NOTE(review): intentionally unimplemented stub (returns None) —
    # presumably an exercise placeholder to be filled in with a torch
    # nn.Module; confirm before use.
    pass
34,823
def dh_to_dt(day_str, dh):
    """Convert a decimal-hour offset into a datetime.

    :param day_str: day in ``YYYYMMDD`` format.
    :param dh: decimal hours past midnight of that day.
    :return: ``datetime.datetime`` for ``day_str`` midnight plus ``dh`` hours.
        (The old docstring claimed a unix timestamp; a naive datetime is and
        always was returned. To get a timestamp, callers can apply
        ``.replace(tzinfo=datetime.timezone.utc).timestamp()``.)
    """
    # Removed: an unused epoch-delta computation left over from a previous
    # timestamp-returning implementation.
    base = datetime.datetime.strptime(day_str, '%Y%m%d')
    return base + datetime.timedelta(seconds=float(dh * 3600))
34,824
def get_statistic(key, key_type, fromtime, endtime, var_names):
    """Fetch hourly report statistics by key and timestamp.

    Parameters:
        key: lookup key; when falsy, all keys of `key_type` are scanned.
        key_type: one of ip, ipc, page, user, did.
        fromtime: timestamp used to locate the statistics database.
        endtime: currently unused here.
        var_names: statistic variable names to extract.

    Returns:
        If key is falsy: ``{stat_key_without_type_prefix: {var_name: value, ...}}``
        (currently only the page dimension passes an empty key).
        Otherwise: ``{var_name: value, ...}`` for the single key, or None on
        any lookup/DB error.
    """
    var_names_set = set(var_names)
    logger.debug(DEBUG_PREFIX+ 'in get_statistic...')
    try:
        db_path = get_stat_db_path(fromtime)
    except Exception as e:
        return None

    if key:
        logger.debug(DEBUG_PREFIX+" 有指定特定的key")
        logger.debug(DEBUG_PREFIX+"传入获取统计数据库的key的参数key:%s, key_type:%s", str(key), str(key_type))
        key = get_click_stat_key(key, key_type)
        if key is None:
            return None
        logger.debug(DEBUG_PREFIX+"传入获取统计数据库的key是 %s", (key,))
        try:
            db = get_db(db_path)
            return get_key_stat(key, db, var_names_set)
        except KeyError:
            logger.error("db:%s don't have key: %s", db_path, key)
            return None
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        finally:
            # Fix: dict.has_key() is Python-2 only; use `in` instead.
            if 'db' in locals():
                del db
    else:
        logger.debug(DEBUG_PREFIX+"会遍历所有的key")
        # Result shape: {url: {var_name1: ..., var_name2: ...}}
        ret = dict()
        # When key is empty, iterate every key of this dimension and load all
        # var_names from each (currently only the page dimension does this).
        prefix = get_stat_key_prefix(key_type)
        try:
            db = get_db(db_path)
            keys = scan_keys(prefix, db, include_value=False)
            for key in keys:
                key_stat = get_key_stat(key, db, var_names_set)
                # Strip the leading type byte from the stored key.
                ret[key[1:]] = key_stat
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        except Exception as e:
            logger.error(e)
            return None
        finally:
            if 'db' in locals():
                del db
        return ret
    # (The trailing unreachable `return None` was removed: every branch
    # above already returns.)
34,825
def run(cmd: Sequence[Union[str, Path]], check=True) -> int:
    """Run arbitrary command as subprocess, returning its exit code.

    When `check` is true, a non-zero exit code raises PipxError.
    """
    completed = run_subprocess(cmd, capture_stdout=False, capture_stderr=False)
    if check and completed.returncode:
        cmd_str = " ".join(str(c) for c in cmd)
        raise PipxError(f"{cmd_str!r} failed")
    return completed.returncode
34,826
def write_docker_compose_file(compose_configuration, path):
    """ Writes the new docker-compose file """
    # Preserve any docker-compose file already present at `path`.
    if os.path.exists(path):
        rename_existing_file(path)

    # Serialize the configuration and write the new docker-compose file.
    serialized = ruamel.yaml.round_trip_dump(
        compose_configuration, indent=4, block_seq_indent=2, explicit_start=True)
    with open(path, "w") as outfile:
        outfile.write(serialized)
34,827
def dump_bases(glyphs, records, printable_function):
    """Prints bases with their classes.

    :param glyphs: iterable of glyph identifiers.
    :param records: records aligned positionally with `glyphs`.
    :param printable_function: renders a record into printable text.
    """
    # Idiom fix: iterate the two sequences in lockstep with zip instead of
    # maintaining a manual index counter.
    for glyph, record in zip(glyphs, records):
        print_indented("%s: %s" % (glyph, printable_function(record)), indents=2)
34,828
def _createTopics(topicMap: Mapping[str, str], topicMgr: TopicManager):
    """
    Create notification topics. These are used when some of the notification
    flags have been set to True (see pub.setNotificationFlags()). The
    topicMap is a dict where key is the notification type, and value is the
    topic name to create. Notification type is a string in ('send',
    'subscribe', 'unsubscribe', 'newTopic', 'delTopic', 'deadListener').
    """
    def newTopic(_name, _desc, _required=None, **argsDocs):
        # Helper: create (or fetch) the topic and attach its description and
        # message-argument specification.
        topic = topicMgr.getOrCreateTopic(_name)
        topic.setDescription(_desc)
        topic.setMsgArgSpec(argsDocs, _required)

    newTopic(
        _name=topicMap['subscribe'],
        _desc='whenever a listener is subscribed to a topic',
        topic='topic that listener has subscribed to',
        listener='instance of pub.Listener containing listener',
        newSub='false if listener was already subscribed, true otherwise')

    newTopic(
        _name=topicMap['unsubscribe'],
        _desc='whenever a listener is unsubscribed from a topic',
        topic='instance of Topic that listener has been unsubscribed from',
        listener='instance of pub.Listener unsubscribed; None if listener not found',
        listenerRaw='listener unsubscribed')

    newTopic(
        _name=topicMap['send'],
        _desc='sent at beginning and end of sendMessage()',
        topic='instance of topic for message being sent',
        stage='stage of send operation: "pre" or "post" or "in"',
        listener='which listener being sent to')

    newTopic(
        _name=topicMap['newTopic'],
        _desc='whenever a new topic is defined',
        topic='instance of Topic created',
        description='description of topic (use)',
        args='the argument names/descriptions for arguments that listeners must accept',
        required='which args are required (all others are optional)')

    newTopic(
        _name=topicMap['delTopic'],
        _desc='whenever a topic is deleted',
        name='full name of the Topic instance that was destroyed')

    newTopic(
        _name=topicMap['deadListener'],
        _desc='whenever a listener dies without having unsubscribed',
        topic='instance of Topic that listener was subscribed to',
        listener='instance of pub.Listener containing dead listener')
34,829
def getDict(fname):
    """Returns the dict of values of the UserComment"""
    comment = getEXIF(fname, COMMENT_TAG)
    try:
        # Some EXIF backends wrap the tag in an object exposing .value.
        comment = comment.value
    except Exception:
        pass
    return getDictFromString(comment)
34,830
def path_count_cache(metric):
    """
    Decorator to apply caching to the DWWC and DWPC functions from
    hetmatpy.degree_weight.

    `metric` labels the cached quantity (part of the cache key) so DWWC and
    DWPC results for the same metapath/damping do not collide.
    """
    def decorator(user_function):
        signature = inspect.signature(user_function)

        @functools.wraps(user_function)
        def wrapper(*args, **kwargs):
            # Normalize the call into a name->value mapping so arguments can
            # be inspected and rewritten before dispatch.
            bound_args = signature.bind(*args, **kwargs)
            bound_args.apply_defaults()
            arguments = bound_args.arguments
            graph = arguments["graph"]
            # Canonicalize the metapath via the metagraph (accepts abbreviations).
            metapath = graph.metagraph.get_metapath(arguments["metapath"])
            arguments["metapath"] = metapath
            damping = arguments["damping"]
            cached_result = None
            start = time.perf_counter()
            # Caching only applies to HetMat-backed graphs with a cache attached.
            supports_cache = (
                isinstance(graph, hetmatpy.hetmat.HetMat)
                and graph.path_counts_cache
            )
            if supports_cache:
                cache_key = {"metapath": metapath, "metric": metric, "damping": damping}
                cached_result = graph.path_counts_cache.get(**cache_key)
                if cached_result:
                    row_names, col_names, matrix = cached_result
                    # Re-shape the cached matrix to the caller's requested
                    # sparsity and dtype.
                    matrix = sparsify_or_densify(matrix, arguments["dense_threshold"])
                    matrix = matrix.astype(arguments["dtype"])
            if cached_result is None:
                if arguments["dwwc_method"] is None:
                    # import default_dwwc_method here to avoid circular dependencies
                    from hetmatpy.degree_weight import default_dwwc_method
                    arguments["dwwc_method"] = default_dwwc_method
                row_names, col_names, matrix = user_function(**arguments)
                if supports_cache:
                    # Store the freshly computed matrix along with its runtime
                    # (the cache may use runtime to prioritize retention).
                    runtime = time.perf_counter() - start
                    graph.path_counts_cache.set(**cache_key, matrix=matrix, runtime=runtime)
            return row_names, col_names, matrix

        return wrapper

    return decorator
34,831
def test_optim_params1(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test if the optimizer parameters are read correctly for InnerEye configs.
    """
    model = DummyModel()
    model.set_output_to(test_output_dirs.root_dir)
    runner = MLRunner(model_config=model)
    runner.setup()
    lightning_model = runner.container.model
    # configure_optimizers returns (optimizers, schedulers); only the first
    # optimizer's learning rate is checked here.
    optim, _ = lightning_model.configure_optimizers()
    # DummyModel's configured learning rate (1e-3) must be propagated into
    # the optimizer's first parameter group.
    assert optim[0].param_groups[0]["lr"] == 1e-3
34,832
def get_metabolite_mapping() -> Mapping[str, Set[Reference]]:
    """Make the metabolite mapping."""
    metabolites_df = get_metabolite_df()
    mapping = defaultdict(set)
    # Group every (pathway, metabolite) row into a set of References per
    # SMPDB pathway identifier.
    rows = tqdm(metabolites_df.values, desc='mapping metabolites')
    for pathway_id, metabolite_id, metabolite_name in rows:
        mapping[pathway_id].add(Reference(
            prefix=PREFIX,
            identifier=metabolite_id,
            name=metabolite_name,
        ))
    return mapping
34,833
def has_merge_conflict(commit: str, target_branch: str, remote: str = 'origin') -> bool:
    """
    Returns true if the given commit hash has a merge conflict with the given
    target branch.

    Performs a trial merge inside a throwaway worktree at
    ``.git/temp-worktree`` so the caller's checkout is never touched.
    """
    try:
        # Always remove the temporary worktree. It's possible that we got
        # interrupted and left it around. This will raise an exception if the
        # worktree doesn't exist, which can be safely ignored.
        git('worktree', 'remove', '--force', '.git/temp-worktree',
            stdout=get_dev_null(), stderr=get_dev_null())
    except GitError:
        pass
    # Detached worktree at the tip of the target branch on the remote.
    git('worktree', 'add', '.git/temp-worktree', f'{remote}/{target_branch}',
        '--detach', stdout=get_dev_null(), stderr=get_dev_null())
    try:
        # --no-commit: we only care whether the merge applies cleanly, the
        # result is discarded with the worktree.
        git('merge', '--no-commit', commit, git_dir='.git/temp-worktree',
            stdout=get_dev_null(), stderr=get_dev_null())
        return False
    except GitError:
        # Merge command failed -> conflict.
        return True
    finally:
        git('worktree', 'remove', '--force', '.git/temp-worktree',
            stdout=get_dev_null(), stderr=get_dev_null())
34,834
def create_folder():
    """Creates a temp_folder on the users desktop and returns its path."""
    # Build the destination under the user's profile directory.
    new_folder_path = os.path.join(os.environ['USERPROFILE'], 'Desktop\\temp_folder')
    try:
        if not os.path.exists(new_folder_path):
            os.makedirs(new_folder_path)
    except OSError:
        # Creation failure is reported but the intended path is still returned.
        print("Error: Creating directory: " + new_folder_path)
    return new_folder_path
34,835
def load_csr(data):
    """ Loads a PEM X.509 CSR. """
    backend = default_backend()
    return x509.load_pem_x509_csr(data, backend)
34,836
def parseIMACS(hdul):
    """ Parses information from a given HDU, for data produced at IMACS

    Builds the wavelength axis from the WCS keywords of the primary header
    and returns (wave, flux, error) each shaped (1, NAXIS1); the error is a
    flat 10% of the flux.
    """
    header = hdul[0].header
    start = header['CRVAL1']
    step = header['CDELT1']
    total = header['NAXIS1']
    # CRPIX1 is 1-based: shift the grid so pixel CRPIX1 lands on CRVAL1.
    corr = (header['CRPIX1'] - 1) * step
    wave = np.arange(start - corr, start + total * step - corr, step)
    wave = wave.reshape(1, -1)
    flux = hdul[0].data.reshape(1, -1)
    error = flux * .1
    return (wave, flux, error)
34,837
def boll_cross_func_jit(data:np.ndarray,) -> np.ndarray:
    """
    Bollinger band / candlestick golden-cross and death-cross state
    analysis, Numba-JIT optimized.

    Column layout of `data`:
        idx: 0 == open
             1 == high
             2 == low
             3 == close
    """
    # 20-period Bollinger bands over the close column.
    BBANDS = TA_BBANDS(data[:,3], timeperiod=20, nbdevup=2)
    # NOTE(review): `ret_boll_cross` is never assigned in this function, so
    # this return raises NameError at runtime — the implementation looks
    # truncated; confirm against the original source before relying on it.
    return ret_boll_cross
34,838
def rank_genes_groups(
    X,
    labels,  # louvain results
    var_names,
    groups=None,
    reference='rest',
    n_genes=100,
    **kwds,
):
    """
    Rank genes for characterizing groups.

    Parameters
    ----------

    X : cupy.ndarray of shape (n_cells, n_genes)
        The cellxgene matrix to rank genes

    labels : cudf.Series of size (n_cells,)
        Observations groupings to consider

    var_names : cudf.Series of size (n_genes,)
        Names of genes in X

    groups : Iterable[str] (default: 'all')
        Subset of groups, e.g. ['g1', 'g2', 'g3'], to which comparison
        shall be restricted, or 'all' (default), for all groups.

    reference : str (default: 'rest')
        If 'rest', compare each group to the union of the rest of the group.
        If a group identifier, compare with respect to this group.

    n_genes : int (default: 100)
        The number of genes that appear in the returned tables.

    Returns
    -------
    (scores, names, original_reference): per-group record arrays of the
    top n_genes logistic-regression coefficients and the matching gene
    names, plus the reference value passed in.
    """
    #### Wherever we see "adata.obs[groupby], we should just replace w/ the groups"

    # for clarity, rename variable
    if groups == 'all':
        groups_order = 'all'
    elif isinstance(groups, (str, int)):
        raise ValueError('Specify a sequence of groups')
    else:
        groups_order = list(groups)
        if isinstance(groups_order[0], int):
            groups_order = [str(n) for n in groups_order]
        # An explicit reference group must itself be part of the comparison.
        if reference != 'rest' and reference not in set(groups_order):
            groups_order += [reference]
    if (
        reference != 'rest'
        and reference not in set(labels.cat.categories)
    ):
        cats = labels.cat.categories.tolist()
        raise ValueError(
            f'reference = {reference} needs to be one of groupby = {cats}.'
        )

    groups_order, groups_masks = select_groups(labels, groups_order)

    original_reference = reference

    n_vars = len(var_names)

    # for clarity, rename variable
    n_genes_user = n_genes
    # make sure indices are not OoB in case there are less genes than n_genes
    if n_genes_user > X.shape[1]:
        n_genes_user = X.shape[1]
    # in the following, n_genes is simply another name for the total number of genes
    n_genes = X.shape[1]

    n_groups = groups_masks.shape[0]
    # ns[i] = number of cells in group i.
    ns = cp.zeros(n_groups, dtype=int)
    for imask, mask in enumerate(groups_masks):
        ns[imask] = cp.where(mask)[0].size
    if reference != 'rest':
        ireference = cp.where(groups_order == reference)[0][0]
    reference_indices = cp.arange(n_vars, dtype=int)

    rankings_gene_scores = []
    rankings_gene_names = []

    # Perform LogReg
    # if reference is not set, then the groups listed will be compared to the rest
    # if reference is set, then the groups listed will be compared only to the other groups listed
    from cuml.linear_model import LogisticRegression
    reference = groups_order[0]
    if len(groups) == 1:
        raise Exception('Cannot perform logistic regression on a single cluster.')

    # Restrict cells to those belonging to the selected groups.
    grouping_mask = labels.astype('int').isin(cudf.Series(groups_order).astype('int'))
    grouping = labels.loc[grouping_mask]

    X = X[grouping_mask.values, :]  # Indexing with a series causes issues, possibly segfault
    y = labels.loc[grouping]

    clf = LogisticRegression(**kwds)
    clf.fit(X.get(), grouping.to_array().astype('float32'))
    # One coefficient column per gene; transpose to (n_genes, n_classes).
    scores_all = cp.array(clf.coef_).T

    for igroup, group in enumerate(groups_order):
        if len(groups_order) <= 2:  # binary logistic regression
            scores = scores_all[0]
        else:
            scores = scores_all[igroup]

        # Top-n selection via argpartition, then exact ordering of that slice.
        partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]
        partial_indices = cp.argsort(scores[partition])[::-1]
        global_indices = reference_indices[partition][partial_indices]
        rankings_gene_scores.append(scores[global_indices].get())  ## Shouldn't need to take this off device
        rankings_gene_names.append(var_names[global_indices].to_pandas())
        if len(groups_order) <= 2:
            break

    groups_order_save = [str(g) for g in groups_order]
    # In the binary case only the non-reference group is reported.
    if (len(groups) == 2):
        groups_order_save = [g for g in groups_order if g != reference]

    scores = np.rec.fromarrays(
        [n for n in rankings_gene_scores],
        dtype=[(rn, 'float32') for rn in groups_order_save],
    )

    names = np.rec.fromarrays(
        [n for n in rankings_gene_names],
        dtype=[(rn, 'U50') for rn in groups_order_save],
    )

    return scores, names, original_reference
34,839
def init_graph_handler():
    """Init GraphHandler with the default graph proto."""
    graph = get_graph_proto()
    handler = GraphHandler()
    # Register the graph under its own name.
    handler.put({graph.name: graph})
    return handler
34,840
def visualize_2d_activation_map(activation_map: np.ndarray, args: ModelConfigBase, slice_index: int = 0) -> None:
    """
    Saves all feature channels of a 2D activation map as png files.

    :param activation_map: array whose first axis indexes feature channels.
    :param args: model config providing the outputs folder.
    :param slice_index: slice identifier embedded in the output file names.
    :return: None
    """
    destination_directory = str(args.outputs_folder / "activation_maps")
    if not os.path.exists(destination_directory):
        os.mkdir(destination_directory)
    n_features = activation_map.shape[0]
    for feat in range(n_features):
        plt.imshow(vis_activation_map(activation_map[feat]))
        filename = "slice_" + str(slice_index) + "_feature_" + (str(feat) + "_Activation_Map.png")
        plt.savefig(os.path.join(destination_directory, filename))
34,841
def add_average_column(df, *, copy: bool = False):
    """Add a column averaging the power on all channels.

    Parameters
    ----------
    %(df_psd)s
        An 'avg' column is added averaging the power on all channels.
    %(copy)s

    Returns
    -------
    %(df_psd)s
        The average power across channels has been added in the column 'avg'.
    """
    _check_type(copy, (bool,), item_name="copy")
    if copy:
        df = df.copy()
    # Every column that is not metadata is treated as a channel.
    meta_columns = ("participant", "session", "run", "phase", "idx")
    ch_names = [col for col in df.columns if col not in meta_columns]
    df["avg"] = df[ch_names].mean(axis=1)
    return df
34,842
def _is_globbed(name, glob): """ Return true if given name matches the glob list. """ if not glob: return True return any((fnmatch.fnmatchcase(name, i) for i in glob))
34,843
def test_notebookclient_get_kernel_id_with_error_status(plugin, mocker):
    """Test NotebookClient.get_kernel_id() when response has error status.

    The HTTP layer is patched to return 403 Forbidden; the client is
    expected to surface this to the user via a QMessageBox warning.
    """
    response = mocker.Mock()
    content = b'{"message": "error"}'
    response.content = content
    response.status_code = requests.codes.forbidden
    mocker.patch('requests.get', return_value=response)
    # Patch the message box so no real dialog is shown during the test.
    MockMessageBox = mocker.patch('spyder_notebook.widgets.client.QMessageBox')

    plugin.client.get_kernel_id()
    MockMessageBox.warning.assert_called()
34,844
def read_vec_flt(file_or_fd):
    """[flt-vec] = read_vec_flt(file_or_fd)
    Read kaldi float vector, ascii or binary input,

    Parameters
    ----------
    file_or_fd : obj
        An ark, gzipped ark, pipe or opened file descriptor.

    Raises
    ------
    ValueError
        Unsupported data-type of the input file.
    """
    fd = open_or_fd(file_or_fd)
    try:
        binary = fd.read(2)
        if binary == b"\0B":  # binary flag
            # Data-type header: "FV " = float32, "DV " = float64.
            header = fd.read(3)
            if header == b"FV ":
                sample_size = 4  # floats
            elif header == b"DV ":
                sample_size = 8  # doubles
            else:
                # Fix: previously an unknown header left `sample_size` unbound
                # and crashed with UnboundLocalError; raise the documented
                # ValueError instead.
                raise ValueError("UnsupportedDataType")
            # Dimension,
            assert fd.read(1) == b"\4"  # int-size
            vec_size = struct.unpack("<i", fd.read(4))[0]  # vector dim
            # Read whole vector,
            buf = fd.read(vec_size * sample_size)
            if sample_size == 4:
                return np.frombuffer(buf, dtype="float32")
            return np.frombuffer(buf, dtype="float64")
        # ascii,
        arr = (binary + fd.readline()).strip().split()
        try:
            arr.remove("[")
            arr.remove("]")  # optionally
        except ValueError:
            pass
        return np.array(arr, dtype=float)
    finally:
        # Fix: close the descriptor we opened on every path — the original
        # only closed it on the ascii branch, leaking it for binary input.
        if fd is not file_or_fd:
            fd.close()  # cleanup
34,845
def arg_export(name):
    """Export an argument set.

    Registers the decorated function in the module-level _ARG_EXPORTS
    registry under `name` and ensures it carries an `arg_defs` list.
    """
    def _wrapper(func):
        _ARG_EXPORTS[name] = func
        if not hasattr(func, 'arg_defs'):
            func.arg_defs = []
        return func
    return _wrapper
34,846
def get_od_base( mode = "H+S & B3LYP+TPSS0"):  # od is OrderedDict()
    """
    Prepare the initial parameter dictionaries.

    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
           "H+S & B3LYP"       --> ["B3LYP"] with separation of H and S
           "H+S & TPSS0"       --> ["TPSS0"] with separation of H and S
    (only the first mode is currently implemented)

    :return: (od, aod) — run parameters and input/output file settings.
    :raises ValueError: for unsupported modes.
    """
    # Bug fix: the default mode string contains spaces while the comparison
    # string did not, so the default call always raised ValueError. Compare
    # with whitespace stripped; both spellings are now accepted.
    if mode.replace(" ", "") == "H+S&B3LYP+TPSS0":
        od = OrderedDict()
        od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
        od['H + S'] = [True]
        od['CV Mode'] = ['10*5KF/LOO']
        od['Em type'] = ['Chemical potential']
        od['Regularization'] = ['None']
        od['Bounds/Constraints'] = ['None']

        aod = OrderedDict()
        aod['in_file'] = "sheet/EmBT-xM4.csv"
        aod['out_file'] = "sheet/out_" + mode + ".csv"
    else:
        raise ValueError("Not supported: {}".format( mode))

    return od, aod
34,847
def generate_all_specs(
    population_specs, treatment_specs, outcome_specs, model_specs, estimator_specs
):
    """
    Generate all combinations of population, treatment, outcome, causal model
    and estimator, keeping only the valid ones.
    """
    causal_graph = CausalGraph(treatment_specs, outcome_specs, model_specs)
    # Replace the raw model specs with GML-backed ones derived from the graph.
    model_specs = causal_graph.create_gml_model_specs()
    all_combinations = itertools.product(
        population_specs, treatment_specs, outcome_specs, model_specs, estimator_specs
    )
    return list(filter(is_valid_spec, all_combinations))
34,848
def wsFoc(r,psi,L1,z0,alpha):
    """Return optimum focal surface height at radius r as given by
    Chase & Van Speybroeck
    """
    # 1/16 * (psi + 1) * (r^2 L1 / z0^2) / tan(alpha)^2
    tan_sq = tan(alpha) ** 2
    return 0.0625 * (psi + 1) * (r ** 2 * L1 / z0 ** 2) / tan_sq
34,849
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, draw_function=draw_lines, **kwargs):
    """
    `img` should be the output of a Canny transform.

    draw_function: callable accepting (image, lines) used to render lanes.
                   Default: draw_lines()

    Returns an image with hough lines drawn.
    """
    # HoughLinesP requires rho >= 1.
    effective_rho = max(rho, 1)
    lines = cv2.HoughLinesP(
        img, effective_rho, theta, threshold, np.array([]),
        minLineLength=min_line_len, maxLineGap=max_line_gap,
    )
    height, width = img.shape[0], img.shape[1]
    line_img = np.zeros((height, width, 3), dtype=np.uint8)
    draw_function(line_img, lines, **kwargs)
    return line_img
34,850
def check(data, shape, splits=100):
    """Check dataset properties: shapes, CV split count, estimator contract."""
    n_samples = shape[0]
    assert data.data.shape == shape
    assert data.target.shape[0] == n_samples
    # Materialize the outer CV generator to count its folds.
    assert len(list(data.outer_cv)) == splits
    check_estimator(data)
34,851
def test_get_py_loglevel():
    """ Ensure function _get_py_loglevel is exposed """
    # getattr with a None default keeps the original semantics: the attribute
    # must exist AND be non-None.
    func = getattr(log, "_get_py_loglevel", None)
    assert func is not None
34,852
def compare_FISM_diff_spect_by_dn(dns, _f107):
    """
    Compare FISM flare spectra for several dates by plotting, for each date,
    the difference between a wide (+-21 min) and a narrow (+-1 min) window
    around the event, on a log-log waveband/intensity plot.

    Parameters
    ----------
    dns <List[datetime]>: List of datetime of the events
    _f107: per-event F10.7 values used only in the legend labels
        (indexed in the same order as ``dns``)

    Side effects: downloads FISM data from LASP LISIRD (network access),
    writes/removes a temporary CSV, and saves the figure to
    ``op/compare_FISM_diff_spect.png``.
    """
    fig = plt.figure(figsize=(6,4), dpi=120)
    ax = fig.add_subplot(111)
    ax.set_xlabel(r"Waveband ($\lambda$), nm")
    ax.set_ylabel(r"Intensity ($\Phi_0$), $Wm^{-2}nm^{-1}$")
    colors = ["darkred", "darkblue"]
    lws = [1.2, .8]
    # 0.05..190.0 nm in 0.1 nm steps — one HTTP request per bin below.
    bins = np.arange(0.05,190.05,0.1)
    for z,dn in enumerate(dns):
        fname = "op/tmpfism.csv"
        spec = {}
        # ix=0 uses a +-21 minute window, ix=1 a +-1 minute window.
        for ix, dly in enumerate([20,0]):
            spec[ix] = []
            start, end = dn - dt.timedelta(minutes=1+dly), dn + dt.timedelta(minutes=1+dly)
            for b in bins:
                uri = "https://lasp.colorado.edu/lisird/latis/dap/fism_flare_hr.csv?&"+\
                    "time>={:d}-{:02d}-{:02d}T{:02d}:{:02d}:00.000Z&time<={:d}-{:02d}-{:02d}T{:02d}:{:02d}:00.000Z&".format(start.year,
                        start.month, start.day, start.hour, start.minute, end.year, end.month,
                        end.day, end.hour, end.minute)+\
                    "wavelength~{:.02f}".format(b)
                resp = requests.get(uri)
                print(uri)
                with open(fname, "w") as f:
                    f.write(resp.text)
                data = pd.read_csv(fname)
                data["time"] = [dt.datetime(1970,1,1)+dt.timedelta(seconds=x)
                                for x in data["time (seconds since 1970-01-01)"]]
                # NOTE(review): index [1] takes the second row of the window —
                # presumably the sample nearest the event; confirm.
                spec[ix].append(data["irradiance (W/m^2/nm)"].tolist()[1])
            os.remove(fname)
        # Difference between the narrow- and wide-window spectra.
        spec = np.array(spec[1]) - np.array(spec[0])
        ax.loglog(bins, spec, color=colors[z], alpha=0.8, lw=lws[z],
                  label=dn.strftime("%Y-%m-%d %H:%M UT, F107=") + "%.1f"%_f107[z])
        ax.set_xlim(bins[0], bins[-1])
        if z==0:
            # Reference threshold/waveband guide lines (first event only).
            ax.axhline(2.2e-4,color="k",ls="--")
            ax.axvline(0.1,color="k",ls="--")
            ax.axvline(0.8,color="k",ls="--")
    ax.legend(loc=3)
    fig.savefig("op/compare_FISM_diff_spect.png", bbox_inches="tight")
    return
34,853
def config_macsec_keychain_on_device(device, keychain_name, key, crypt_algorithm,
                                     key_string, lifetime=None):
    """ Configures macsec key chain on device
        Args:
            device ('obj'): device to use
            keychain_name ('str'): keychain name to configure
            key ('str'): key identifier to configure under the chain
            crypt_algorithm ('str'): cryptographic algorithm for the key
            key_string ('str'): key string to configure
            lifetime ('list'): start and end timings
                ex.)
                    lifetime = ["10:36:55 Aug 18 2021", "10:37:55 Aug 18 2021"]
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    log.info(
        "Configure macsec key chain {keychain_name} on device".format(
            keychain_name=keychain_name)
    )
    try:
        # Build the nested CLI block; lifetime is optional.
        configs = [
            "key chain {keychain_name} macsec".format(
                keychain_name=keychain_name),
            "key {key}".format(key=key),
            "cryptographic-algorithm {crypt_algorithm}".format(
                crypt_algorithm=crypt_algorithm),
            "key-string {key_string}".format(key_string=key_string)]
        if lifetime is not None:
            configs.append("lifetime local {start} {end}".format(
                start=lifetime[0], end=lifetime[1]))
        device.configure(configs)
    except SubCommandFailure as e:
        # Re-raise with device context so callers can identify the target.
        raise SubCommandFailure(
            "Could not configure macsec key chain {keychain_name} on "
            "device {device}, Error: {error}".format(
                keychain_name=keychain_name, device=device.name, error=e)
        )
34,854
def instances_to_topographies(topographies, surfaces, tags):
    """Returns a queryset of topographies, based on given instances

    Given topographies, surfaces and tags are resolved and
    all topographies are returned which are either

    - explicitly given
    - given indirectly by a surface
    - given indirectly by a tag, if the topography is tagged accordingly
    - given indirectly by a tag, if its surface is tagged accordingly

    Parameters
    ----------
    topographies: sequence of topographies
    surfaces: sequence of surfaces
    tags: sequence of tags

    Returns
    -------
    Queryset of topography, distinct
    """
    from .models import Topography
    # Collect primary keys so the unions below are plain id-based filters.
    topography_ids = [topo.id for topo in topographies]
    surface_ids = [s.id for s in surfaces]
    tag_ids = [tag.id for tag in tags]
    # Union of the four selection paths described in the docstring.
    topographies = Topography.objects.filter(id__in=topography_ids)
    topographies |= Topography.objects.filter(surface__in=surface_ids)
    topographies |= Topography.objects.filter(surface__tags__in=tag_ids)
    topographies |= Topography.objects.filter(tags__in=tag_ids)
    # distinct() because the tag joins can yield duplicates.
    return topographies.distinct().order_by('id')
34,855
def plot_histogram(
    s: pd.Series,
    *,
    number_bins: Optional[int] = None,
    bin_range: Optional[Tuple[int, int]] = None,
    figsize: Optional[Tuple[int, int]] = (8, 6),
    bin_width: Optional[int] = None,
    edgecolor: Optional[str] = '#ffffff',
    linewidth: Optional[int] = 1,
    bin_label_bool: Optional[bool] = False,
    color: Optional[str] = '#0077bb'
) -> Tuple[plt.Figure, axes.Axes]:
    """
    Plot a histogram of a Series.

    Parameters
    ----------
    s : pd.Series
        The input series.
    number_bins : Optional[int] = None
        The number of equal-width bins in the range s.max() - s.min().
    bin_range : Optional[Tuple[int, int]] = None
        The lower and upper range of the bins. If not provided, range is
        (s.min(), s.max()).
    figsize : Optional[Tuple[int, int]] = (8, 6)
        The figure size width, height (inch).
    bin_width : Optional[int] = None
        The width of the bin in same units as the series s.
    edgecolor : Optional[str] = '#ffffff'
        The hexadecimal color value for the bar edges.
    linewidth : Optional[int] = 1
        The bar edges line width (point).
    bin_label_bool : Optional[bool] = False
        If True, label the bars with count and percentage of total.
    color : Optional[str] = '#0077bb'
        The color of the bar faces.

    Returns
    -------
    fig, ax : Tuple[plt.Figure, axes.Axes]

    Examples
    --------
    Example 1
    # Series of random floats, default histogram.
    >>> import datasense as ds
    >>> s = ds.random_data()
    >>> fig, ax = ds.plot_histogram(s=s)

    Example 2
    # Control bin width.
    >>> s = ds.random_data(distribution='randint', size=113, low=0, high=14)
    >>> fig, ax = ds.plot_histogram(s=s, bin_width=1)

    Example 3
    # Control bin width and plotting range.
    >>> s = ds.random_data(distribution='norm', size=113, loc=69, scale=13)
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=5,
    >>>     bin_range=(30, 110)
    >>> )

    Example 4
    # Label the bars and set colours and figure size.
    >>> s = ds.random_data(distribution='norm', size=113, loc=69, scale=13)
    >>> fig, ax = ds.plot_histogram(
    >>>     s=s,
    >>>     bin_width=5,
    >>>     bin_range=(30, 110),
    >>>     figsize=(10, 8),
    >>>     bin_label_bool=True,
    >>>     color='#33bbee'
    >>> )
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # Derive the bin count from the requested bin width.
    if bin_width and not bin_range:
        number_bins = math.ceil((s.max() - s.min()) / bin_width)
    elif bin_width and bin_range:
        # NOTE(review): int() truncates here while the branch above rounds
        # up; preserved as-is for backward compatibility.
        number_bins = int((bin_range[1] - bin_range[0]) / bin_width)
    counts, bins, patches = ax.hist(
        x=s,
        bins=number_bins,
        range=bin_range,
        edgecolor=edgecolor,
        linewidth=linewidth,
        color=color
    )
    if bin_label_bool:
        ax.set_xticks(bins)
        ax.xaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
        bin_centers = 0.5 * np.diff(bins) + bins[:-1]
        for count, x in zip(counts, bin_centers):
            # Count just below the axis; percentage of total beneath it.
            ax.annotate(
                text=f'{str(int(count))}',
                xy=(x, 0),
                xytext=(0, -18),
                xycoords=(
                    'data',
                    'axes fraction'
                ),
                textcoords='offset points',
                va='top',
                ha='center'
            )
            percent = f'{(100 * float(count) / counts.sum()):0.0f} %'
            ax.annotate(
                text=percent,
                xy=(x, 0),
                xytext=(0, -32),
                xycoords=(
                    'data',
                    'axes fraction'
                ),
                textcoords='offset points',
                va='top',
                ha='center'
            )
    return (fig, ax)
34,856
def get_tracks():
    """Return every track stored in the minerva DB as a list."""
    # Connect lazily so the module can be imported without a live DB.
    db = connect_minerva_db()
    return [track for track in db.tracks.find()]
34,857
def test_alphabet_as_fstring_violation(
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """Testing that the fstrings violate the rules."""
    # Build the source f"<ascii letters>" and parse it.
    source = 'f"' + string.ascii_letters + '"'
    tree = parse_ast_tree(source)

    visitor = WrongStringVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(
        visitor,
        [StringConstantRedefinedViolation],
        ignored_types=FormattedStringViolation,
    )
34,858
def mult_pair(pair):
    """Return the product of two, potentially large, numbers.

    ``pair`` is any indexable holding the factors at positions 0 and 1.
    """
    first, second = pair[0], pair[1]
    return first * second
34,859
def _raise(exception):
    """Raise *exception*.

    Useful where a statement is not allowed — e.g. inside a lambda or a
    conditional expression — since ``raise`` itself is a statement.
    """
    raise exception
34,860
def getproblem(URLs):
    """
    For each Codeforces problem URL, capture a full-page screenshot of the
    problem statement (the ``ttypography`` div holds the whole statement),
    save it to ``image.png``, and convert that image into a PDF named after
    the problem title, stored under ``./problems_pdf``.

    Requires Chrome + chromedriver (``DRIVER_PATH``), Pillow and fpdf.
    """
    path = 'image.png'
    # Creating a Target Output Folder
    target_folder = './Coderforces_Problem_Scrapper/problems_pdf'
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    options = webdriver.ChromeOptions()
    # Headless = True for taking a scrolling snapshot
    options.headless = True
    driver = webdriver.Chrome(DRIVER_PATH, options=options)
    file_counter = 1
    for url in URLs:
        driver.get(url)
        # Deciding height by tag: size the window to the full document so
        # the screenshot is not clipped.
        required_height = driver.execute_script(
            'return document.body.parentNode.scrollHeight')
        driver.set_window_size(1366, required_height)
        title = driver.find_element_by_class_name("title").text
        # NOTE(review): title[3:] presumably strips a "X. " index prefix
        # from the problem title — confirm against actual page titles.
        filename = title[3:] + '.pdf'
        # Taking SS of everything within the ttypography class
        driver.find_element_by_class_name('ttypography').screenshot(path)
        # Open the image with Pillow to read its height and width.
        cover = Image.open(path)
        WIDTH, HEIGHT = cover.size
        MARGIN = 10
        # based on image's height and width we are adjusting the pdf margin and borders
        pdf = FPDF(unit='pt', format=[WIDTH + 2 * MARGIN, HEIGHT + 2 * MARGIN])
        pdf.add_page()  # Adding new page to the pdf
        pdf.image(path, MARGIN, MARGIN)
        # saving the pdf with the specified filename
        pdf.output(os.path.join(target_folder, filename), "F")
        # NOTE(review): the literal "(unknown)" below looks like a
        # placeholder that was meant to interpolate the saved filename.
        print(f'File saved in your directory ./problems_pdf/(unknown) ({file_counter}/{len(URLs)}) !')
        file_counter += 1
34,861
def get_only_filename(file_list):
    """
    Get filename from each file path and return the list of bare names.

    Input:
        file_list: List. file's paths list.

    Attribute:
        file_name: String. "01.jpg"
        file_name_without_ext: String. "01"

    Return:
        filename_list: Only filename list (no directory, no extension).
    """
    import os

    filename_list = list()
    for file_path in file_list:
        # os.path handles both "/" and OS-native separators.
        file_name = os.path.basename(file_path)
        # splitext strips only the final extension, so dotted names such
        # as "a.b.jpg" keep "a.b" (splitting on the first "." mangled them).
        file_name_without_ext = os.path.splitext(file_name)[0]
        filename_list.append(file_name_without_ext)

    return filename_list
34,862
def simple_calculate_hmac(sym_key, message, digest_algo=DIGEST_ALGORITHM.SHA256):
    """Calculates a HMAC of given message using symmetric key.

    :param sym_key: symmetric key handle created by the yaca library
    :param message: bytes-like message to authenticate
    :param digest_algo: digest algorithm (default: SHA256)
    :returns: the MAC as a ``bytes`` object
    """
    message_param = _get_char_param_nullify_if_zero(message)
    # Output buffer is allocated by the C library and returned via pointer.
    mac = _ctypes.POINTER(_ctypes.c_char)()
    mac_length = _ctypes.c_size_t()
    _lib.yaca_simple_calculate_hmac(digest_algo.value, sym_key,
                                    message_param, len(message),
                                    _ctypes.byref(mac),
                                    _ctypes.byref(mac_length))
    # Copy the MAC into Python-owned bytes, then free the C allocation.
    mac_bytes = mac[:mac_length.value]
    _lib.yaca_free(mac)
    return mac_bytes
34,863
def convert_from_sliced_object(data):
    """Fix the memory of multi-dimensional sliced object.

    Views into another ndarray are materialized with ``np.copy`` so the
    caller always holds independently-owned memory; anything else is
    returned untouched.
    """
    is_view = isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray)
    if not is_view:
        return data
    if not data.flags.c_contiguous:
        _log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
                     "due to it will double the peak memory cost in LightGBM.")
    return np.copy(data)
34,864
def write_file(conf, data):
    """Write the data to the file specified in the conf.

    If there is an existing file in the destination, compare the new
    contents with the existing contents.

    Return True if there is a difference.
    """
    owner = conf.get('owner')
    # Check for user and group id in the environment.
    try:
        uid = pwd.getpwnam(owner).pw_uid
    except KeyError:
        LOG.error('The specified user does not exist: {}'.format(owner))
        sys.exit(1)
    # NOTE(review): the group id is taken from the owner's passwd entry
    # (primary group), not from a separate group name via the grp module,
    # even though the error message talks about a group — confirm intent.
    try:
        gid = pwd.getpwnam(owner).pw_gid
    except KeyError:
        LOG.error('The specified group does not exist: {}'.format(owner))
        sys.exit(1)
    dest = conf.get('dest')
    # NOTE(review): perm is parsed as a decimal int (e.g. "0644" -> 644)
    # and passed textually to install --mode, which reads it as octal.
    perm = int(conf.get('perm', 0))
    # Stage the new contents in a temp file so we can diff before install.
    with tempfile.NamedTemporaryFile(prefix='kolla-mesos', delete=False) as tf:
        tf.write(data.encode('utf-8'))
        tf.flush()
        tf_name = tf.name
    if os.path.exists(dest) and filecmp.cmp(tf_name, dest, shallow=False):
        LOG.debug('write_file: %s not changed', dest)
        return False
    try:
        # install(1) atomically copies with the requested owner/group/mode.
        inst_cmd = ' '.join(['sudo', 'install', '-v', '--no-target-directory',
                             '--group=%s' % gid, '--mode=%s' % perm,
                             '--owner=%s' % uid, tf_name, dest])
        subprocess.check_call(inst_cmd, shell=True)
    except subprocess.CalledProcessError as exc:
        LOG.error(exc)
        LOG.exception(inst_cmd)
    return True
34,865
def merge_to_media(apps, schema_editor):
    """
    Migration step that will merge the CarbonSource table and the
    "Carbon Source (workaround)" metadata, into the "Media" metadata.

    The values merge with newlines between any existing values, and
    renders the CarbonSource table as a line each for name, description,
    and labeling fields.
    """
    Line = apps.get_model("main", "Line")
    MetadataType = apps.get_model("main", "MetadataType")
    # Fixed UUID identifying the "Media" metadata type.
    media = MetadataType.objects.get(uuid="463546e4-a67e-4471-a278-9464e78dbc9d")
    # first move the table data
    qs = Line.objects.filter(carbon_source__isnull=False)
    qs = qs.prefetch_related("carbon_source")
    for line in qs:
        media_def = [stringify_carbon_source(cs) for cs in line.carbon_source.all()]
        # Keep any pre-existing Media value at the front of the merge.
        media_def.insert(0, extract_metadata(line, media))
        # filter(bool, ...) drops empty pieces before joining with newlines.
        set_metadata(line, media, "\n".join(filter(bool, media_def)))
        line.carbon_source.clear()
        line.save()
    # next move workaround data
    try:
        workaround = MetadataType.objects.get(
            uuid="814ab824-3cda-49cb-b838-904236720041"
        )
        qs = Line.objects.filter(metadata__has_key=f"{workaround.pk}")
        for line in qs:
            media_def = [
                extract_metadata(line, media),
                extract_metadata(line, workaround),
            ]
            set_metadata(line, media, "\n".join(filter(bool, media_def)))
            line.save()
        workaround.delete()
    except MetadataType.DoesNotExist:
        # don't care about merging if the type does not exist anyway
        pass
    # delete the built-in metadata
    MetadataType.objects.filter(uuid="4ddaf92a-1623-4c30-aa61-4f7407acfacc").delete()
34,866
def stat_helper(path):
    """os.path.exists will return None for PermissionError (or any other
    exception), leading us to believe a file is not present when it, in
    fact, is. This behavior is awful, so stat_helper preserves any
    exception other than FileNotFoundError (which yields None).
    """
    try:
        result = path.stat()
    except FileNotFoundError:
        return None
    return result
34,867
def create_beacon_and_now_datetime(
    game_name: str = "st", waiting_time: float = 12.0, platform_name: str = "pc"
) -> Tuple[beacons.BeaconBase, datetime.datetime]:
    """Create a beacon whose start time is the current UTC time.

    Returns the beacon together with the second-precision timestamp used
    as its start.
    """
    started_at = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0)
    new_beacon = create_beacon(
        game_name=game_name,
        waiting_time=waiting_time,
        platform_name=platform_name,
        start=started_at,
    )
    return new_beacon, started_at
34,868
def validate(number, check_country=True):
    """Checks to see if the number provided is a valid IBAN. The country-
    specific check can be disabled with the check_country argument.

    :param number: IBAN string; separators/case are normalised by compact()
    :param check_country: run the per-country BBAN validator when available
    :returns: the compact representation of the number
    :raises InvalidFormat: if the BBAN does not match the country structure
    """
    number = compact(number)
    # ensure that checksum is valid (ISO 7064 mod 97-10 over rotated number)
    mod_97_10.validate(number[4:] + number[:4])
    # look up the number
    info = _ibandb.info(number)
    # check if the bban part of number has the correct structure
    bban = number[4:]
    if not _struct_to_re(info[0][1].get('bban', '')).match(bban):
        raise InvalidFormat()
    # check the country-specific module if it exists
    if check_country:
        module = _get_cc_module(number[:2])
        if module:
            module.validate(number)
    # return the compact representation
    return number
34,869
def read_group(fname):
    """Reads the symmetry group in from the 'rot_perms' styled group
    output by enum.x.

    :arg fname: path to the file to read the group from.
    """
    i = 0
    groupi = []
    with open(fname) as f:
        for line in f:
            # Skip the 6-line header before parsing permutations.
            if i > 5:
                if ('Perm #:') in line:
                    # New permutation: entries start at the 5th column.
                    groupi.append(list(map(int, line.split()[4::])))
                else:
                    # Continuation line of the current permutation.
                    groupi[-1] += list(map(int, line.split()))
            i += 1
    from numpy import array
    # Convert from Fortran 1-based to Python 0-based indices.
    return(list(map(list, array(groupi)-1)))
34,870
def inject_path(path):
    """
    (Re)imports the Python module located at *path* so that module-level
    debug code re-runs. The module is reloaded on every call, allowing the
    file to be edited while the application is running. The containing
    directory is temporarily added to ``sys.path`` when necessary.

    Returns ``None`` on success, or the raised exception object on failure
    (the exception is returned, not raised).

    NOTE(review): an earlier docstring claimed a function from the module
    is executed with ``*args, **kwargs`` — no such call exists here;
    confirm whether callers rely on module-level side effects only.
    NOTE(review): relies on a ``reload`` name being in scope (builtin on
    Python 2; on Python 3 it must come from ``importlib``) — verify the
    file-level imports.
    """
    try:
        dirname = os.path.dirname(path)
        # Remember whether the directory was already importable so we only
        # remove it if we added it ourselves.
        if dirname not in sys.path:
            exists_in_sys = False
            sys.path.append(dirname)
        else:
            exists_in_sys = True
        module_name = os.path.splitext(os.path.split(path)[1])[0]
        if module_name in sys.modules:
            reload(sys.modules[module_name])
        else:
            __import__(module_name)
        if not exists_in_sys:
            sys.path.remove(dirname)
    except Exception as e:
        return e
34,871
def install_and_import_module(module_name, package_name=None, global_name=None):
    """
    Installs the package through pip and attempts to import the installed module.

    :param module_name: Module to import.
    :param package_name: (Optional) Name of the package that needs to be installed.
        If None it is assumed to be equal to the module_name.
    :param global_name: (Optional) Name under which the module is imported.
        If None the module_name will be used. This allows to import under a
        different name with the same effect as e.g. "import numpy as np" where
        "np" is the global_name under which the module can be accessed.
    :returns: None; on success the module is bound via import_module().
    :raises: subprocess.CalledProcessError and ImportError
    """
    if package_name is None:
        package_name = module_name

    if global_name is None:
        global_name = module_name

    # Blender disables the loading of user site-packages by default. However, pip will still check them to determine
    # if a dependency is already installed. This can cause problems if the packages is installed in the user
    # site-packages and pip deems the requirement satisfied, but Blender cannot import the package from the user
    # site-packages. Hence, the environment variable PYTHONNOUSERSITE is set to disallow pip from checking the user
    # site-packages. If the package is not already installed for Blender's Python interpreter, it will then try to.
    # The paths used by pip can be checked with `subprocess.run([bpy.app.binary_path_python, "-m", "site"], check=True)`

    # Store the original environment variables
    environ_orig = dict(os.environ)
    os.environ["PYTHONNOUSERSITE"] = "1"

    try:
        # Try to install the package. This may fail with subprocess.CalledProcessError
        # Uses Blender's bundled interpreter so the install targets its site-packages.
        subprocess.run([bpy.app.binary_path_python, "-m", "pip", "install", package_name], check=True)
    finally:
        # Always restore the original environment variables
        os.environ.clear()
        os.environ.update(environ_orig)

    # The installation succeeded, attempt to import the module again
    import_module(module_name, global_name)
34,872
def find_last_layer(model):
    """Find last layer.

    Args:
        model (_type_): Model exposing a ``layers`` sequence.

    Returns:
        _type_: Last layer, or ``None`` when the model has no layers.
    """
    return next(reversed(model.layers), None)
34,873
def _update_color_state(widget, state):
    """Update the colors on `widget`, depending on a given state.

    Args:
        widget (:class:`PySide.QtGui.QWidget`): The widget to change.
        state (:class:`PySide.QtGui.QValidator.State`):
            A state to display as a new color.

    Raises:
        ValueError: If `state` isn't a valid option for this function.
    """
    if state not in _STATE_STYLE_OPTIONS:
        raise ValueError(
            'State "{state}" was invalid. Options were, "{_STATE_STYLE_OPTIONS}".'
            "".format(state=state, _STATE_STYLE_OPTIONS=sorted(_STATE_STYLE_OPTIONS,))
        )

    widget.setProperty("state", _STATE_STYLE_OPTIONS[state])
    # Re-polish so the style sheet picks up the changed property.
    widget_style = widget.style()
    widget_style.unpolish(widget)
    widget_style.polish(widget)
34,874
def seconds_to_time(sec):
    """
    Format a number of seconds as a zero-padded "MM:SS" string.

    Note: hours are not split out — e.g. 3661 seconds renders as "61:01".
    """
    return "%02d:%02d" % divmod(sec, 60)
34,875
def check_dmd_computation_simple_timeseries(exact, total):
    """
    Check DMD computations on a problem where the true solution is known.

    All variants of DMD should give identical outputs.

    :param exact: passed through to dmdtools.DMD (variant flag)
    :param total: passed through to dmdtools.DMD (variant flag)
    """
    num_snapshots = 10
    A = np.array([[0.9, 0.1], [0.0, 0.8]])

    # Create a time series of data by iterating the linear map A.
    data = np.zeros((2, num_snapshots+1))
    data[:, 0] = np.random.randn(2)
    for ii in range(num_snapshots):
        data[:, ii + 1] = A.dot(data[:, ii])

    # Reference solution: least-squares operator Y X^+ and its eigenpairs,
    # sorted by decreasing eigenvalue magnitude.
    X = data[:, :-1]
    Y = data[:, 1:]
    ADMD = Y.dot(np.linalg.pinv(X))
    vals, vecs = np.linalg.eig(ADMD)
    inds = np.argsort(np.abs(vals))[::-1]
    vals = vals[inds]
    vecs = vecs[:, inds]

    # DMD class with rank 2
    DMD = dmdtools.DMD(2, exact, total)
    DMD.fit(data)
    dmd_vals, dmd_modes = dmdtools.sort_modes_evals(DMD)

    # Eigenvalues must match to tight tolerance; modes up to scaling.
    for ii in range(len(vals)):
        assert np.abs(dmd_vals[ii] - vals[ii]) < 1e-10
        check_eigenvectors(vecs[:, ii], dmd_modes[:, ii])
34,876
def _compute_nfp_real(l, u, counts, sizes): """Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], using the real set size distribution. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives. """ if l > u: raise ValueError("l must be less or equal to u") return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
34,877
def tail_log():
    """
    Watch the tilestache logs (Fabric task; blocks until interrupted).
    """
    # Follows the uwsgi app log on the remote host via Fabric's run().
    run("sudo tail -f /var/log/uwsgi/app/tilestache.log")
34,878
def is_equal_subset(
    subset: Union[Dict, List, Set], superset: Union[Dict, List, Set]
) -> bool:
    """Determine whether *subset* is structurally contained in *superset*.

    Dicts require every key to exist in *superset* with a (recursively)
    matching value; lists/sets require every element to match some element
    of *superset*; plain values compare with ``==``.
    """
    if isinstance(subset, dict):
        for key, val in subset.items():
            if key not in superset:
                return False
            if not is_equal_subset(val, superset[key]):
                return False
        return True
    if isinstance(subset, (list, set)):
        for subitem in subset:
            if not any(is_equal_subset(subitem, superitem) for superitem in superset):
                return False
        return True
    # Plain value: fall back to equality.
    return subset == superset
34,879
def test_main_help(monkeypatch, usage, capsys):
    """Test the help command without argument."""
    # Pretend the program was invoked as `foo help`.
    monkeypatch.setattr(sys, "argv", ["foo", "help"])
    main.main()
    printed_lines = capsys.readouterr().out.splitlines()
    assert len(printed_lines) == len(usage)
34,880
def concatenate_over(argname):
    """Decorator to "vectorize" functions and concatenate outputs

    When the argument named *argname* is a list, the wrapped function is
    called once per element and the resulting iterables are concatenated
    into a single list; otherwise the function is called unchanged.
    """
    def _prepare_args(arg_map, value):
        # Shallow-copy the argument map, overriding argname with one value.
        params = copy(arg_map)
        params[argname] = value
        return params

    @decorator
    def _concatenate_over(func, *args, **kwargs):
        """Call *func* once per element of *argname* (when it is a list)
        and concatenate the resulting iterables.
        """
        arg_map = map_parameters_in_fn_call(args, kwargs, func)
        value = arg_map.get(argname)

        if isinstance(value, list):
            return list(
                itertools.chain.from_iterable(
                    func(**_prepare_args(arg_map, v)) for v in value))
        else:
            return func(**arg_map)

    return _concatenate_over
34,881
def checkFile(path: str):
    """Return *path* when it exists; otherwise print an error and exit.

    Only used for inputs that must already exist on disk.

    Args:
        path: Filesystem path to check.

    Returns:
        The unchanged ``path`` when it exists.

    Raises:
        SystemExit: with code 1 when the path is missing (previously the
            code exited with status 0, signalling success on failure).
    """
    if not os.path.exists(path):
        print('File: "' + path + '", is not readable.')
        raise SystemExit(1)
    return path
34,882
def logicalToPhysicalPoint(window, x, y):
    """Converts the logical coordinates of a point in a window to physical coordinates.

    This should be used when points are received directly from a window
    that is not DPI aware.

    @param window: The window handle.
    @param x: The logical x coordinate.
    @type x: int
    @param y: The logical y coordinate.
    @type y: int
    @return: The physical x and y coordinates.
    @rtype: tuple of (int, int)
    """
    # Fall back to the unconverted coordinates when the Win32
    # LogicalToPhysicalPoint entry point is unavailable on this system.
    if not _logicalToPhysicalPoint:
        return x, y
    # The API mutates the POINT structure in place.
    point = ctypes.wintypes.POINT(x, y)
    _logicalToPhysicalPoint(window, ctypes.byref(point))
    return point.x, point.y
34,883
def _node2vec_walks(Tdata, Tindptr, Tindices,
                    sampling_nodes,
                    walklen,
                    return_weight,
                    neighbor_weight):
    """
    Create biased random walks from the transition matrix of a graph
    in CSR sparse format. Bias method comes from Node2Vec paper.

    Parameters
    ----------
    Tdata : 1d np.array
        CSR data vector from a sparse matrix. Can be accessed by M.data
    Tindptr : 1d np.array
        CSR index pointer vector from a sparse matrix.
        Can be accessed by M.indptr
    Tindices : 1d np.array
        CSR column vector from a sparse matrix. Can be accessed by M.indices
    sampling_nodes : 1d np.array of int
        List of node IDs to start random walks from.
        Is generally equal to np.arange(n_nodes) repeated for each epoch
    walklen : int
        length of the random walks
    return_weight : float in (0, inf]
        Weight on the probability of returning to node coming from
        Having this higher tends the walks to be
        more like a Breadth-First Search.
        Having this very high  (> 2) makes search very local.
        Equal to the inverse of p in the Node2Vec paper.
    neighbor_weight : float in (0, inf]
        Weight on the probability of visitng a neighbor node
        to the one we're coming from in the random walk
        Having this higher tends the walks to be
        more like a Depth-First Search.
        Having this very high makes search more outward.
        Having this very low makes search very local.
        Equal to the inverse of q in the Node2Vec paper.

    Returns
    -------
    out : 2d np.array (n_walks, walklen)
        A matrix where each row is a biased random walk,
        and each entry is the ID of the node
    """
    n_walks = len(sampling_nodes)
    res = np.empty((n_walks, walklen), dtype=Tindices.dtype)
    # Walks are independent, so the outer loop parallelizes with prange.
    for i in numba.prange(n_walks):
        # Current node (each element is one walk's state)
        state = sampling_nodes[i]
        res[i, 0] = state
        # Do one normal step first: no previous node yet, so no bias.
        state = _node2vec_first_step(state, Tdata, Tindices, Tindptr)
        for k in range(1, walklen-1):
            # Write state
            res[i, k] = state
            # Biased step: the inner kernel reads the previous node from
            # res[i, k-1] to apply return/neighbor weights.
            state = _node2vec_inner(
                res, i, k, state,
                Tdata, Tindices, Tindptr,
                return_weight, neighbor_weight
            )
        # Write final states
        res[i, -1] = state
    return res
34,884
def send_top(update: Update, context: CallbackContext) -> None:
    """Process incoming message, send top tracks by the given artist or
    send an error message.

    The message text is treated as the artist keyphrase; each resulting
    YouTube video ID is sent back as a separate chat message.
    """
    logger.info(
        f'(send_top) Incoming message: args={context.args}, text="{update.message.text}"'
    )
    keyphrase = update.message.text
    # Show "typing..." while the lookup runs.
    context.bot.send_chat_action(
        chat_id=update.message.chat_id, action=ChatAction.TYPING
    )
    try:
        top = asyncio.run(get_top(keyphrase))
    except PlaylistRetrievalError as e:
        # Artist lookup on Last.fm failed.
        logger.error(e)
        context.bot.send_message(
            chat_id=update.message.chat_id,
            text=f"An error occurred, most likely I couldn't find this artist on Last.fm."
            f"\nMake sure this name is correct.",
        )
    except VideoIDsRetrievalError as e:
        # Track list resolved but YouTube lookup failed.
        logger.error(e)
        context.bot.send_message(
            chat_id=update.message.chat_id, text=f"Unable to get videos from YouTube."
        )
    except Exception as e:
        logger.exception(e)
        context.bot.send_message(
            chat_id=update.message.chat_id,
            text=f"Unexpected error, feel free to open an issue on GitHub: "
            f"github.com/pltnk/toptracksbot/issues/new",
        )
    else:
        if top:
            # One message per video so the client previews each link.
            for youtube_id in top:
                context.bot.send_message(
                    chat_id=update.message.chat_id,
                    text=f"youtube.com/watch?v={youtube_id}",
                )
        else:
            context.bot.send_message(
                chat_id=update.message.chat_id,
                text=f"I couldn't find videos of {keyphrase} on YouTube.",
            )
34,885
def update(params):
    """Handles the 'change' operation for modifying a file.

    Expected flags in 'params' are translated to Json Field names to
    identify modifications to be made.

    Reads ``--dsid`` and ``--fileid`` (falling back to "missing_id") and
    maps ``--catid`` to the server's ``target_category_id`` field.
    """
    dsid = params.get("--dsid", "missing_id")
    fileid = params.get("--fileid", "missing_id")

    # CLI flag -> server JSON field mapping.
    expectedArgs = {'--catid': 'target_category_id'}
    kwargs = translate_flags(expectedArgs, params)
    kwargs["action"] = "change_category"

    rsp = server.files.action(dsid, fileid, **kwargs)
    # A None response indicates the API call failed.
    if rsp is None:
        reportApiError(server, f"Failure attempting to change file id '{fileid}' in dataset '{dsid}'")
    else:
        reportSuccess(server, f"Changed file id '{fileid}' in dataset '{dsid}'")
34,886
def turn(direction):
    """Write *direction* to the control FIFO to steer the robot.

    Args:
        direction: Command string understood by the FIFO reader process.
    """
    # Closing the file flushes the write immediately, so explicit
    # unbuffered mode is unnecessary — and open(FIFO, "w", 0) raises
    # ValueError on Python 3 (unbuffered text I/O is not allowed).
    with open(FIFO, "w") as fifo:
        fifo.write(direction)
    # ensure the reader process can read it before we write again
    # (the original slept twice; the total delay is preserved).
    time.sleep(wait)
    time.sleep(wait)
34,887
def schedule_prettify(schedule):
    """Format a single lesson entry for the bot.

    ``schedule`` layout: [weekday, time, lesson type, subject name,
    teacher name, location], e.g.:
    ['Чт', '13:00 – 14:30', 'ПЗ', 'Физическая культура', '', 'Кафедра']

    Empty fields are omitted from the output; an empty ``schedule``
    yields a "no classes today" placeholder message.
    """
    if not schedule:
        return 'Сегодня занятий нету'
    lesson_time = '⌚ ' + schedule[1] + '\n'
    lesson_type = schedule[2] if schedule[2] else ''
    if schedule[3]:
        subject = '📝 ' + schedule[-3] + '\n'
    else:
        # Fall back to the lesson type when no subject name is given.
        subject = '📝 ' + lesson_type + '\n'
    teacher = '👤 ' + schedule[4] + '\n' if schedule[4] else ''
    location = '📍 ' + schedule[5] + '\n' if schedule[5] else ''
    return teacher + subject + lesson_time + location + '\n'
34,888
def autodelegate(prefix=''):
    """
    Returns a method that takes one argument and calls the method named
    prefix+arg, calling `notfound()` if there isn't one. Example:

        urls = ('/prefs/(.*)', 'prefs')
        class prefs:
            GET = autodelegate('GET_')

            def GET_password(self): pass
            def GET_privacy(self): pass

    `GET_password` would get called for `/prefs/password` while `GET_privacy` for
    `GET_privacy` gets called for `/prefs/privacy`.

    If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
    is called.
    """
    def internal(self, arg):
        # Split off the first path segment; the remainder (if any) is
        # passed through as a single '/rest' argument.
        if '/' in arg:
            first, rest = arg.split('/', 1)
            func = prefix + first
            args = ['/' + rest]
        else:
            func = prefix + arg
            args = []

        if hasattr(self, func):
            try:
                return getattr(self, func)(*args)
            except TypeError:
                # NOTE(review): intended to map a wrong-arity call to 404,
                # but this also swallows TypeErrors raised *inside* the
                # handler — confirm this trade-off is acceptable.
                return notfound()
        else:
            return notfound()
    return internal
34,889
def _compute_new_static_size(image, min_dimension, max_dimension):
    """Compute new static shape for resize_to_range method.

    Picks a single scale factor so the larger image side does not exceed
    ``max_dimension`` while the smaller side is at least ``min_dimension``
    when possible, and returns the new [height, width, channels] as a
    tf.constant.
    """
    image_shape = image.get_shape().as_list()
    orig_height = image_shape[0]
    orig_width = image_shape[1]
    num_channels = image_shape[2]
    # Scale factor such that maximal dimension is at most max_dimension
    orig_max_dim = max(orig_height, orig_width)
    small_scale_factor = max_dimension / float(orig_max_dim)
    # if this factor is less than 1 we have to act!
    # Scale factor such that minimal dimension is at least min_dimension
    orig_min_dim = min(orig_height, orig_width)
    large_scale_factor = min_dimension / float(orig_min_dim)
    # If image is already big enough... do nothing
    large_scale_factor = max(large_scale_factor, 1.0)
    # Take the minimum (we ensure that maxdim is not exceeded and if possible min_dim is met also)
    scale_factor = min(small_scale_factor, large_scale_factor)
    new_height = int(round(orig_height * scale_factor))
    new_width = int(round(orig_width * scale_factor))
    new_size = [new_height, new_width]
    return tf.constant(new_size + [num_channels])
34,890
def deepupdate(original, update):
    """
    Recursively merge *original* into *update*, mutating *update*.

    Keys missing from *update* are copied from *original*; nested dicts
    are merged the same way instead of being overwritten, and values
    already present in *update* win. Returns the mutated *update*.
    """
    for key in original:
        source_value = original[key]
        if key not in update:
            update[key] = source_value
            continue
        if isinstance(source_value, dict):
            deepupdate(source_value, update[key])
    return update
34,891
def set_gae_attributes(span):
    """Set the GAE environment common attributes.

    Reads each environment variable listed in GAE_ATTRIBUTES and, when
    present, records it on *span* under the mapped attribute key.
    """
    for env_var, attribute_key in GAE_ATTRIBUTES.items():
        attribute_value = os.environ.get(env_var)

        if attribute_value is not None:
            # Format the single key/value pair through the Attributes
            # helper so it matches the trace-API attributeMap schema.
            pair = {attribute_key: attribute_value}
            pair_attrs = Attributes(pair)\
                .format_attributes_json()\
                .get('attributeMap')

            _update_attr_map(span, pair_attrs)
34,892
def _omega_spectrum_odd_c(n, field):
    r"""Spectra of groups \Omega_{2n+1}(q) for odd q.

    [1, Corollary 6]

    Returns a lazy chain over the five element-order families (1)-(5)
    of the corollary.
    """
    # Corollary is stated for \Omega_{2n+1}; translate dimension to rank.
    n = (n - 1) // 2
    q = field.order
    p = field.char
    # (1)
    t = (q ** n - 1) // 2
    a1 = [t, t + 1]
    # (2)
    a2 = SemisimpleElements(q, n, min_length=2)
    # (3) p-singular orders: loop until the rank budget is exhausted.
    k = 1
    a3 = []
    while True:
        n1 = n - (p ** (k - 1) + 1) // 2
        if n1 < 1:
            break
        t = (q ** n1 - 1) // 2
        a3.extend([t * p ** k, (t + 1) * p ** k])
        k += 1
    # (4)
    a4 = MixedElements(q, n,
                       lambda k: (p ** (k - 1) + 1) // 2,
                       lambda k: p ** k, min_length=2)
    # (5) only contributes when p divides 2n-1.
    k = numeric.get_exponent(2 * n - 1, p)
    a5 = [] if k is None else [p * (2 * n - 1)]
    return itertools.chain(a1, a2, a3, a4, a5)
34,893
def cli() -> None:
    """ File Sort a tool to organize images on a path. To get started, run collect: $ files_sort_out collect To show collected image folders: $ files_sort_out show To remove(exclude) directories from list run: $ files_sort_out exclude <path> Then copy files to a new location: $ files_sort_out copy <path> Or move files to a new location: $ files_sort_out move <path> To find files duplicates run: $ files_sort_out duplicate <path> """
    # NOTE(review): intentionally empty body -- presumably a CLI group entry
    # point (e.g. click) whose docstring doubles as the rendered --help text.
    # Confirm against the decorator before editing the docstring wording.
    pass
34,894
def get_service(hass, config, discovery_info=None):
    """Get the ClickSend notification service."""
    # Only hand back a service object when the credentials check passes.
    if _authenticate(config):
        return ClicksendNotificationService(config)
    _LOGGER.error("You are not authorized to access ClickSend")
    return None
34,895
def mock_features_dtypes(num_rows=100):
    """Internal function that returns the default full dataset.

    :param num_rows: The number of observations in the final dataset. Defaults to 100.
    :type num_rows: int, optional
    :return: The dataset with all columns included, keyed by column name.
    :rtype: dict
    """
    fake = Faker()

    def _remove_x_from_number(phone):
        # Faker phone numbers may carry an extension suffix ("...x1234");
        # truncate at the first 'x' so only the base number remains.
        if "x" in phone:
            phone = phone[: phone.find("x")]
        return phone

    phone_numbers = pd.Series([fake.phone_number() for _ in range(num_rows)])
    phone_numbers = phone_numbers.apply(_remove_x_from_number)

    def _remove_newline_from_address(address):
        # Flatten multi-line addresses into a single comma-separated line.
        address = address.replace("\n", ", ")
        return address

    addresses = pd.Series([fake.address() for _ in range(num_rows)])
    addresses = addresses.apply(_remove_newline_from_address)
    # One entry per synthetic column type; every value is length num_rows.
    dtypes_dict = {
        "ints": [i for i in range(-num_rows // 2, num_rows // 2)],
        "rand_ints": np.random.choice([i for i in range(-5, 5)], num_rows),
        "floats": [float(i) for i in range(-num_rows // 2, num_rows // 2)],
        "rand_floats": np.random.uniform(low=-5.0, high=5.0, size=num_rows),
        "booleans": np.random.choice([True, False], num_rows),
        "categoricals": np.random.choice(
            ["First", "Second", "Third", "Fourth"], num_rows
        ),
        "dates": pd.date_range("1/1/2001", periods=num_rows),
        "texts": [
            f"My children are miserable failures, all {i} of them!"
            for i in range(num_rows)
        ],
        # Nullable variants mix pd.NA / None into the sampled pools.
        "ints_nullable": np.random.choice(
            [i for i in range(-10 // 2, 10 // 2)] + [pd.NA], num_rows
        ),
        "floats_nullable": np.random.choice(
            np.append([float(i) for i in range(-5, 5)], pd.NA), num_rows
        ),
        "booleans_nullable": np.random.choice([True, False, None], num_rows),
        "full_names": pd.Series([fake.name() for _ in range(num_rows)]),
        "phone_numbers": phone_numbers,
        "addresses": addresses,
        "countries": pd.Series([fake.country() for _ in range(num_rows)]),
        "email_addresses": pd.Series(
            [fake.ascii_free_email() for _ in range(num_rows)]
        ),
        "urls": pd.Series([fake.url() for _ in range(num_rows)]),
        "currencies": pd.Series([fake.pricetag() for _ in range(num_rows)]),
        "file_paths": pd.Series([fake.file_path(depth=3) for _ in range(num_rows)]),
        "ipv4": pd.Series([fake.ipv4() for _ in range(num_rows)]),
        "ipv6": pd.Series([fake.ipv6() for _ in range(num_rows)]),
        "lat_longs": pd.Series([fake.latlng() for _ in range(num_rows)]),
    }
    return dtypes_dict
34,896
def random_bitstring(n, p, failcount=0):
    """
    Constructs a random bitstring of length n with parity p.

    Uniform random bits are drawn repeatedly (up to 101 total attempts,
    matching the original recursion-depth budget) until the sum of the
    bits has parity ``p``.

    Parameters
    ----------
    n : int
        Number of bits.
    p : int
        Desired parity (0 or 1).
    failcount : int, optional
        Number of attempts already consumed. Internal use only.

    Returns
    -------
    numpy.ndarray
        Integer array of 0s and 1s whose sum is congruent to p mod 2.

    Raises
    ------
    ValueError
        If no bitstring with the requested parity was produced within the
        attempt budget (only plausible when ``p`` is not a reachable
        parity value, e.g. p == 1 with n == 0). The original code fell
        off the end and returned an implicit None here, which the
        recursive caller then crashed on with an opaque TypeError.
    """
    # Iterative retry loop instead of recursion: same 101-draw budget
    # (failcount 0..100 inclusive), no stack growth, and a consistent
    # dtype on every return path.
    for _ in range(101 - failcount):
        bitstring = _np.random.randint(0, 2, size=n)
        if _np.mod(bitstring.sum(), 2) == p:
            return _np.asarray(bitstring, dtype='int')
    raise ValueError(
        "could not generate a length-%d bitstring with parity %r" % (n, p)
    )
34,897
def serializer(message):
    """serializes the message as JSON"""
    # Serialize first, then encode to UTF-8 bytes for the transport layer.
    as_text = json.dumps(message)
    return as_text.encode('utf-8')
34,898
def send_mail(request, format=None):
    """
    Send mail to admin.

    Validates the request payload with MailSerializer, sends the message
    to ``settings.EMAIL_TO`` and persists the mail record.

    Responses:
        201 -- mail sent and saved; serialized record in the body.
        400 -- payload failed serializer validation; errors in the body.
        500 -- sending or saving failed server-side.
    """
    # Validate the incoming payload first; reject bad input early.
    serializer = MailSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    try:
        # Build the subject from the sender's name.
        subject = settings.EMAIL_SUBJECT.format(
            first_name=request.data["first_name"],
            last_name=request.data["last_name"],
        )
        msg = request.data["message"]
        email_from = request.data["email_from"]
        # Send mail to the configured admin address.
        EmailMessage(subject, msg, email_from, [settings.EMAIL_TO]).send()
        # Persist the mail instance with request metadata.
        serializer.save(
            owner=request.user,
            email_to=settings.EMAIL_TO,
            host_ip=request.META["REMOTE_ADDR"],
        )
    except Exception as exc:
        # The previous version swallowed this exception and fell through to
        # a 400 response carrying an EMPTY error dict (the serializer had
        # already validated), masking server-side failures as client errors.
        # Report the failure explicitly as a 500 instead.
        return Response(
            {"detail": "Failed to send mail: %s" % exc},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    return Response(serializer.data, status=status.HTTP_201_CREATED)
34,899