Columns: content (string, lengths 22 to 815k); id (int64, 0 to 4.91M)
def test_sanitize_callable_params():
    """Callback functions are not serializable. Therefore, we give them a chance
    to return something and, if the returned type is not accepted, return None.
    """
    opt = "--max_epochs 1".split(" ")
    parser = ArgumentParser()
    parser = Trainer.add_argparse_args(parent_parser=parser)
    params = parser.parse_args(opt)

    def return_something():
        return "something"

    params.something = return_something

    def wrapper_something():
        return return_something

    params.wrapper_something_wo_name = lambda: lambda: "1"
    params.wrapper_something = wrapper_something

    params = _convert_params(params)
    params = _flatten_dict(params)
    params = _sanitize_callable_params(params)
    assert params["gpus"] == "None"
    assert params["something"] == "something"
    assert params["wrapper_something"] == "wrapper_something"
    assert params["wrapper_something_wo_name"] == "<lambda>"
5,335,600
def get_url(bucket_name, filename):
    """ Gets the uri to the object. """
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    blob = bucket.blob(filename)
    url = blob.public_url
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    return url
5,335,601
def distance():
    """ Return a random value of FRB distance, chosen from a range of
    observed FRB distances.

    - Args: None.
    - Returns: FRB distance in meters
    """
    dist_m = np.random.uniform(6.4332967e24, 1.6849561e26)
    return dist_m
5,335,602
def p10k(n, empty="-"):
    """ Write number as parts per ten thousand. """
    if n is None or np.isnan(n):
        return empty
    elif n == 0:
        return "0.0‱"
    elif np.isinf(n):
        return _("inf") if n > 0 else _("-inf")
    return format_number(10000 * n) + "‱"
5,335,603
def output_file(filename: str, *codecs: Codec, **kwargs: Any) -> Output:
    """
    A shortcut to create proper output file.

    :param filename: output file name.
    :param codecs: codec list for this output.
    :param kwargs: output parameters.
    :return: configured ffmpeg output.
    """
    return Output(output_file=filename, codecs=list(codecs), **kwargs)
5,335,604
def selftest_validate_resilient_circuits_installed(attr_dict, **_): """ selftest.py validation helper method. Validates that 'resilient-circuits' is installed in the env and confirms that the version is >= constants.RESILIENT_LIBRARIES_VERSION :param attr_dict: (required) dictionary of attributes defined in ``selftest_attributes`` :type attr_dict: dict :param path_selftest_py_file: (optional) path to selftest.py :type path_selftest_py_file: str :param package_name: (optional) name of package being validated :type package_name: str :param path_package: (optional) path to package :type path_package: str :return: returns a tuple with the status of the validation and an associated SDKValidateIssue :rtype: (bool, SDKValidateIssue) """ LOG.debug("validating that 'resilient-circuits' is installed in the env...\n") res_circuits_version = sdk_helpers.get_package_version(constants.CIRCUITS_PACKAGE_NAME) if res_circuits_version and res_circuits_version >= pkg_resources.parse_version(constants.RESILIENT_LIBRARIES_VERSION): # installed and correct version return True, SDKValidateIssue( name=attr_dict.get("name"), description=attr_dict.get("pass_msg"), severity=SDKValidateIssue.SEVERITY_LEVEL_DEBUG, solution="" ) elif res_circuits_version and res_circuits_version < pkg_resources.parse_version(constants.RESILIENT_LIBRARIES_VERSION): # resilient-circuits installed but version not supported return False, SDKValidateIssue( name=attr_dict.get("name"), description=attr_dict.get("fail_msg").format(res_circuits_version), severity=attr_dict.get("severity"), solution=attr_dict.get("fail_solution") ) elif not res_circuits_version: # if 'resilient-circuits' not installed return False, SDKValidateIssue( name=attr_dict.get("name"), description=attr_dict.get("missing_msg"), severity=attr_dict.get("severity"), solution=attr_dict.get("missing_solution") ) else: # unknown other error raise SDKException("Unknown error while checking for {0}".format(constants.CIRCUITS_PACKAGE_NAME))
5,335,605
def convert_topology(topology, model_name, doc_string, target_opset, channel_first_inputs=None, options=None, remove_identity=True, verbose=0): """ This function is used to convert our Topology object defined in _parser.py into a ONNX model (type: ModelProto). :param topology: The Topology object we are going to convert :param model_name: GraphProto's name. Let "model" denote the returned model. The string "model_name" would be assigned to "model.graph.name." :param doc_string: A string attached to the produced model :param target_opset: number or dictionary, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3, a dictionary is used to indicate different opset for different domains :param options: see :ref:`l-conv-options` :param remove_identity: removes identity nodes include '1.1.2', '1.2', and so on. :param verbose: displays information while converting :return: a ONNX ModelProto """ if target_opset is None: target_opset = get_latest_tested_opset_version() if isinstance(target_opset, dict): onnx_target_opset = target_opset.get( '', get_latest_tested_opset_version()) else: onnx_target_opset = target_opset if onnx_target_opset > get_opset_number_from_onnx(): found = get_opset_number_from_onnx() raise RuntimeError( "Parameter target_opset {} > {} is higher than the " "version of the installed onnx package. See " "https://github.com/onnx/onnx/blob/master/docs/" "Versioning.md#released-versions" ".".format(onnx_target_opset, found)) if onnx_target_opset > get_latest_tested_opset_version(): warnings.warn( "Parameter target_opset {} > {} is higher than the " "the latest tested version" ".".format( onnx_target_opset, get_latest_tested_opset_version())) container = ModelComponentContainer( target_opset, options=options, registered_models=topology.registered_models, white_op=topology.raw_model._white_op, black_op=topology.raw_model._black_op, verbose=verbose) # Traverse the graph from roots to leaves # This loop could eventually be parallelized. topology.convert_operators(container=container, verbose=verbose) container.ensure_topological_order() if len(container.inputs) == 0: raise RuntimeError("No detected inputs after conversion.") if len(container.outputs) == 0: raise RuntimeError("No detected outputs after conversion.") if verbose >= 2: print("---NODES---") for node in container.nodes: print(" %s - %s: %r -> %r" % ( node.op_type, node.name, node.input, node.output)) # Create a graph from its main components if container.target_opset_onnx < 9: # When calling ModelComponentContainer's add_initializer(...), # nothing is added into the input list. However, for ONNX target # opset < 9, initializers should also be a part of model's # (GraphProto) inputs. Thus, we create ValueInfoProto objects # from initializers (type: TensorProto) directly and then add # them into model's input list. extra_inputs = [] # ValueInfoProto list of the initializers for tensor in container.initializers: # Sometimes (especially when creating optional input values # such as RNN's initial hidden state), an initializer is also # one of the original model's input, so it has been added into # the container's input list. If this is the case, we need to # skip one iteration to avoid duplicated inputs. if tensor.name in [value_info.name for value_info in container.inputs]: continue # Initializers are always tensors so we can just call # make_tensor_value_info(...). 
value_info = make_tensor_value_info( tensor.name, tensor.data_type, tensor.dims) extra_inputs.append(value_info) # Before ONNX opset 9, initializers were needed to be passed in # with inputs. graph = make_graph(container.nodes, model_name, container.inputs + extra_inputs, container.outputs, container.initializers) else: # In ONNX opset 9 and above, initializers are included as # operator inputs and therefore do not need to be passed as # extra_inputs. graph = make_graph( container.nodes, model_name, container.inputs, container.outputs, container.initializers) # Add extra information related to the graph graph.value_info.extend(container.value_info) # Create model onnx_model = make_model(graph) # Update domain version opv = min(onnx_target_opset, _get_main_opset_version(onnx_model) or onnx_target_opset) if not _update_domain_version(container, onnx_model, verbose=verbose): # Main opset was not added. Doing it here. op_set = onnx_model.opset_import.add() op_set.domain = '' op_set.version = opv if verbose > 0: print('[convert_topology] +opset: name=%r, version=%s' % ( '', opv)) # Add extra information irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION) onnx_model.ir_version = irv onnx_model.producer_name = utils.get_producer() onnx_model.producer_version = utils.get_producer_version() onnx_model.domain = utils.get_domain() onnx_model.model_version = utils.get_model_version() onnx_model.doc_string = doc_string # Removes many identity nodes, # the converter may introduct identity nodes # after a zipmap operator and onnx <= 1.7 does not # support that. It does not use onnxconverter-common # as the optimizer only support opset >= 9. if remove_identity: onnx_model = onnx_remove_node_identity(onnx_model) return onnx_model
5,335,606
def _get_intermediates(func_graph): """Returns all tensors in `func_graph` that should be accumulated.""" # We currently accumulate output tensors of most ops in the function and rely # on the pruning pass to get rid of the unused accumulators at runtime. # However, this can bloat the GraphDef and make debugging harder so we perform # some optimizations. # # Optimization we currently perform: # 1. We do not accumulate tensors which already have an accumulator # in the loop body. # 2. We do not accumulate outputs of Identity nodes. When building the # FuncGraph, we add an Identity node for each output (see # `AutomaticControlDependencies.mark_as_return`). Accumulating outputs # of all these nodes bloats the GraphDef quite a bit so we remove those. # Since the gradient of an Identity node does not rely on its forward op's # input this is safe to do. # # Other possible optimizations: # 1. Only accumulate tensors that will be required by the backward pass. # This will require running the gradient pass and hence would increase the # graph building time for the forward pass. # 2. Do not accumulate Const nodes created inside the loop body. # 3. Do not accumulate loop vars that are returned as-is just like captured # tensors. intermediates = [] reverse_captures = dict((v.ref(), k) for k, v in func_graph.captures) for op in func_graph.get_operations(): if op.type == "Identity": continue # Accumulating mutexes can cause deadlock. if op.type == "MutexLock": continue for o in op.outputs: if (o is not func_graph.inputs[0] and # Loop counter. o.dtype != dtypes.resource and # Do not accumulate resource tensors. _get_accumulator(o) is None and # Has existing accumulator. o.ref() not in reverse_captures ): # Captured value, hence loop invariant. intermediates.append(o) return intermediates
5,335,607
def multivariate_logrank_test(event_durations, groups, event_observed=None, alpha=0.95, t_0=-1, suppress_print=False, **kwargs): """ This test is a generalization of the logrank_test: it can deal with n>2 populations (and should be equal when n=2): H_0: all event series are from the same generating processes H_A: there exist atleast one group that differs from the other. Parameters: event_durations: a (n,) numpy array the (partial) lifetimes of all individuals groups: a (n,) numpy array of unique group labels for each individual. event_observed: a (n,) numpy array of event observations: 1 if observed death, 0 if censored. Defaults to all observed. alpha: the level of signifiance desired. t_0: the final time to compare the series' up to. Defaults to all. suppress_print: if True, do not print the summary. Default False. kwargs: add keywords and meta-data to the experiment summary. Returns: summary: a print-friendly summary of the statistical test p_value: the p-value test_result: True if reject the null, (pendantically) None if we can't reject the null. """ if event_observed is None: event_observed = np.ones((event_durations.shape[0], 1)) n = max(event_durations.shape) assert n == max(event_durations.shape) == max(event_observed.shape), "inputs must be of the same length." groups, event_durations, event_observed = map(lambda x: pd.Series(np.reshape(x, (n,))), [groups, event_durations, event_observed]) unique_groups, rm, obs, _ = group_survival_table_from_events(groups, event_durations, event_observed, np.zeros_like(event_durations), t_0) n_groups = unique_groups.shape[0] # compute the factors needed N_j = obs.sum(0).values n_ij = (rm.sum(0).values - rm.cumsum(0).shift(1).fillna(0)) d_i = obs.sum(1) n_i = rm.values.sum() - rm.sum(1).cumsum().shift(1).fillna(0) ev = n_ij.mul(d_i / n_i, axis='index').sum(0) # vector of observed minus expected Z_j = N_j - ev assert abs(Z_j.sum()) < 10e-8, "Sum is not zero." # this should move to a test eventually. # compute covariance matrix V_ = n_ij.mul(np.sqrt(d_i) / n_i, axis='index').fillna(1) V = -np.dot(V_.T, V_) ix = np.arange(n_groups) V[ix, ix] = V[ix, ix] + ev # take the first n-1 groups U = Z_j.ix[:-1].dot(np.linalg.pinv(V[:-1, :-1]).dot(Z_j.ix[:-1])) # Z.T*inv(V)*Z # compute the p-values and tests test_result, p_value = chisq_test(U, n_groups - 1, alpha) summary = pretty_print_summary(test_result, p_value, U, t_0=t_0, test='logrank', alpha=alpha, null_distribution='chi squared', df=n_groups - 1, **kwargs) if not suppress_print: print(summary) return summary, p_value, test_result
5,335,608
def ae(y, p):
    """Absolute error.

    Absolute error can be defined as follows:

    .. math::
        \sum_i^n abs(y_i - p_i)

    where :math:`n` is the number of provided records.

    Parameters
    ----------
    y : :class:`ndarray`
        One dimensional array with ground truth values.
    p : :class:`ndarray`
        One dimensional array with predicted values.

    Returns
    -------
    float
        Absolute error as described above.
    """
    return np.abs(y - p).sum()
5,335,609
def create_masks_from_plane(normal, dist, shape):
    """
    Create a binary mask of given size based on a plane defined by its normal
    and a point on the plane (in voxel coordinates).

    Parameters
    ----------
    dist: Distance of the plane to the origin (in voxel coordinates).
    normal: Normal of the plane (in voxel coordinates).
    shape: Shape of the mask that will be created.

    Returns
    -------
    Binary mask of specified shape split in two by the given plane.
    """
    grid_x, grid_y, grid_z = np.meshgrid(range(shape[0]), range(shape[1]),
                                         range(shape[2]), indexing='ij')
    position = np.column_stack((grid_x.ravel(order='F'),
                                grid_y.ravel(order='F'),
                                grid_z.ravel(order='F')))

    # distance_from_plane = np.dot((position - np.transpose(point)), normal)
    distance_from_plane = np.dot(position, normal) + dist
    distance_vol = np.array(distance_from_plane).reshape(
        (shape[0], shape[1], shape[2]), order='F')

    binary_mask = np.empty(distance_vol.shape, dtype=np.uint8)
    binary_mask[:, :, :] = distance_vol[:, :, :] >= 0

    return binary_mask
5,335,610
def untokenize(tokens: List[str], lang: str = "fr") -> str:
    """
    Inputs a list of tokens, outputs a string.

    ["J'", 'ai'] >>> "J' ai"

    Parameters
    ----------
    lang : string
        language code

    Returns
    -------
    string
        text
    """
    d = MosesDetokenizer(lang=lang)
    text: str = d.detokenize(tokens, unescape=False)
    return text
5,335,611
def RSS_LABEL_TO_DIR(label_, is_html_):
    """Return the directory path to store URLs and HTML downloaded from RSS

    @param label_: the RSS label being crawled
    @param is_html_: True to return HTML directory and FALSE to return URLs directory
    """
    bottom_dir_ = '/'.join(label_.split('-'))
    ret_ = None
    if is_html_:
        ret_ = os.path.join(CONST.RSS_HTML_DIR, bottom_dir_)
    else:
        ret_ = os.path.join(CONST.RSS_URLS_DIR, bottom_dir_)
    if not os.path.exists(ret_):
        os.makedirs(ret_)
    return ret_
5,335,612
def unwrap_cachable(func):
    """
    Converts any HashableNodes in the argument list of a function into their
    standard node counterparts.
    """
    def inner(*args, **kwargs):
        args, kwargs = _transform_by_type(lambda hashable: hashable.node,
                                          HashableNode, *args, **kwargs)
        return func(*args, **kwargs)
    return inner
5,335,613
def write_config(config, config_template, config_path):
    """Writes a new config file based upon the config template.

    :param config: A dict containing all the key/value pairs which should be
                   used for the new configuration file.
    :param config_template: The config (jinja2-)template.
    :param config_path: The place to write the new config file.
    """
    with open(config_path, 'wb') as cfg_file:
        cfg_file.write(
            config_template.render(**config).encode("utf-8")
        )
5,335,614
def tuples_to_full_paths(tuples): """ For a set of tuples of possible end-to-end path [format is: (up_seg, core_seg, down_seg)], return a list of fullpaths. """ res = [] for up_segment, core_segment, down_segment in tuples: if not up_segment and not core_segment and not down_segment: continue if not _check_connected(up_segment, core_segment, down_segment): continue up_iof, up_hofs, up_mtu, up_exp = _copy_segment( up_segment, False, (core_segment or down_segment)) core_iof, core_hofs, core_mtu, core_exp = _copy_segment( core_segment, up_segment, down_segment) down_iof, down_hofs, down_mtu, down_exp = _copy_segment( down_segment, (up_segment or core_segment), False, cons_dir=True) args = [] for iof, hofs in [(up_iof, up_hofs), (core_iof, core_hofs), (down_iof, down_hofs)]: if iof: args.extend([iof, hofs]) path = SCIONPath.from_values(*args) if up_segment: up_core = list(reversed(list(up_segment.iter_asms()))) else: up_core = [] if core_segment: up_core += list(reversed(list(core_segment.iter_asms()))) if_list = _build_interface_list(up_core) if down_segment: down_core = list(down_segment.iter_asms()) else: down_core = [] if_list += _build_interface_list(down_core, cons_dir=True) mtu = _min_mtu(up_mtu, core_mtu, down_mtu) exp = min(up_exp, core_exp, down_exp) path_meta = FwdPathMeta.from_values(path, if_list, mtu, exp) res.append(path_meta) return res
5,335,615
def _fileobj_to_fd(fileobj):
    """Return a file descriptor from a file object.

    Parameters:
    fileobj -- file object or file descriptor

    Returns:
    corresponding file descriptor

    Raises:
    ValueError if the object is invalid
    """
    if isinstance(fileobj, int):
        fd = fileobj
    else:
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError('Invalid file object: '
                             '{!r}'.format(fileobj)) from None
    if fd < 0:
        raise ValueError('Invalid file descriptor: {}'.format(fd))
    return fd
5,335,616
def request_changes_pull_request(pull_request=None, body_or_reason=None):
    """
    :param pull_request:
    :param body_or_reason:
    :return:
    """
    if not pull_request:
        raise ValueError("you must provide pull request")
    if not body_or_reason:
        raise ValueError("you must provide request changes comment(s)")

    return pull_request.create_review(event=PULL_REQUEST_EVENT_REQUEST_CHANGES,
                                      body=body_or_reason)
5,335,617
def handle(
    func: Callable,
    exception_type: Union[Type[Exception], Tuple[Type[Exception]]],
    *args,
    **kwargs
):
    """
    Call function with errors handled in cfpm's way.

    Before using this function, make sure all of func's errors are known and
    can exit safely after an error is raised without cleaning up.

    Args:
        func: The function to be called.
        exception_type: The type(s) of the exceptions that can be handled
            safely.
    """
    try:
        return func(*args, **kwargs)
    except exception_type as e:
        error(e)
5,335,618
def hrnetv2_w32(**kwargs):
    """
    HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for
    Visual Recognition,' https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)
5,335,619
def medicare_program_engagement(): """ Produces a wide dataset at the NPI level that shows when a provider entered and exited the three different medicare databases: Part B, Part D, and Physician Compare """ from .utils.globalcache import c partd = part_d_files(summary=True, usecols=['npi', 'total_claim_count']) partd_engage = (partd.assign(PartD_Max_Year=lambda df: df.Year, PartD_Min_Year=lambda df: df.Year) .groupby('npi', as_index=False) .agg({'PartD_Min_Year': min, 'PartD_Max_Year': max}) ) partb = part_b_files(summary=True, columns=['National Provider Identifier', 'Number of Medicare Beneficiaries']) partb_engage = (partb.assign(PartB_Max_Year=lambda df: df.Year, PartB_Min_Year=lambda df: df.Year) .groupby('National Provider Identifier', as_index=False) .agg({'PartB_Min_Year': min, 'PartB_Max_Year': max}) .rename(columns={'National Provider Identifier': 'npi'})) pc = c.physician_compare_select_vars([], drop_duplicates=False, date_var=True) pc_engage = (pc.assign(Year=pc.date.dt.year) .drop(columns='date') .drop_duplicates()) pc_engage = (pc_engage.assign(PC_Max_Year=lambda df: df.Year, PC_Min_Year=lambda df: df.Year) .groupby('NPI', as_index=False) .agg({'PC_Min_Year': min, 'PC_Max_Year': max}) .rename(columns={'NPI': 'npi'})) df = (pc_engage .merge(partd_engage, how='outer') .merge(partb_engage, how='outer') .convert_dtypes({x: 'Int64' for x in pc_engage.columns})) df.loc[((df.PC_Max_Year == 2020) | (df.PartD_Max_Year == 2017) | (df.PartB_Max_Year == 2017)) & ~((df.PartD_Max_Year.notnull() & df.PartB_Max_Year.notnull() & (df.PC_Max_Year < 2020))), 'maybe_active'] = True df = df.assign(maybe_active=df.maybe_active.fillna(False)) df.loc[df.PC_Max_Year == 2020, 'active_2020'] = True df = df.assign(active_2020=df.active_2020.fillna(False)) return df
5,335,620
def _is_bumf(value):
    """
    Return true if this value is filler, en route to skipping over empty lines

    :param value: value to check
    :type value: object
    :return: whether the value is filler
    :rtype: bool
    """
    if type(value) in (unicode, str):
        return value.strip() == ''
    return value is None
5,335,621
def pos_tag(words, engine="unigram", corpus="orchid"): """ Part of Speech tagging function. :param list words: a list of tokenized words :param str engine: * unigram - unigram tagger (default) * perceptron - perceptron tagger * artagger - RDR POS tagger :param str corpus: * orchid - annotated Thai academic articles * pud - Parallel Universal Dependencies (PUD) treebanks :return: returns a list of labels regarding which part of speech it is """ if not words: return [] if engine == "perceptron": from .perceptron import tag as tag_ elif engine == "artagger": def tag_(words, corpus=None): if not words: return [] from artagger import Tagger words_ = Tagger().tag(" ".join(words)) return [(word.word, word.tag) for word in words_] else: # default, use "unigram" ("old") engine from .unigram import tag as tag_ return tag_(words, corpus=corpus)
5,335,622
def derive(control):
    """
    gui.derive will be removed after mGui 2.2; for now it's going to issue a
    deprecation warning and call `wrap()`
    """
    warnings.warn("gui.derive() should be replaced by gui.wrap()",
                  PendingDeprecationWarning)
    return wrap(control)
5,335,623
def test_which_set(): """Test which_set selector.""" skip_if_no_sklearn() # one label this_yaml = test_yaml_which_set % {'which_set': 'train'} trainer = yaml_parse.load(this_yaml) trainer.main_loop() # multiple labels this_yaml = test_yaml_which_set % {'which_set': ['train', 'test']} trainer = yaml_parse.load(this_yaml) trainer.main_loop() # improper label (iterator only returns 'train' and 'test' subsets) this_yaml = test_yaml_which_set % {'which_set': 'valid'} try: trainer = yaml_parse.load(this_yaml) trainer.main_loop() raise AssertionError except ValueError: pass # bogus label (not in approved list) this_yaml = test_yaml_which_set % {'which_set': 'bogus'} try: yaml_parse.load(this_yaml) raise AssertionError except ValueError: pass
5,335,624
def harmonizeClassifiedSamples(species,reference_exp_file, query_exp_file, classification_file,fl=None): """ The goal of this function is to take LineageProfilerIterative classified samples to a reference matrix, combine the reference matrix and the query matrix at the gene symbol level, retain the original reference column and row orders, then re-order the query samples within the context of the reference samples the correlations were derived from. In doing so, the harmonization occurs by attempting to map the expression values within a common numerical format to minimize dataset specific effects (folds, non-log expression). The function culminates in a heatmap showing the genes and re-ordered samples in thier listed orders. Outlier samples with low correlations will ultimately need to be represented outside of the reference sample continuum. The function attempts to propagate sample group labels from the reference and query sets (if avaialble, in the sample headers or in a groups file), and indicate original gene and column clusters if available. We call this approach cellHarmony. """ """ Alternative description: To represent new cells/samples within the continuum of established scRNA-Seq profiles we have developed a robust and fast data harmonization function. This data harmonization approach applies a k-nearest neighbor classification approach to order submitted cells along the continuum of reference gene expression profiles, without alternating the order of the reference cells and cluster groups in original reference matrix. The established group labels are in effect propagated to new queried samples. The final data is represented in an intuitive queriable heatmap and dimensionality reduction scatter plot (t-SNE, PCA). To harmonize the input exp. data, the input file formats are emperically determined (log, non-log, log-fold) and standardized to log2 expression. Submitted FASTQ files to AltAnalyze will be optionally pseudoaligned and normalized to maximize compatibility with the existing reference datasets. Alternative classification algoriths and options for submission of new reference sets will be supported. Issues may arrise in the expression harmonization step, however, profiles are already biased towards genes with cell population restricted expression, largely abrogating this problem. 
""" ### Set the user or default options for cellHarmony Differential Expression Analyses try: pearsonThreshold=fl.PearsonThreshold() except: pearsonThreshold = 0.1 try: peformDiffExpAnalysis=fl.PeformDiffExpAnalysis() except: peformDiffExpAnalysis = True try: use_adjusted_pval=fl.UseAdjPvalue() except: use_adjusted_pval = False try: pvalThreshold=float(fl.PvalThreshold()) except: pvalThreshold = 0.05 try: FoldCutoff = fl.FoldCutoff() except: FoldCutoff = 1.5 customLabels = None try: if len(fl.Labels())>0: customLabels = fl.Labels() except: pass ### Output the alignment results and perform the differential expression analysis output_file,query_output_file,folds_file,DEGs_combined = importAndCombineExpressionFiles(species,reference_exp_file, query_exp_file,classification_file,pearsonThreshold=pearsonThreshold,peformDiffExpAnalysis=peformDiffExpAnalysis, pvalThreshold=pvalThreshold,fold_cutoff=FoldCutoff,use_adjusted_pval=use_adjusted_pval,customLabels=customLabels) output_dir = export.findParentDir(output_file) if len(folds_file)<1: """ If performDiffExp==False, see if a prior folds file exists """ folds_file = string.replace(output_file,'-ReOrdered','-AllCells-folds') ### Output the cellHarmony heatmaps from visualization_scripts import clustering row_method = None; row_metric = 'cosine'; column_method = None; column_metric = 'euclidean'; color_gradient = 'yellow_black_blue' transpose = False; Normalize='median' if runningCommandLine: display = False else: display = True display = False print 'Exporting cellHarmony heatmaps...' heatmaps_dir = output_dir+'/heatmaps/' try: os.mkdir(heatmaps_dir) except: pass try: graphics = clustering.runHCexplicit(query_output_file, [], row_method, row_metric, column_method, column_metric, color_gradient, transpose, Normalize=Normalize, contrast=5, display=display) plot = graphics[-1][-1][:-4]+'.pdf' file = graphics[-1][-1][:-4]+'.txt' shutil.copy(plot,output_dir+'/heatmaps/heatmap-query-aligned.pdf') shutil.copy(file,output_dir+'/heatmaps/heatmap-query-aligned.txt') graphics = clustering.runHCexplicit(output_file, [], row_method, row_metric, column_method, column_metric, color_gradient, transpose, Normalize=Normalize, contrast=5, display=display) plot = graphics[-1][-1][:-4]+'.pdf' file = graphics[-1][-1][:-4]+'.txt' shutil.copy(plot,output_dir+'/heatmaps/heatmap-all-cells-combined.pdf') shutil.copy(file,output_dir+'/heatmaps/heatmap-all-cells-combined.txt') except: print traceback.format_exc() zscore = True graphics=[] transpose='no' try: fl.setSpecies(species); fl.setVendor("3'array") except: import UI fl = UI.ExpressionFileLocationData(folds_file,'','','') fl.setSpecies(species); fl.setVendor("3'array") fl.setOutputDir(output_dir) try: platform=platform except: platform = 'RNASeq' ### Build-UMAP plot import UI import warnings warnings.filterwarnings('ignore') try: try: os.mkdir(fl.OutputDir()+'/UMAP-plots') except: pass """ Output UMAP combined plot colored by reference and query cell identity """ plot = UI.performPCA(output_file, 'no', 'UMAP', False, None, plotType='2D', display=False, geneSetName=None, species=species, zscore=False, reimportModelScores=False, separateGenePlots=False, returnImageLoc=True) plot = plot[-1][-1][:-4]+'.pdf' shutil.copy(plot,fl.OutputDir()+'/UMAP-plots/UMAP-query-vs-ref.pdf') """ Output UMAP combined plot colored by cell tates """ plot = UI.performPCA(output_file, 'no', 'UMAP', False, None, plotType='2D', display=False, geneSetName=None, species='Mm', zscore=False, reimportModelScores=True, separateGenePlots=False, 
returnImageLoc=True, forceClusters=True) plot = plot[-1][-1][:-4]+'.pdf' shutil.copy(plot,fl.OutputDir()+'/UMAP-plots/UMAP-query-vs-ref-clusters.pdf') """ Output individual UMAP plots colored by cell tates """ groups_file = string.replace(output_file,'exp.','groups.') plots = UI.performPCA(output_file, 'no', 'UMAP', False, None, plotType='2D', display=False, geneSetName=None, species='Mm', zscore=False, reimportModelScores=True, separateGenePlots=False, returnImageLoc=True, forceClusters=True, maskGroups=groups_file) for plot in plots: plot = plot[-1][:-4]+'.pdf' if '-cellHarmony-Reference-' in plot: shutil.copy(plot,fl.OutputDir()+'/UMAP-plots/UMAP-ref-clusters.pdf') else: shutil.copy(plot,fl.OutputDir()+'/UMAP-plots/UMAP-query-clusters.pdf') except: try: print traceback.format_exc() print 'UMAP error encountered (dependency not met), trying t-SNE' UI.performPCA(output_file, 'no', 't-SNE', False, None, plotType='2D', display=False, geneSetName=None, species=species, zscore=True, reimportModelScores=False, separateGenePlots=False, returnImageLoc=True) except: pass useMarkerFinder=False ### Run MarkerFinder if len(DEGs_combined) and useMarkerFinder: exportCombinedMarkerFinderResults(species,platform,fl,folds_file,DEGs_combined) elif len(DEGs_combined): exportPvalueRankedGenes(species,platform,fl,folds_file,DEGs_combined) ### Cleanup directory cleanupOutputFolder(output_dir)
5,335,625
def convert_to_reduced_row_echelon_form(matrix): """ Runs the Gaussian elimination algorithm on the provided matrix, which converts it into an equivalent reduced-row echelon form. This makes it much easier to solve. Note that this will be a "mod-2" version of the Gaussian elimination algorithm, since we're dealing with bit strings instead of regular vectors and matrices for this problem. Parameters: matrix (list[list[bool]]): The matrix to convert. This function assumes that the matrix is for a set of equations where Mx = 0. Note that because of this, and because this is a mod-2 Gaussian elimination, you don't need to include the solution vector (the "= 0" part) in the matrix - it just needs to be a collection of input strings. Remarks: The fact that this is a mod-2 version of Gaussian elimination makes it a lot easier than the normal version. Basically it means the row multiplication step doesn't matter (since the only possible multiplication value is 1, which doesn't do anything) and row addition step just turns into a bitwise XOR for each term in the rows. Also, since we know that each equation is of the form (X · S) % 2 = 0, we can drop the output column entirely. It will always start as a 0, and 0 XOR 0 is always 0, so no matter what the input rows are, it will always be 0 and thus doesn't matter at all. This discussion on the math StackExchange has a good summary of the differences in the mod-2 world: https://math.stackexchange.com/a/45348 """ height = len(matrix) width = len(matrix[0]) current_row = 0 for column_index in range(0, width): # Find the first row that has a 1 in the target column, # ignoring rows we've already processed pivot_row = find_pivot_row(matrix, column_index, current_row) if pivot_row == -1: continue # If it's lower than the current row we're looking at, # flip the two if pivot_row > current_row: swap_rows(matrix, pivot_row, current_row) # Reduce all of the trailing rows by XORing them with # this one. reduce_rows(matrix, current_row, column_index) # Move onto the next row, but if we're out of rows, then # we're done here. current_row += 1 if current_row == height: return
5,335,626
def test_backward_for_binary_cmd_with_inputs_of_different_dim_and_autograd(cmd, shapes): """ Test .backward() on local tensors wrapped in an AutogradTensor (It is useless but this is the most basic example) """ a_shape, b_shape = shapes a = torch.ones(a_shape, requires_grad=True) b = torch.ones(b_shape, requires_grad=True) a = syft.AutogradTensor().on(a) b = syft.AutogradTensor().on(b) a_torch = torch.ones(a_shape, requires_grad=True) b_torch = torch.ones(b_shape, requires_grad=True) c = getattr(a, cmd)(b) c_torch = getattr(a_torch, cmd)(b_torch) ones = torch.ones(c.shape) ones = syft.AutogradTensor().on(ones) c.backward(ones) c_torch.backward(torch.ones(c_torch.shape)) assert (a.child.grad == a_torch.grad).all() assert (b.child.grad == b_torch.grad).all()
5,335,627
def hex_to_base64(hex_):
    """ Converts hex string to base64 """
    return base64.b64encode(bytes.fromhex(hex_))
5,335,628
def test_no_match_context(current_context):
    """A context doesn't match if it is not "within" the pattern context."""
    assert not adstrangerlib._match_context('foo', current_context)
5,335,629
def p_rareopt(p):
    """rareopt : TOP_OPT EQ value"""
    p[0] = ParseTreeNode('EQ', raw='assign')
    opt_node = ParseTreeNode('OPTION', raw=p[1])
    if opt_node.raw in boolean_options:
        p[3].nodetype = 'BOOLEAN'
    opt_node.values.append(p[3])
    p[0].add_child(opt_node)
    p[0].add_child(p[3])
5,335,630
def dt2iso(orig_dt):
    """datetime to ISO 8601 format."""
    return timeutils.isotime(orig_dt)
5,335,631
def catalog(access_token, user_id, query=None): # noqa: E501 """Query the list of all the RDF graphs&#x27; names (URIs) and the response will be JSON format. # noqa: E501 :param access_token: Authorization access token string :type access_token: dict | bytes :param user_id: the ID of the organization of the client application :type user_id: str :param query: Query GraphsSPARQL Query expression (max 1536). Note the common lowest limit for the entrie url is 2048 as the limit. The query SPARQL string must be url-encoded. The example below is not url-encoded to show the un-encoded SPARQL content. :type query: str :rtype: GraphListType """ if connexion.request.is_json: access_token = AccessToken.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
5,335,632
def _make_chrome_policy_json(): """Generates the json string of chrome policy based on values in the db. This policy string has the following form: { "validProxyServers": {"Value": map_of_proxy_server_ips_to_public_key}, "enforceProxyServerValidity": {"Value": boolean} } Returns: A json string of current chrome policy. """ proxy_servers = models.ProxyServer.query.all() proxy_server_dict = {} for server in proxy_servers: proxy_server_dict[server.ip_address] = ( server.get_public_key_as_authorization_file_string()) proxy_server_value_dict = {"Value" : proxy_server_dict} config = ufo.get_user_config() config_value_dict = {"Value" : config.proxy_server_validity} policy_dictionary = { "validProxyServers": proxy_server_value_dict, "enforceProxyServerValidity": config_value_dict, } return json.dumps(policy_dictionary)
5,335,633
def get_rgb_color(party_id):
    """Get RGB color of party."""
    if party_id not in PARTY_TO_COLOR_OR_PARTY:
        return UNKNOWN_PARTY_COLOR
    color_or_party = PARTY_TO_COLOR_OR_PARTY[party_id]
    if isinstance(color_or_party, tuple):
        return color_or_party
    return get_rgb_color(color_or_party)
5,335,634
def view_application(application_id): """Views an application with ID. Args: application_id (int): ID of the application. Returns: str: redirect to the appropriate url. """ # Get user application. application = ApplicationModel.query.filter_by(id=application_id).first() isPersonalApplication = False # Redirect if application does not exist. if not application: flash("Application with ID {} is not present in the database.".format(str(application_id)), 'danger') current_app.logger.info("{} tried to view application with ID {} which does not exist in the database".format(current_user.name, str(application_id))) return redirect(url_for('hr.index')) # check if application is a personal application. if current_user.application and current_user.application.id == application_id: isPersonalApplication = True # Check if application corp is the user's corp. if not isPersonalApplication and application.corporation.id is not current_user.get_corp().id: flash('That application is not to your corp.', 'danger') current_app.logger.info("{} tried to view application which is not to their corporation.".format(current_user.name)) return redirect(url_for('hr.index')) # Check if user is viewing a personal application or someone else's application. if not isPersonalApplication and not current_user.has_permission('read_applications'): flash("You do not have the required permission to view other people's applications.", "danger") current_app.logger.info("{} tried to illegally access someone else's application but didn't have the required read_applications permission.".format(current_user.name)) return redirect(url_for('hr.index')) # Make application forms. removeApplicationForm = RemoveApplicationForm() editApplicationForm = EditNoteForm(notes=application.character.notes) # Removal of applications. if request.method == 'POST': # Check if notes were updated. if 'btn' not in request.form: if 'notes' in request.form and editApplicationForm.validate_on_submit(): oldNote = application.character.notes application.character.notes = editApplicationForm.notes.data Database.session.commit() flash("Successfully updated note.", "success") current_app.logger.info("{} updated {}'s note from '{}' to '{}'.".format(current_user.name, application.character.name, oldNote, editApplicationForm.notes.data)) return redirect(url_for('hr.view_application', application_id=application.id)) # Check other button presses. if request.form['btn'] == "RemoveApplication": # Check if application is valid. if not removeApplicationForm.validate_on_submit(): flash('Please make sure you provide a reason when removing an application.', 'danger') return redirect(url_for('hr.view_application', application_id=application.id)) characterName = application.character.name corpName = application.corporation.name rejectionReason = removeApplicationForm.rejection_reason.data # Add note with rejection reason. # If there are already notes, add an enter. 
if application.character.notes: application.character.notes += "\n" application.character.notes += "Application removed ({}) by {}: {}".format(datetime.utcnow().strftime('%Y/%m/%d'), current_user.name, rejectionReason) Database.session.delete(application) Database.session.commit() flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success') current_app.logger.info("{} removed application of {} to {} with reason '{}'.".format(current_user.name, characterName, corpName, rejectionReason)) elif request.form['btn'] == "RemovePersonalApplication": characterName = application.character.name corpName = application.corporation.name Database.session.delete(application) Database.session.commit() flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success') current_app.logger.info("{} removed application of {} to {}.".format(current_user.name, characterName, corpName)) elif request.form['btn'] == "UpdateApplication": application.ready_accepted = not application.ready_accepted newStatus = "Ready to be accepted" if application.ready_accepted else "Being processed" Database.session.commit() flash("Successfully set {} application status to {}.".format(application.character.name, newStatus), 'success') current_app.logger.info("{} edited status of {} application to {}".format(current_user.name, application.character.name, newStatus)) return redirect(url_for('hr.view_application', application_id=application.id)) return redirect(url_for('hr.index')) return render_template('hr/view_application.html', application=application, personal_application=isPersonalApplication, remove_form=removeApplicationForm, edit_form=editApplicationForm, discord_url=current_app.config['DISCORD_RECRUITMENT_INVITE'], client_id=EveAPI['full_auth_preston'].client_id, client_secret=EveAPI['full_auth_preston'].client_secret, scopes=EveAPI['full_auth_preston'].scope)
5,335,635
def test_create_state_space_vs_specialized_kw97(model): """State space reproduces invariant features of the kw97 state space.""" params, options = process_model_or_seed(model) optim_paras, options = process_params_and_options(params, options) # Create old state space arguments. n_periods = options["n_periods"] n_types = optim_paras["n_types"] edu_max = optim_paras["choices"]["school"]["max"] edu_starts = np.array(list(optim_paras["choices"]["school"]["start"])) # Get states and indexer from old state space. if "kw_97_basic" in model: states_old, indexer_old = _create_state_space_kw97_base( n_periods, n_types, edu_starts, edu_max ) else: states_old, indexer_old = _create_state_space_kw97_extended( n_periods, n_types, edu_starts, edu_max ) states_old = states_old[:, :-1] states_new = _create_core_state_space(optim_paras, options) core_period_choice = _create_core_period_choice(states_new, optim_paras, options) # I think here we can get more elegant! Or is this the only way? core_index_to_complex = {i: k for i, k in enumerate(core_period_choice)} core_index_to_indices = { i: core_period_choice[core_index_to_complex[i]] for i in core_index_to_complex } # Create sp indexer indexer = _create_indexer(states_new, core_index_to_indices, optim_paras) # Compare the state spaces via sets as ordering changed in some cases. states_old_set = set(map(tuple, states_old)) states_new_set = set(map(tuple, states_new.to_numpy())) assert states_old_set == states_new_set # Compare indexers via masks for valid indices. for period in range(n_periods): index_old_period = indexer_old[period] != INDEXER_INVALID_INDEX index_old_period = np.nonzero(index_old_period) indices_old = [ [period] + [index_old_period[x][i] for x in range(len(index_old_period) - 1)] for i in range(len(index_old_period[0])) ] for index in indexer.keys(): if index[0] == period: assert list(index) in indices_old for index in indices_old: assert tuple(index) in indexer.keys()
5,335,636
def build_save_containers(platforms, bucket) -> int:
    """
    Entry point to build and upload all built dockerimages in parallel
    :param platforms: List of platforms
    :param bucket: S3 bucket name
    :return: 1 if error occurred, 0 otherwise
    """
    if len(platforms) == 0:
        return 0

    platform_results = Parallel(n_jobs=len(platforms), backend="multiprocessing")(
        delayed(_build_save_container)(platform, bucket)
        for platform in platforms)

    is_error = False
    for platform_result in platform_results:
        if platform_result is not None:
            logging.error('Failed to generate {}'.format(platform_result))
            is_error = True

    return 1 if is_error else 0
5,335,637
def make_map( shape_df, df, dates, adm_key, cols, output_dir, title_prefix=None, log_scale=True, colormap="Reds", outline_df=None, ): """Create a map for each date and column. Parameters ---------- shape_df : geopandas.GeoDataFrame Shapefile information at the required admin level df : pandas.DataFrame Simulation data to plot dates : list of str List of dates to make maps for adm_key : str Admin level key cols : list of str List of columns to make maps for output_dir : str Directory to place created maps title_prefix : str, or None String to add to map prefix log_scale : bool If true, uses log scaling colormap : str, default "Reds" Colormap to use; must be a valid Matplotlib colormap outline_df : geopandas.GeoDataFrame, or None Shapefile for outline """ # Maps are joined one level down from the map-level # National maps color by state, state maps color by county join_key = "adm2" if adm_key == "adm1" else "adm1" # For colorbar scaling data_ranges = [] # If log, use a different colormap and scale columns if log_scale: # formatter = LogFormatter(10, labelOnlyBase=False) # Log scale columns and save for col in cols: col_name = "log_" + col df = pd.concat([df, np.log10(1.0 + df[col]).rename(col_name)], axis=1) # Save min and max data_ranges.append([df[col_name].min(), df[col_name].max()]) else: # formatter = ScalarFormatter() for col in cols: data_ranges.append([df[col].min(), df[col].max()]) # Index by date df["date"] = pd.to_datetime(df["date"]) df = df.set_index(["date", join_key]) # Make maps for each requested data for date in dates: # Get data for this date and merge with shape data date_df = shape_df.merge(df.xs(date, level=0), on=join_key) for i, column in enumerate(cols): # Create title map_title = readable_col_names[column] + " " + str(date.date()) if title_prefix is not None: map_title = title_prefix + " " + map_title val_range = data_ranges[i] # Determine column to plot if log_scale: column = "log_" + column # date_df = date_df.to_crs(epsg=2163) # Plot ax = date_df.plot( column=column, cmap=colormap, vmin=val_range[0], vmax=val_range[1], legend=True, legend_kwds={ "orientation": "horizontal", "shrink": 0.8, "aspect": 50, "label": "Log10-scaled (10 ^ x)", }, edgecolor="black", linewidth=0.1, figsize=(16, 9), ) if outline_df is not None: outline_df.boundary.plot(ax=ax, edgecolor="black", linewidth=0.5) plt.title(map_title) plt.tight_layout() plt.axis("off") axes = plt.gca() axes.set_aspect("equal") # Create filename filename = os.path.join(output_dir, adm_key + "_" + map_title.replace(" ", "") + ".png") # print(filename) # plt.show() df.xs(date, level=0).to_csv(os.path.join(output_dir, adm_key + "_" + map_title.replace(" ", "") + ".csv")) plt.savefig(filename) plt.close()
5,335,638
def get_object_record(obj_key): """ Query the object's record. Args: obj_key: (string) The key of the object. Returns: The object's data record. """ record = None model_names = OBJECT_KEY_HANDLER.get_models(obj_key) for model_name in model_names: try: # Get record. model_obj = apps.get_model(settings.WORLD_DATA_APP, model_name) record = model_obj.objects.get(key=obj_key) break except Exception, e: ostring = "Can not get record %s: %s." % (obj_key, e) print(ostring) print(traceback.print_exc()) continue return record
5,335,639
def diff_cars(c1, c2):
    """
    diffs two cars
    returns a DiffSet containing DiffItems that tell what's missing in c1 as compared to c2
    :param c1: old Booking object
    :param c2: new Booking object
    :return: DiffSet (c1-c2)
    """
    strategy = Differ.get_strategy(CAR_DIFF_STRATEGY)
    return strategy.diff(c1, c2)
5,335,640
def manage_playlists(user):
    """
    List, add, and remove playlists.

    Parameters
    ----------
    user : user object
        Object containing all user data.
    """
    user.printPlaylists()
    if cutie.prompt_yes_or_no('Would you like to remove a playlist?'):
        user.removePlaylists()
    if cutie.prompt_yes_or_no('Would you like to add a playlist?'):
        user.addPlaylists()
    user.printPlaylists()

    playlistStr = user.getPlaylistsAsString()
    config = ConfigParser()
    config.read('.config.ini')
    config['spotify']['playlist_id'] = playlistStr
    with open('.config.ini', 'w') as f:
        config.write(f)
5,335,641
def find_table_defs(sql_lines): """ Find the table definitions in the given SQL input (a sequence of strings). Yield each complete table definition as a string. Naïvely assumes that "create table" starts a table definition, that ");" ends it, that each of the previous occur on a single line, and that it is OK to ignore quoting, escaping, and context. """ # Gather lines into table definitions and parse them tbl_def = io.StringIO() line = next(sql_lines, None) while line is not None: # Look for the beginning of a table definition beg_match = _table_def_beg_pattern.search(line) # If not found, go to the next line if beg_match is None: line = next(sql_lines, None) continue # The start has been found. Now look for the end. end_match = _table_def_end_pattern.search(line, beg_match.end()) while end_match is None: # Accumulate the current line into the buffer if beg_match is None: tbl_def.write(line) else: tbl_def.write(line[beg_match.start():]) beg_match = None # Get the next line line = next(sql_lines, None) if line is None: # EOF in middle of table definition return end_match = _table_def_end_pattern.search(line) # The end has been found. Add it to the buffer. if beg_match is None: tbl_def.write(line[:end_match.end()]) else: tbl_def.write(line[beg_match.start():end_match.end()]) beg_match = None # Yield the table definition yield tbl_def.getvalue() # Reset the buffer tbl_def = io.StringIO() # Process the rest of the line line = line[end_match.end():]
5,335,642
def dist_env():
    """
    Return a dict of all variable that distributed training may use.
    NOTE: you may rewrite this function to suit your cluster environments.
    """
    trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
    num_trainers = 1
    training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
    assert(training_role == "PSERVER" or training_role == "TRAINER")

    # - PADDLE_TRAINER_ENDPOINTS means nccl2 mode.
    # - PADDLE_PSERVER_ENDPOINTS means pserver mode.
    # - PADDLE_CURRENT_ENDPOINT means current process endpoint.
    trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
    pserver_endpoints = os.getenv("PADDLE_PSERVER_ENDPOINTS")
    current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
    if trainer_endpoints:
        trainer_endpoints = trainer_endpoints.split(",")
        num_trainers = len(trainer_endpoints)
    elif pserver_endpoints:
        num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM"))

    return {
        "trainer_id": trainer_id,
        "num_trainers": num_trainers,
        "current_endpoint": current_endpoint,
        "training_role": training_role,
        "pserver_endpoints": pserver_endpoints,
        "trainer_endpoints": trainer_endpoints
    }
5,335,643
def imsplot_tensor(*imgs_tensor):
    """
    Plot multiple tensor images with matplotlib.pyplot.
    The input should have shape (bn, c, h, w), or be a sequence of
    single images each of shape (1, c, h, w).
    """
    count = min(8, len(imgs_tensor))
    if count == 0:
        return
    col = min(2, count)
    row = count // col
    if count % col > 0:
        row = row + 1
    for i in range(count):
        plt.subplot(row, col, i + 1)
        imshow_tensor(imgs_tensor[i])
5,335,644
def main(request, response): """Helper handler for Beacon tests. It handles two forms of requests: STORE: A URL with a query string of the form 'cmd=store&sid=<token>&tidx=<test_index>&tid=<test_name>'. Stores the receipt of a sendBeacon() request along with its validation result, returning HTTP 200 OK. Parameters: tidx - the integer index of the test. tid - a friendly identifier or name for the test, used when returning results. STAT: A URL with a query string of the form 'cmd=stat&sid=<token>&tidx_min=<min_test_index>&tidx_max=<max_test_index>'. Retrieves the results of test with indices [min_test_index, max_test_index] and returns them as a JSON array and HTTP 200 OK status code. Due to the eventual read-once nature of the stash, results for a given test are only guaranteed to be returned once, though they may be returned multiple times. Parameters: tidx_min - the lower-bounding integer test index. tidx_max - the upper-bounding integer test index. Example response body: [{"id": "Test1", error: null}, {"id": "Test2", error: "some validation details"}] Common parameters: cmd - the command, 'store' or 'stat'. sid - session id used to provide isolation to a test run comprising multiple sendBeacon() tests. """ session_id = request.GET.first("sid"); command = request.GET.first("cmd").lower(); # Workaround to circumvent the limitation that cache keys # can only be UUID's. def wrap_key(key, path): return (str(path), str(key)) request.server.stash._wrap_key = wrap_key # Append CORS headers if needed. if "origin" in request.GET: response.headers.set("Access-Control-Allow-Origin", request.GET.first("origin")) if "credentials" in request.GET: response.headers.set("Access-Control-Allow-Credentials", request.GET.first("credentials")) # Handle the 'store' and 'stat' commands. if command == "store": # The test id is just used to make the results more human-readable. test_id = request.GET.first("tid") # The test index is used to build a predictable stash key, together # with the unique session id, in order to retrieve a range of results # later knowing the index range. test_idx = request.GET.first("tidx") test_data_key = build_stash_key(session_id, test_idx) test_data = { "id": test_id, "error": None } payload = "" if "Content-Type" in request.headers and \ "form-data" in request.headers["Content-Type"]: if "payload" in request.POST: # The payload was sent as a FormData. payload = request.POST.first("payload") else: # A FormData was sent with an empty payload. pass else: # The payload was sent as either a string, Blob, or BufferSource. payload = request.body payload_parts = filter(None, payload.split(":")) if len(payload_parts) > 0: payload_size = int(payload_parts[0]) # Confirm the payload size sent matches with the number of characters sent. if payload_size != len(payload_parts[1]): test_data["error"] = "expected %d characters but got %d" % (payload_size, len(payload_parts[1])) else: # Confirm the payload contains the correct characters. for i in range(0, payload_size): if payload_parts[1][i] != "*": test_data["error"] = "expected '*' at index %d but got '%s''" % (i, payload_parts[1][i]) break # Store the result in the stash so that it can be retrieved # later with a 'stat' command. request.server.stash.put(test_data_key, test_data) elif command == "stat": test_idx_min = int(request.GET.first("tidx_min")) test_idx_max = int(request.GET.first("tidx_max")) # For each result that has come in, append it to the response. 
results = [] for test_idx in range(test_idx_min, test_idx_max+1): # +1 because end is exclusive test_data_key = build_stash_key(session_id, test_idx) test_data = request.server.stash.take(test_data_key) if test_data: results.append(test_data) response.headers.set("Content-Type", "text/plain") response.content = json.dumps(results) else: response.status = 400
5,335,645
def set_unique_postfix(env_patch, default_instances):
    """ Generate a unique postfix and add it to an instance name to avoid a 409 HTTP error
    in case the instance name was already used during the last week """
    unique_postfix = f'-{uuid.uuid4().hex[:5]}'  # generate 5-character unique postfix
    for instance in default_instances:
        env_variable, value = instance
        env_patch.setenv(env_variable, value % unique_postfix)
5,335,646
def calc_chi2(model, dof=None):
    """
    Calculate chi-square statistic.

    Parameters
    ----------
    model : Model
        Model.
    dof : int, optional
        Degrees of freedom statistic. The default is None.

    Returns
    -------
    tuple
        chi2 statistic and p-value.
    """
    if dof is None:
        dof = calc_dof(model)
    if model.last_result.name_obj == 'FIML':
        stat = model.last_result.fun / model.n_samples
    else:
        stat = model.n_samples * model.last_result.fun
    return stat, 1 - chi2.cdf(stat, dof)
5,335,647
def QuadraticCommandAddControl(builder, control):
    """This method is deprecated. Please switch to AddControl."""
    return AddControl(builder, control)
5,335,648
def list_(ctx, show_train):
    """Show information about all builds"""
    for build in ctx.obj['get_fl33t_client']().list_builds():
        click.echo(build)
        if show_train:
            click.echo('Train:')
            click.echo(' - {}'.format(build.train))
5,335,649
def test_max(non_square_gamma_tensor: IGT) -> None: """Test the max operator default behaviour (no args)""" output = non_square_gamma_tensor.max() original_values = non_square_gamma_tensor._values() # Ensure both of these have the same shapes to begin with assert non_square_gamma_tensor.shape == original_values.shape # Ensure resultant shapes are correct target_values = original_values.max() print(f"original shape = {non_square_gamma_tensor.shape}") print(f"target shape = {target_values.shape}") print(f"output shape = {output.shape}") assert output.shape == target_values.shape # Test to see if _values() constructs a proper shape output_values = output._values() assert output_values.shape != original_values.shape assert output_values.shape == target_values.shape # Test to see if the values have been kept the same print(f"Values, {type(original_values)}") print(original_values) print(f"New Values, {type(output_values)}") print(output_values) assert (output_values == target_values).all() old_entities = non_square_gamma_tensor._entities() new_entities = output._entities() assert old_entities.shape != new_entities.shape
5,335,650
def authenticated_api(username, api_root=None, parser=None):
    """Return an OAuth-authenticated tweepy API object."""
    auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY,
                               settings.TWITTER_CONSUMER_SECRET)
    try:
        user = User.objects.get(username__iexact=username)
        sa = user.social_auth.all()[0]
        auth.set_access_token(sa.tokens['oauth_token'],
                              sa.tokens['oauth_token_secret'])
        return tweepy.API(auth,
                          api_root=api_root or settings.TWITTER_API_ROOT,
                          parser=parser or JSONParser())
    except Exception:
        return None
5,335,651
def stop_flops_count(self) -> None: """ A method that will be available after add_flops_counting_methods() is called on a desired net object. Stops computing the mean flops consumption per image. Call whenever you want to pause the computation.""" remove_batch_counter_hook_function(self) self.apply(remove_flops_counter_hook_function)
5,335,652
def run(shop):
    """ Go shopping. """
    # This would possibly not do anything, as it runs even when only showing
    # help for an option.
    print('Run')
    print('shop: {0}\n'.format(shop))
5,335,653
def get_model_header(fpath):
    """
    Read the header line of a gzip-compressed model file.

    :param fpath: path to the gzipped model file
    :return: list of header fields from the first line, with the leading '#' stripped
    """
    with gz.open(fpath, 'rt') as modelfile:
        header = modelfile.readline().strip().strip('#').split()
    return header
5,335,654
def _parse_vertex(vertex_row): """Parses a line in a PLY file which encodes a vertex coordinates. Args: vertex_row: string with vertex coordinates and color. Returns: 2-tuple containing a length-3 array of vertex coordinates (as floats) and a length-3 array of RGB color values (as ints between 0 and 255, inclusive). """ vertex = vertex_row.strip().split() # The row must contain coordinates with RGB/RGBA color in addition to that. if len(vertex) >= 6: # Supports only RGB colors now, alpha channel will be ignored. # TODO(b/129298103): add support of RGBA in .ply files. return ([float(coord) for coord in vertex[:3]], [int(channel) for channel in vertex[3:6]]) raise ValueError('PLY file must contain vertices with colors.')
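# Hedged example of the expected row format: three coordinates followed by
# RGB (a trailing alpha value, if present, is ignored).
coords, rgb = _parse_vertex("0.5 -1.25 3.0 255 128 0 255")
assert coords == [0.5, -1.25, 3.0]
assert rgb == [255, 128, 0]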
5,335,655
def clean_tag(tag): """clean up tag.""" if tag is None: return None t = tag if isinstance(t, list): t = t[0] if isinstance(t, tuple): t = t[0] if t.startswith('#'): t = t[1:] t = t.strip() t = t.upper() t = t.replace('O', '0') t = t.replace('B', '8') return t
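# Illustrative calls showing the normalisation rules above: hash stripping,
# upper-casing, and the O->0 / B->8 substitutions.
assert clean_tag("#p0lyqr8b") == "P0LYQR88"
assert clean_tag(["#9ougvqol"]) == "90UGVQ0L"
assert clean_tag(None) is None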
5,335,656
def massM2(param): """ Mass term in the neutrino mass basis. @type param : PhysicsConstants @param param : set of physical parameters to be used. @rtype : numpy array @return : mass matrix in mass basis. """ M2 = np.zeros([param.numneu,param.numneu],complex) for k in np.arange(1,param.numneu,1): M2[k,k] = param.dmsq[k+1] return M2
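# Hedged usage sketch with a stand-in for PhysicsConstants: only the two
# attributes massM2 reads (numneu and dmsq) are mocked, with made-up values.
import numpy as np

class _FakeParams:
    numneu = 3
    # dmsq is indexed as dmsq[k + 1] for k = 1 .. numneu - 1, i.e. keys 2..numneu
    dmsq = {2: 7.5e-5, 3: 2.5e-3}

M2 = massM2(_FakeParams())
print(np.diag(M2))  # [0, 7.5e-5, 2.5e-3] on the diagonal (stored as complex values)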
5,335,657
def remove_persistent_volume_claim(name, mount_path): """ Remove a persistent volume claim in the default notebook server. Parameters ---------- name : str mount_path : str """ load_kube_config() v1 = client.CoreV1Api() custom_api = client.CustomObjectsApi(api_client=ApiClientForJsonPatch()) try: notebook = custom_api.get_namespaced_custom_object( group=NOTEBOOK_GROUP, version="v1", namespace=NOTEBOOK_NAMESPACE, plural="notebooks", name=NOTEBOOK_NAME, _request_timeout=5, ) except ApiException as e: if e.status == 404: warnings.warn(f"Notebook server does not exist. Skipping removing volume mount path {name}") return body = literal_eval(e.body) message = body["message"] raise InternalServerError(f"Error while trying to patch notebook server: {message}") try: pod_vols = enumerate(notebook["spec"]["template"]["spec"]["volumes"]) vol_index = next((i for i, v in pod_vols if v["name"] == f"{name}"), -1) if vol_index == -1: warnings.warn(f"Volume mount path not found: {name}") return v1.delete_namespaced_persistent_volume_claim( name=name, namespace=NOTEBOOK_NAMESPACE, ) body = [ { "op": "remove", "path": f"/spec/template/spec/volumes/{vol_index}", }, { "op": "remove", "path": f"/spec/template/spec/containers/0/volumeMounts/{vol_index}", }, ] custom_api.patch_namespaced_custom_object( group=NOTEBOOK_GROUP, version="v1", namespace=NOTEBOOK_NAMESPACE, plural="notebooks", name=NOTEBOOK_NAME, body=body, ) # Wait for the pod to be ready and have all containers running while True: try: pod = v1.read_namespaced_pod( name=NOTEBOOK_POD_NAME, namespace=NOTEBOOK_NAMESPACE, _request_timeout=5, ) if pod.status.phase == "Running" \ and all([c.state.running for c in pod.status.container_statuses]) \ and not [v for v in pod.spec.volumes if v.name == f"{name}"]: warnings.warn(f"Removed volume {name} in notebook server!") break except ApiException: pass finally: warnings.warn(NOTEBOOK_WAITING_MSG) time.sleep(5) except ApiException as e: body = literal_eval(e.body) message = body["message"] raise InternalServerError(f"Error while trying to patch notebook server: {message}")
5,335,658
def get_input_data(train_file_path='train.json', train=True):
    """Retrieves training (X) and label (y) matrices.

    Note that this can take a few seconds to run.

    Args:
        train_file_path is the path of the file containing training data.
        train is whether the file also contains the 'is_iceberg' labels.

    Returns:
        A tuple containing the X matrix, the y label matrix (only when train
        is True), and the list of instance ids.

        X is of shape (N, 75, 75, 2), where N is the number of images,
        75 x 75 is the dimension of the images, and 2 represents the number
        of channels (the two radar bands) for each image.
    """
    with open(train_file_path, 'r') as train_file:
        json_data = train_file.read()
        train_data = json.loads(json_data)

    band_1 = [instance['band_1'] for instance in train_data]
    band_2 = [instance['band_2'] for instance in train_data]
    ids = [instance['id'] for instance in train_data]
    band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_1])
    band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_2])

    # Stack the two bands into an array of N tensors with dimension 75 x 75 x 2
    X_train = np.concatenate([band_1[:, :, :, np.newaxis], band_2[:, :, :, np.newaxis]], axis=-1)

    if train:
        # True labels of data, either iceberg or not iceberg
        y_train = np.array([instance['is_iceberg'] for instance in train_data])
        return X_train, y_train, ids
    else:
        return X_train, ids
5,335,659
def getitimer(*args, **kwargs): # real signature unknown """ Returns current value of given itimer. """ pass
5,335,660
def test_rebase_error(): """ Run 'git up' with a failing rebase """ os.chdir(repo_path) from PyGitUp.gitup import GitUp gitup = GitUp(testing=True) gitup.run()
5,335,661
def dice_coef(y_true, y_pred): """ :param y_true: the labeled mask corresponding to a mammogram scan :param y_pred: the predicted mask of the scan :return: A metric that accounts for precision and recall on the scale from 0 - 1. The closer to 1, the better. Dice = 2 * (|X & Y|)/ |X|+ |Y|) = sum(|A*B|)/(sum(|A|)+sum(|B|)) Citation (MIT License): https://github.com/jocicmarko/ ultrasound-nerve-segmentation/blob/ master/train.py """ y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)
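# Plain-numpy illustration of the same Dice formula, shown without a Keras
# session; SMOOTH is assumed to be a small module-level constant (1.0 here).
import numpy as np

def dice_coef_np(y_true, y_pred, smooth=1.0):
    y_true_f = y_true.ravel()
    y_pred_f = y_pred.ravel()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)

mask = np.array([[1., 1.], [0., 0.]])
print(dice_coef_np(mask, mask))        # perfect overlap -> 1.0
print(dice_coef_np(mask, 1.0 - mask))  # no overlap -> 0.2 (smooth keeps it nonzero)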
5,335,662
def linmsg(x, end_pts_msg=None, max_msg=None, fill_value=1.e20): """ Linearly interpolates to fill in missing values. x = Ngl.linmsg(x,end_pts_msg=None,max_msg=None,fill_value=1.e20) x -- A numpy or masked array of any dimensionality that contains missing values. end_pts_msg -- how missing beginning and end points will be returned. If this value is greater than or equal to 0, then the beginning and end points will be returned as missing (default option). If this value is less than 0, then they will be set to the nearest non-missing value. max_msg -- the maximum number of consecutive missing values to be interpolated. If not set, then this function will try to interpolate as many values as it can. fill_value -- The missing value for x. Defaults to 1.e20 if not set. """ # # Set defaults for input parameters not specified by user. # if end_pts_msg is None: end_pts_msg = 0 # # Setting max_msg to 0 will cause the C wrapper to set this to # npts before going into the Fortran routine. # if max_msg is None: max_msg = 0 # # If input array is a numpy masked array, return a numpy masked array. # Otherwise missing values are dealt with using the fill_value. # fv = _get_fill_value(x) if (any(fv is None)): return fplib.linmsg(_promote_scalar(x),end_pts_msg,max_msg,fill_value) else: aret = fplib.linmsg(x.filled(fv), end_pts_msg, max_msg, fv) return ma.masked_array(aret, fill_value=fv)
5,335,663
def test_auto_args_db(*args, **kwargs): """After you write a .csv file for args, test it here.""" print('Test of auto_args_db wrapper.') print('Result for *args:') print(args) print('Results for **kwargs:') print(kwargs)
5,335,664
def get_or_create_actor_by_name(name):
    """
    Return the actor registered under the given global name if it exists,
    otherwise create a new actor with that name.

    :param name: String
    """
    return ta.ActorSystem().createActor(MyClass, globalName=name)
5,335,665
async def test_init(hass, mock_light): """Test platform setup.""" state = hass.states.get("light.bedroom") assert state.state == STATE_OFF assert state.attributes == { ATTR_FRIENDLY_NAME: "Bedroom", ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_WHITE_VALUE, } with patch.object(hass.loop, "stop"), patch.object( mock_light, "disconnect" ) as mock_disconnect: await hass.async_stop() await hass.async_block_till_done() assert mock_disconnect.called
5,335,666
def client(): """Client to call tests against""" options = { 'bind': '%s:%s' % ('0.0.0.0', '8080'), 'workers': str(number_of_workers()), } return testing.TestClient(falcon.API(), options)
5,335,667
async def test_encrypted_payload_not_supports_encryption( hass, setup_comp, not_supports_encryption ): """Test encrypted payload with no supported encryption.""" await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY}) await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE) assert hass.states.get(DEVICE_TRACKER_STATE) is None
5,335,668
async def resolve(ctx, _id, *, msg=''): """Owner only - Resolves a report.""" if not ctx.message.author.id == OWNER_ID: return report = Report.from_id(_id) await report.resolve(ctx, msg) report.commit() await bot.say(f"Resolved `{report.report_id}`: {report.title}.")
5,335,669
def hard_negative_mining(loss, labels, neg_pos_ratio=3):
    """
    Limits the ratio of negative to positive examples during training.
    By default the number of negatives kept is three times the number of positives.
    Args:
        loss (N, num_priors): the loss for each example.
        labels (N, num_priors): the labels.
        neg_pos_ratio: negative/positive ratio: number of negatives / number of positives
    """
    pos_mask = labels > 0
    num_pos = pos_mask.long().sum(dim=1, keepdim=True)
    num_neg = num_pos * neg_pos_ratio

    loss[pos_mask] = -math.inf  # negative infinity
    # Sort twice to find each element's rank in the descending ordering
    _, indexes = loss.sort(dim=1, descending=True)  # descending sort, returns (values, indices)
    _, orders = indexes.sort(dim=1)
    neg_mask = orders < num_neg
    # With the positives pushed to -inf they are excluded from the ranking, so this
    # keeps the num_neg background priors with the highest loss (the hardest negatives)
    return pos_mask | neg_mask
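# Hedged example on a tiny batch: one positive prior per row and a 3:1 ratio,
# so exactly three hardest negatives are kept per row. The function modifies
# `loss` in place, hence the clone().
import torch

loss = torch.tensor([[0.9, 0.1, 0.5, 0.3, 0.2],
                     [0.2, 0.8, 0.4, 0.6, 0.1]])
labels = torch.tensor([[1, 0, 0, 0, 0],
                       [0, 2, 0, 0, 0]])
mask = hard_negative_mining(loss.clone(), labels, neg_pos_ratio=3)
print(mask.sum(dim=1))  # tensor([4, 4]): 1 positive + 3 hardest negatives per row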
5,335,670
def groups(): """ Return groups """ return _clist(getAddressBook().groups())
5,335,671
def round_temp(value): """Round temperature for publishing.""" return round(value, dev_fan.round_temp)
5,335,672
def test_cli_command_defines_a_cli_group() -> None: """Check that cli command defines a CLI group when invoked""" assert isinstance(cli, ZenMLCLI)
5,335,673
def scatter(n_dims, cuboids_per_concept, params, num_samples, max_dim_per_domain, operation): """Creates scatter plots for the betweenness values returned by different combinations of alphas and methods. Parameters: n_dims: number of dimensions cuboids_per_concept: number of cuboids per concept params: a dictionary mapping from configuration names to a dictionary of named parameters for the operation num_samples: number of samples to draw max_dim_per_domain: maximal number of dimensions per domain operation: operation to evaluate""" dimensions = list(range(n_dims)) random.seed(42) results = {} for key, value in params.items(): results[key] = [] counter = 0 fails = 0 while counter < num_samples: # create a random domain structure domains = {} dimensions_left = dimensions j = 0 while len(dimensions_left) > 0: num_dims = random.randint(1, min(len(dimensions_left), max_dim_per_domain)) dims = random.sample(dimensions_left, num_dims) domains[j] = list(dims) dimensions_left = [dim for dim in dimensions_left if dim not in dims] j += 1 # make the conceptual space cs.cs.init(n_dims, domains) # create three concepts with random identical weights, random cuboids, maximal mu and random c w = random_weights(domains) c1_list = [] c2_list = [] c3_list = [] for i in range(cuboids_per_concept): c1_list.append(random_cuboid(dimensions, domains, 0.0, 1.0)) c2_list.append(random_cuboid(dimensions, domains, 0.0, 1.0)) c3_list.append(random_cuboid(dimensions, domains, 0.0, 1.0)) s1 = cs.core.from_cuboids(c1_list, domains) s2 = cs.core.from_cuboids(c2_list, domains) s3 = cs.core.from_cuboids(c3_list, domains) f1 = cs.concept.Concept(s1, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w) f2 = cs.concept.Concept(s2, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w) f3 = cs.concept.Concept(s3, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w) local_res = {} try: for config_name, param_dict in params.items(): local_res[config_name] = operation(f1, f2, f3, param_dict) except Exception: fails += 1 continue for key, res in local_res.items(): results[key].append(res) counter += 1 if counter % 50 == 0: print(("{0}/{1} ...".format(counter, fails))) print(("ran {0} examples, failed {1} times".format(counter, fails))) # all pairs of configurations for first_config, second_config in combinations(list(results.keys()), 2): # draw the plot fig, ax = plt.subplots(figsize=(12,12)) ax.tick_params(axis="x", labelsize=16) ax.tick_params(axis="y", labelsize=16) ax.set_xlim(-0.01,1.01) ax.set_ylim(-0.01,1.01) ax.scatter(results[first_config], results[second_config]) plt.xlabel(first_config, fontsize = 20) plt.ylabel(second_config, fontsize = 20) plt.show() # compute the correlations pearson, _ = pearsonr(results[first_config], results[second_config]) spearman, _ = spearmanr(results[first_config], results[second_config]) print(('{0} - {1}: Pearson {2}, Spearman {3}'.format(first_config, second_config, pearson, spearman)))
5,335,674
def get_key_from_property(prop, key, css_dict=None, include_commented=False): """Returns the entry from the dictionary using the given key""" if css_dict is None: css_dict = get_css_dict()[0] cur = css_dict.get(prop) or css_dict.get(prop[1:-1]) if cur is None: return None value = cur.get(key) if value is not None: return value for v in cur['values']: if (v.startswith('<') or (include_commented and v.startswith('<_'))) and v.endswith('>'): ret = get_key_from_property(v, key, css_dict, include_commented) if ret is not None: return ret
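# Hedged example of the nested dictionary layout the lookup walks: a property
# whose 'values' point at a shared '<length>' entry that carries the key.
css_dict_example = {
    'margin': {'values': ['<length>']},
    '<length>': {'values': ['0', '1px'], 'default': '0'},
}
print(get_key_from_property('margin', 'default', css_dict_example))  # '0'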
5,335,675
def bbox_area(gt_boxes): """ gt_boxes: (K, 4) ndarray of float area: (k) """ K = gt_boxes.size(0) gt_boxes_area = ((gt_boxes[:,2] - gt_boxes[:,0] + 1) * (gt_boxes[:,3] - gt_boxes[:,1] + 1)).view(K) return gt_boxes_area
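# Quick check of the pixel-inclusive (+1) convention used above.
import torch

gt_boxes = torch.tensor([[0., 0., 9., 9.],
                         [2., 3., 4., 5.]])
print(bbox_area(gt_boxes))  # tensor([100., 9.]) -> (9-0+1)^2 and (4-2+1)*(5-3+1)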
5,335,676
def DecodedMessage(tG,x): """ Let G be a coding matrix. tG its transposed matrix. x a n-vector received after decoding. DecodedMessage Solves the equation on k-bits message v: x = v.G => G'v'= x' by applying GaussElimination on G'. ------------------------------------- Parameters: tG: Transposed Coding Matrix. Must have more rows than columns to solve the linear system. Must be full rank. x: n-array. Must be in the Code (in Ker(H)). """ n,k = tG.shape if n < k: raise ValueError('Coding matrix G must have more columns than rows to solve the linear system on v\': G\'v\' = x\'') rtG, rx = GaussElimination(tG,x) rank = sum([a.any() for a in rtG]) if rank!= k: raise ValueError('Coding matrix G must have full rank = k to solve G\'v\' = x\'') message=np.zeros(k).astype(int) message[k-1]=rx[k-1] for i in reversed(range(k-1)): message[i]=abs(rx[i]-BinaryProduct(rtG[i,list(range(i+1,k))],message[list(range(i+1,k))])) return message
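# Hedged round-trip sketch: encode a 2-bit message with a small full-rank
# generator matrix and recover it. GaussElimination and BinaryProduct are
# assumed to be the helpers this module already provides.
import numpy as np

G = np.array([[1, 0, 1, 1],
              [0, 1, 0, 1]])   # k=2, n=4 generator matrix
v = np.array([1, 1])           # message
x = np.dot(v, G) % 2           # codeword: x = v.G (mod 2)
print(DecodedMessage(G.T, x))  # expected to recover [1 1]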
5,335,677
def get_ttp_card_info(ttp_number): """ Get information from the specified transport card number. The number is the concatenation of the last 3 numbers of the first row and all the numbers of the second row. See this image: https://tarjetatransportepublico.crtm.es/CRTM-ABONOS/archivos/img/TTP.jpg :param str ttp_number: The number that identifies a transport card. It must be a string of the last 3 numbers of the first row and all the numbers of the second row. :return dict: A dictionary with information of the transport card. It has information regarding the titles in that card, expiring dates, purchase dates, title types (young, normal, old, ...), among others. """ if ttp_number is not None: client = Client(Urls.CITRAM_CARD_SERVICE.value) result = client.service.ConsultaSaldo1(sNumeroTTP=ttp_number) final_result = {'status': result['iCallLogField'], 'card_info': xmltodict.parse(result['sResulXMLField'])} return final_result else: raise NotEnoughParametersException('You must specify a transport card number.')
5,335,678
def test_basic_message_strip_and_splitup(): """ This tests these bits of functionality: * white space and comment stripping similar to how git-commit does it * line splitting * subject extraction * paragraph splitting * body extraction """ m = CommitMessage('''\ # test stripping of comments and preceding empty lines improvement(config): display config error messages without backtrace In order to prevent users from thinking they're seeing a bug in Hopic. This changes the type of ConfigurationError such that Click will display its message without a backtrace. This ensures the displayed information is more to the point. # ------------------------ >8 ------------------------ This line and every other line after the 'cut' line above should not be present in the output. # test stripping of comments and succeeding empty lines ''') assert m.subject == '''improvement(config): display config error messages without backtrace''' assert m.lines[0] == m.subject assert m.paragraphs[0] == '''In order to prevent users from thinking they're seeing a bug in Hopic.''' assert m.paragraphs[0] == m.body.splitlines()[0] assert m.body.splitlines()[0] == m.message.splitlines()[2] assert m.paragraphs[1].splitlines(keepends=True)[0] == '''This changes the type of ConfigurationError such that Click will display\n''' assert m.paragraphs[-1].splitlines(keepends=True)[-1] == '''is more to the point.''' assert m.paragraphs[-1].splitlines()[-1] == m.body.splitlines()[-1] assert m.body.splitlines()[-1] == m.message.splitlines()[-1]
5,335,679
def tag_matches(tag, impl_version='trunk', client_version='trunk'): """Test if specified versions match the tag. Args: tag: skew test expectation tag, e.g. 'impl_lte_5' or 'client_lte_2'. impl_version: WebLayer implementation version number or 'trunk'. client_version: WebLayer implementation version number or 'trunk'. Returns: True if the specified versions match the tag. Raises: AssertionError if the tag is invalid. """ # 'All' is special cased to match anything. if tag == 'all': return True # Extract the three components from the tag. match = re.match(r'(client|impl)_([gl]te)_([0-9]+)', tag) assert match is not None, ( 'tag must be of the form "{client,impl}_{gte,lte}_$version", found %r' % tag) target_str, op_str, tag_version_str = match.groups() # If a version is specified see if the tag refers to the same target or # return False otherwise. if impl_version != 'trunk' and target_str != 'impl': return False if client_version != 'trunk' and target_str != 'client': return False version = impl_version if impl_version != 'trunk' else client_version assert type(version) == int, 'Specified version must be an integer.' tag_version = int(tag_version_str) op = OP_MAP[op_str] return op(version, tag_version)
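# Illustrative calls, assuming OP_MAP maps 'lte'/'gte' to operator.le/operator.ge
# as the tag grammar above implies.
assert tag_matches('all', impl_version=83, client_version='trunk')
assert tag_matches('impl_lte_5', impl_version=4, client_version='trunk')
assert not tag_matches('impl_lte_5', impl_version=6, client_version='trunk')
assert not tag_matches('client_gte_2', impl_version=4, client_version='trunk')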
5,335,680
def split_params(param_string): """ Splits a parameter string into its key-value pairs >>> d = split_params('alpha-0.5_gamma-0.9') >>> d['alpha'] 0.5 >>> d['gamma'] 0.9 >>> d = split_params('depth-15_features-a-b-c') >>> d['depth'] 15 >>> d['features'] ['a', 'b', 'c'] >>> d = split_params('alpha-0.1_l-a-b_trace_rate-None') >>> d['alpha'] 0.1 >>> d['l'] ['a', 'b'] >>> d['trace_rate'] >>> print(d['trace_rate']) None >>> split_params('a-b-c') {'a': ['b', 'c']} >>> split_params('a_b_c') {} """ #TODO: check for negatives i.e. alpha--1 parts = param_string.split('_') params = {} for i in range(len(parts)): param = split_items(parts[i]) if len(param) < 2: try: parts[i+1] = parts[i] + "_" + parts[i+1] except: pass continue elif len(param) == 2: params[param[0]] = param[1] elif len(param) == 3 and len(param[1]) == 0: params[param[0]] = -param[2] else: params[param[0]] = param[1:] return params
5,335,681
def load_suites_from_directory(dir, recursive=True): # type: (str, bool) -> List[Suite] """ Load a list of suites from a directory. If the recursive argument is set to True, sub suites will be searched in a directory named from the suite module: if the suite module is "foo.py" then the sub suites directory must be "foo". Raise SuiteLoadingError if one or more suite could not be loaded. """ if not osp.exists(dir): raise SuiteLoadingError("Directory '%s' does not exist" % dir) suites = {} for filename in get_py_files_from_dir(dir): suite = load_suite_from_file(filename) if not suite.hidden: suites[filename] = suite if recursive: for dirname in _get_sub_dirs_from_dir(dir): suite = suites.get(dirname + ".py") if not suite: suite_name = osp.basename(dirname) suite = Suite(None, suite_name, build_description_from_name(suite_name)) suites[suite.name] = suite for sub_suite in load_suites_from_directory(dirname, recursive=True): suite.add_suite(sub_suite) return sorted(sorted(filter(lambda s: not s.is_empty(), suites.values()), key=lambda s: s.name), key=lambda s: s.rank)
5,335,682
def run(core=-1): """ Parameters ---------- core: int Integer from argparse module to be used as switcher. Returns ------- None """ """for g_name, A in graphs.generate_n_nodes_graphs(100, get_graphs('cycle', 100)).items(): M = A / sum(A[0]) eigenvals = np.linalg.eigvals(M) np.sort(eigenvals) print(g_name + ' ' + str(abs(eigenvals[1])))""" if core == 0: test4_on_multieigvecsvm_dataset(seed=22052010, n=100, n_samples=20, distr='exp') elif core == 1: test4_on_multieigvecsvm_dataset(seed=22052010, n=100, n_samples=30, distr='exp')
5,335,683
def upgrade(c, release='jhub', version='0.9.0', values='hub/config.yaml'): """Apply changes to the hub deployment.""" command = ( f'helm upgrade --cleanup-on-fail {release} jupyterhub/jupyterhub ' f'--version {version} --values {values}' ) _print_command('Upgrade JupyterHub deployment', command) if _prompt(): c.run(command)
5,335,684
def defineRelationNM(TableA, TableB, sequence=Sequence('all_id_seq'), tableAItemName=None, tableBItemName=None): """defines relation N:M (TableA : TableB) between two tables intermediated table is automaticaly defined Parameters ---------- TableA Model of first table TableB Model of second table """ assert not(sequence is None), "sequence must be defined explicitly" tableAName = TableA.__tablename__ if tableAItemName is None else tableAItemName tableBName = TableB.__tablename__ if tableBItemName is None else tableBItemName interTable = Table( f'{tableAName}_{tableBName}', TableA.metadata, Column('id', BigInteger, sequence, primary_key=True), Column(f'{tableAName}_id', ForeignKey(f'{tableAName}.id'), primary_key=True), Column(f'{tableBName}_id', ForeignKey(f'{tableBName}.id'), primary_key=True) ) setattr(TableA, tableBName, relationship(TableB, secondary=interTable, back_populates=tableAName)) #relationship(lazy='dynamic') setattr(TableB, tableAName, relationship(TableA, secondary=interTable, back_populates=tableBName)) return
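# Hedged usage sketch: two toy declarative models joined N:M through the
# auto-generated association table. The Student/Course models and the
# SQLAlchemy 1.4-style declarative_base import are assumptions.
from sqlalchemy import Column, BigInteger
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Student(Base):
    __tablename__ = 'students'
    id = Column(BigInteger, primary_key=True)

class Course(Base):
    __tablename__ = 'courses'
    id = Column(BigInteger, primary_key=True)

defineRelationNM(Student, Course)
# Student.courses and Course.students are now relationship attributes backed
# by the 'students_courses' association table.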
5,335,685
def usage():
    """Print usage info."""
    print("""%s [-l|--location <location>] [-t|--table <table>] [-u|--update] \
[-r|--recordTag <tag>] [-k|--keyTags <tag1,tag2,tag3,...>] [-d|--debug] \
[-h|--help] <xml doc>""" % sys.argv[0])
5,335,686
async def test_add_event_date_time( hass: HomeAssistant, mock_token_read: None, component_setup: ComponentSetup, mock_calendars_list: ApiResult, test_api_calendar: dict[str, Any], mock_insert_event: Mock, ) -> None: """Test service call that adds an event with a date time range.""" assert await component_setup() start_datetime = datetime.datetime.now() delta = datetime.timedelta(days=3, hours=3) end_datetime = start_datetime + delta await hass.services.async_call( DOMAIN, SERVICE_ADD_EVENT, { "calendar_id": CALENDAR_ID, "summary": "Summary", "description": "Description", "start_date_time": start_datetime.isoformat(), "end_date_time": end_datetime.isoformat(), }, blocking=True, ) mock_insert_event.assert_called() assert mock_insert_event.mock_calls[0] == call( calendarId=CALENDAR_ID, body={ "summary": "Summary", "description": "Description", "start": { "dateTime": start_datetime.isoformat(timespec="seconds"), "timeZone": "CST", }, "end": { "dateTime": end_datetime.isoformat(timespec="seconds"), "timeZone": "CST", }, }, )
5,335,687
def deep_parameters_back(param, back_node, function_params, count, file_path, lineno=0, vul_function=None, isback=False):
    """
    Outer logic of the deep recursive analysis: mainly handles some of the
    initialization conditions and decides whether a new recursion is started.
    :param isback:
    :param lineno:
    :param vul_function:
    :param param:
    :param back_node:
    :param function_params:
    :param count:
    :param file_path:
    :return:
    """
    count += 1
    padding = {}
    is_co, cp, expr_lineno = parameters_back(param, back_node, function_params, lineno,
                                             vul_function=vul_function, file_path=file_path,
                                             isback=isback)

    if count > 20:
        logger.warning("[Deep AST] depth too big, auto exit...")
        return is_co, cp, expr_lineno

    return is_co, cp, expr_lineno
5,335,688
def get_latest_runtime(dotnet_dir: Optional[str] = None, version_major: Optional[int] = 5, version_minor: Optional[int] = 0, version_build: Optional[int] = 0) -> Optional[str]: """ Search and select the latest installed .NET Core runtime directory. """ dotnet_dir = dotnet_dir or get_dotnet_dir() if not dotnet_dir: return None if "DOTNETRUNTIMEVERSION" in dotnet_const.ENVIRON: tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", dotnet_const.ENVIRON["DOTNETRUNTIMEVERSION"]) if isdir(tmp): return tmp runtime = None for r in get_dotnet_runtimes(): if r.name == "Microsoft.NETCore.App": vmatch = re.match(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<build>\d+)", r.version) if vmatch: tmp_major = int(vmatch.group("major")) tmp_minor = int(vmatch.group("minor")) tmp_build = int(vmatch.group("build")) if tmp_major > version_major: version_major = tmp_major version_minor = tmp_minor version_build = tmp_build runtime = r continue if version_major == tmp_major: if tmp_minor > version_minor: version_minor = tmp_minor version_build = tmp_build runtime = r continue if version_minor == tmp_minor: if tmp_build > version_build: version_build = tmp_build runtime = r continue if runtime is None: runtime = r continue if runtime is None: return None tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", runtime.version) if isdir(tmp): return tmp tmp = join(runtime.path, runtime.version) if isdir(tmp): return tmp return None
5,335,689
def choose_action(state, mdp_data): """ Choose the next action (0 or 1) that is optimal according to your current mdp_data. When there is no optimal action, return a random action. Args: state: The current state in the MDP mdp_data: The parameters for your MDP. See initialize_mdp_data. Returns: int, 0 or 1. The index of the optimal action according to your current MDP. """ # BONUS LEARNING OPPORTUNITY: When you have finished the problem set, try # un-commenting the following two lines. This will implement a strategy # called epsilon greedy, which drastically improves performance. Why do you # think this works so well? # # if np.random.uniform() < 0.1: # 10% of the time, choose a random action # return 0 if np.random.uniform() < 0.5 else 1 action = None # *** START CODE HERE *** right = mdp_data['transition_probs'][state, 0, :].dot(mdp_data['value']) left = mdp_data['transition_probs'][state, 1, :].dot(mdp_data['value']) if right > left: action = 0 elif right < left: action = 1 else: action = np.random.choice([0, 1]) # *** END CODE HERE *** return action
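# Hedged sketch with a tiny 2-state MDP so the greedy choice is visible; the
# dict layout mirrors what initialize_mdp_data is described to produce.
import numpy as np

mdp_data = {
    # transition_probs[state, action, next_state]; value[next_state]
    'transition_probs': np.array([
        [[0.9, 0.1],    # state 0, action 0: mostly stays in the low-value state
         [0.2, 0.8]],   # state 0, action 1: mostly reaches the high-value state
        [[0.5, 0.5],
         [0.5, 0.5]],
    ]),
    'value': np.array([0.0, 1.0]),
}
print(choose_action(0, mdp_data))  # 1 -- action 1 has the higher expected value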
5,335,690
def test_rfloordiv():
    """Ensures that floor division works correctly from both sides."""
    floordiv = _MathExpression() // 2
    rfloordiv = 5 // _MathExpression()
    assert floordiv(5) == rfloordiv(2)
5,335,691
def print_rf_rainday_gt1mm_SGonly_plots(model, dest, optimal_k): """ i.e. taking the values but subtracting the baseline """ rfstarttime = timer(); print(f'{utils.time_now()} - Plotting proba of >1mm rainfall over SG now.\nTotal of {optimal_k} clusters. ') # RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path) w_lim_sg = 103.5 e_lim_sg = 104.055 s_lim_sg = 1.1 n_lim_sg = 1.55 # RFprec_to_ClusterLabels_dataset = RFprec_to_ClusterLabels_dataset.sel(lon=slice(w_lim_sg, e_lim_sg),lat=slice(s_lim_sg, n_lim_sg)) fig, gs_rf_plot = create_multisubplot_axes(optimal_k) # rf_ds_lon = RFprec_to_ClusterLabels_dataset.lon # rf_ds_lat = RFprec_to_ClusterLabels_dataset.lat rf_ds_lon = get_RF_calculations(model, 'rf_ds_lon', sgonly=True) rf_ds_lat = get_RF_calculations(model, 'rf_ds_lat', sgonly=True) w = rf_ds_lon.min().values e = rf_ds_lon.max().values s = rf_ds_lat.min().values n = rf_ds_lat.max().values levels = [int(i) for i in np.linspace(25,75,11)] all_colors = np.vstack(plt.cm.RdBu(np.linspace(0,1,21))) terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors) fig.suptitle(f'Rainfall predictions, SG-only: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E', fontweight='bold') # levels1 = np.linspace(25,75,101) levels1 = np.linspace(0,100,201) # levels2 = np.arange(25, 75.5, 5) levels2 = np.arange(0, 100.5, 5) for clus in range(optimal_k): time.sleep(1); gc.collect() # data = RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal.values # mean = np.mean(data > 1, axis=0)*100 # mean = mean-baseline mean = get_RF_calculations(model, 'gt1mm', calculation='mean', clus=clus, sgonly=True) time.sleep(1); gc.collect() ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree()) ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter) ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter) ax_rf_plot.set_facecolor('w') ax_rf_plot.set_extent([w,e,s,n]) ax_rf_plot.coastlines("50m", linewidth=.7, color='k') ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed') if clus < model.grid_width: # top ticks ax_rf_plot.set_xticks([np.ceil(w), np.floor(e)], crs=ccrs.PlateCarree()) ax_rf_plot.set_xticklabels([np.ceil(w), np.floor(e)], rotation=55) ax_rf_plot.xaxis.tick_top() else: ax_rf_plot.set_xticks([]) if clus % model.grid_width == model.grid_width - 1: # right-side ticks ax_rf_plot.set_yticks([s,n], crs=ccrs.PlateCarree()) ax_rf_plot.yaxis.set_label_position("right") ax_rf_plot.yaxis.tick_right() else: ax_rf_plot.set_yticks([]) RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, mean.T, # levels, levels1, cmap=terrain_map, extend='neither') conts = ax_rf_plot.contour(RF, 'y', linewidths=0.02) ax_rf_plot.clabel(conts, # conts.levels, levels2, colors='k', inline=True, fmt='%1.f', fontsize=8) ax_rf_plot.set_title(f"cluster no.{clus+1}", loc='left') time.sleep(1); gc.collect() if clus == model.cbar_pos: # cbar axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%', loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1), bbox_transform=ax_rf_plot.transAxes) cbar_rf = fig.colorbar(RF, cax=axins_rf, label='Proportion of grid with >1 mm rainfall (%)', orientation='horizontal', pad=0.01, # ticks=np.arange(0,100,10) ticks=levels2 ) cbar_rf.ax.xaxis.set_ticks_position('top') cbar_rf.ax.xaxis.set_label_position('top') print(f'\n{utils.time_now()}: {clus}.. 
'); print(f"\n -- Time taken is {utils.time_since(rfstarttime)}\n") fig.subplots_adjust(wspace=0.05,hspace=0.3) fn = f"{dest}/{model.month_names_joined}_RFplot_rainday_gt1mm_SGonly_v3_{model.gridsize}x{model.gridsize}" fig.savefig(fn, bbox_inches='tight', pad_inches=1) print(f'file saved @:\n{fn}') plt.close('all') # sys.exit()
5,335,692
def get_formatted_reproduction_help(testcase): """Return url to reproduce the bug.""" help_format = get_value_from_job_definition_or_environment( testcase.job_type, 'HELP_FORMAT') if not help_format: return None # Since this value may be in a job definition, it's non-trivial for it to # include newlines. Instead, it will contain backslash-escaped characters # that must be converted here (e.g. \n). help_format = help_format.decode('unicode-escape') arguments = get_arguments(testcase) fuzzer_display = get_fuzzer_display(testcase) fuzzer_name = fuzzer_display.name or 'NA' fuzz_target = fuzzer_display.target or 'NA' engine = fuzzer_display.engine or 'NA' last_tested_crash_revision = str( testcase.get_metadata('last_tested_crash_revision') or testcase.crash_revision) project_name = get_project_name(testcase.job_type) testcase_id = str(testcase.key.id()) sanitizer = environment.get_memory_tool_name(testcase.job_type) sanitizer_options = _get_memory_tool_options(testcase) sanitizer_options_string = ' '.join(sanitizer_options) bazel_test_args = _get_bazel_test_args(arguments, sanitizer_options) result = help_format.replace('%TESTCASE%', testcase_id) result = result.replace('%PROJECT%', project_name) result = result.replace('%REVISION%', last_tested_crash_revision) result = result.replace('%FUZZER_NAME%', fuzzer_name) result = result.replace('%FUZZ_TARGET%', fuzz_target) result = result.replace('%ENGINE%', engine) result = result.replace('%SANITIZER%', sanitizer) result = result.replace('%SANITIZER_OPTIONS%', sanitizer_options_string) result = result.replace('%ARGS%', arguments) result = result.replace('%BAZEL_TEST_ARGS%', bazel_test_args) return result
5,335,693
def specialize_on(names, maxsize=None): """ A decorator that wraps a function, partially evaluating it with the parameters defined by ``names`` (can be a string or an iterable of strings) being fixed. The partially evaluated versions are cached based on the values of these parameters using ``functools.lru_cache`` with the provided ``maxsize`` (consequently, these values should be hashable). """ if isinstance(names, str): names = [names] names_set = set(names) def _specializer(func): signature = inspect.signature(func) if not names_set.issubset(signature.parameters): missing_names = names_set.intersection(signature.parameters) raise ValueError( "The provided function does not have parameters: " + ", ".join(missing_names)) @lru_cache(maxsize=maxsize) def get_pevaled_func(args): return partial_apply(func, **{name:val for name, val in args}) def _wrapper(*args, **kwds): bargs = signature.bind(*args, **kwds) call_arguments = bargs.arguments.copy() for name in list(bargs.arguments): if name not in names_set: del bargs.arguments[name] # automatically changes .args and .kwargs else: del call_arguments[name] cache_args = tuple((name, val) for name, val in bargs.arguments.items()) pevaled_func = get_pevaled_func(cache_args) bargs.arguments = call_arguments # automatically changes .args and .kwargs return pevaled_func(*bargs.args, **bargs.kwargs) return _wrapper return _specializer
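# Hedged example: the exponent is fixed per distinct value via the lru_cache'd
# partial application; partial_apply is assumed to be the module-level helper
# that fixes keyword arguments.
@specialize_on('exponent')
def power(base, exponent):
    return base ** exponent

print(power(2, exponent=10))  # expected 1024, computed by the exponent=10 specialization
print(power(3, exponent=10))  # expected 59049, reusing the same cached specialization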
5,335,694
def parse_docstring(docstring: str, signature) -> str: """ Parse a docstring! Note: to try notes. Args: docstring: this is the docstring to parse. Raises: OSError: no it doesn't lol. Returns: markdown: the docstring converted to a nice markdown text. """ params = {} exceptions = {} returns = "" lines = docstring.split("\n") new_lines = [] i = 0 while i < len(lines): if lines[i].lower() in ("args:", "arguments:", "params:", "parameters:"): j = i + 1 name = None while j < len(lines) and lines[j].startswith(" "): if lines[j].startswith(" ") and params[name]: params[name] += " " + lines[j].lstrip(" ") else: name, description = lines[j].lstrip(" ").split(":", 1) params[name] = description.lstrip(" ") j += 1 new_lines.append("**Parameters**\n") new_lines.append("| Name | Type | Description |") new_lines.append("| ---- | ---- | ----------- |") for param_name, param_description in params.items(): param_name, param_default, param_type = get_param_info(signature, param_name) # if param_default: # param_default = f"`{param_default}`" new_lines.append(f"| `{param_name}` | `{param_type}` | {param_description} |") new_lines.append("") i = j - 1 elif lines[i].lower() in ("raise:", "raises:", "except:", "exceptions:"): j = i + 1 name = None while j < len(lines) and lines[j].startswith(" "): if lines[j].startswith(" ") and exceptions[name]: exceptions[name] += " " + lines[j].lstrip(" ") else: name, description = lines[j].lstrip(" ").split(":", 1) exceptions[name] = description.lstrip(" ") j += 1 new_lines.append("**Exceptions**\n") new_lines.append("| Type | Description |") new_lines.append("| ---- | ----------- |") for exception_name, exception_description in exceptions.items(): new_lines.append(f"| `{exception_name}` | {exception_description} |") new_lines.append("") i = j - 1 elif lines[i].lower() in ("return:", "returns:"): j = i + 1 while j < len(lines) and lines[j].startswith(" "): description = lines[j].lstrip(" ") returns += " " + description j += 1 new_lines.append("**Returns**\n") new_lines.append("| Type | Description |") new_lines.append("| ---- | ----------- |") new_lines.append(f"| `{get_return_type(signature)}` | {returns} |") new_lines.append("") i = j - 1 elif lines[i].lower() in ADMONITIONS.keys(): j = i + 1 admonition = [] while j < len(lines) and lines[j].startswith(" ") or lines[j] == "": admonition.append(lines[j]) j += 1 new_lines.append(f"!!! {ADMONITIONS[lines[i].lower()]}") new_lines.append("\n".join(admonition)) new_lines.append("") i = j - 1 else: new_lines.append(lines[i]) i += 1 return "\n".join(new_lines)
5,335,695
def main(): """Tool that takes a collection of theory-assertion examples and runs them through a theorem prover. Supported input format 1: Jsonl format with json objects represented as per the `TheoryAssertionRepresentationWithLabel` class. Sample: { "json_class": "TheoryAssertionRepresentation", "theory_statements": [ "1.0::kind('Fiona').", "1.0::rough('Dave').", "1.0::smart('Dave').", "1.0::quiet('Charlie').", "1.0::kind('Dave').", "1.0::white('Erin').", "1.0::young(X) :- white(X).", "1.0::smart(X) :- big(X), green(X).", "1.0::kind(X) :- round(X), smart(X).", "1.0::kind(X) :- quiet(X), round(X).", "1.0::rough(X) :- round(X), red(X)." "1.0::kind(X) :- quiet(X).", "1.0::furry(X) :- quiet(X), big(X)." ], "assertion_statement": "query(1.0::young('Dave').)." } Supported input format 2: Ruletaker's legacy Jsonl format (for AI2's internal use with existing RuleTaker datasets) Sample (there are additional fields not relevant and not shown here): { "id": "AttNoneg-D3-319", ... "triples":{ "triple1": "text":"Bob is cold.", "representation":"(\"Bob\" \"is\" \"cold\" \"+\")" }, "triple2": { "text":"Erin is nice.", "representation":"(\"Erin\" \"is\" \"nice\" \"+\")" }, "triple3":{ "text":"Gary is nice.", "representation":"(\"Gary\" \"is\" \"nice\" \"+\")" }, "triple4":{ "text":"Harry is blue.", "representation":"(\"Harry\" \"is\" \"blue\" \"+\")" } }, "rules":{ "rule1":{ "text":"Blue people are furry.", "representation":"(((\"someone\" \"is\" \"blue\" \"+\")) -> (\"someone\" \"is\" \"furry\" \"+\"))" }, "rule2":{ "text":"Nice people are furry.", "representation":"(((\"someone\" \"is\" \"nice\" \"+\")) -> (\"someone\" \"is\" \"furry\" \"+\"))" }, "rule3":{ "text":"Blue, big people are nice.", "representation":"(((\"someone\" \"is\" \"blue\" \"+\") (\"someone\" \"is\" \"big\" \"+\")) -> (\"someone\" \"is\" \"nice\" \"+\"))" }, "rule4":{ "text":"If someone is cold then they are quiet.", "representation":"(((\"someone\" \"is\" \"cold\" \"+\")) -> (\"someone\" \"is\" \"quiet\" \"+\"))"}, } }, "questions":{ "Q1":{ "question":"Erin is nice.", "answer":true, ... "representation":"(\"Erin\" \"is\" \"nice\" \"+\")" }, "Q2":{ "question":"Gary is not nice.", "answer":false, ... "representation":"(\"Gary\" \"is\" \"nice\" \"-\")" }, "Q3":{ "question":"Gary is furry.", "answer":true, "representation":"(\"Gary\" \"is\" \"furry\" \"+\")" } } } Output jsonl format: Same as above with an additional field "problog_label": <true|false>. """ parser = argparse.ArgumentParser( description="Tool to run theories through a theorem prover." ) parser.add_argument( "--input-file", required=True, help="Input jsonl file in either the current format or the legacy RuleTaker Jsonl format", ) parser.add_argument( "--input-format", choices=["current", "legacy"], default="current", help="Input file format", ) parser.add_argument( "--theorem-prover", default="problog", help="Thorem proving engine to use. Only supported one right now is problog.", ) parser.add_argument( "--output-file", required=True, help="Output file containing the theorem prover's output for each theory-assertion instance input. \ Output format will be the same as input format, so this will be either a CSV or a jsonl file.", ) parser.add_argument( "--report-metrics", action="store_true", help="Flag that will cause metrics (accuracy against gold labels) to be tracked and reported", ) args = parser.parse_args() with open(args.input_file, "r") as ip, open(args.output_file, "w") as op: run_theorem_prover( args.theorem_prover, ip, args.input_format, op, args.report_metrics )
5,335,696
def get_rgeo(coordinates): """Geocode specified coordinates :argument coordinates: address coordinates :type coordinates: tuple :returns tuple """ params = {'language': GEOCODING_LANGUAGE, 'latlng': ','.join([str(crdnt) for crdnt in coordinates])} result = get(url=GEOCODING_URL, params=params) return result, coordinates
5,335,697
def norm_lib_size_log(assay, counts: daskarr) -> daskarr: """ Performs library size normalization and then transforms the values into log scale. Args: assay: An instance of the assay object counts: A dask array with raw counts data Returns: A dask array (delayed matrix) containing normalized data. """ return np.log1p(assay.sf * counts / assay.scalar.reshape(-1, 1))
5,335,698
def checkSymLinks(config):
    """ Scans the local directory and collects entries that are symlinks """
    log.info("Scanning directory {} for symlinks".format(config.LOCAL_DIR))
    scandir = scanner.Scanner()
    symlinks = []
    for file in scandir.scanDirectory(config.LOCAL_DIR):
        # Stat the entry itself instead of following the link; the standard-library
        # `stat` module is assumed to be imported alongside os and scanner
        stats = os.stat(file, follow_symlinks=False)
        if stat.S_ISLNK(stats.st_mode):
            symlinks.append(file)
    return symlinks
5,335,699