content
stringlengths
22
815k
id
int64
0
4.91M
def import_code(code, name):
    """Dynamically import ``code`` as a module named ``name``.

    ``code`` can be any object containing code -- a string, a file object,
    or a compiled code object. Returns a new module object initialized by
    dynamically importing the given code. If the module has already been
    imported it is returned from the cache and not imported a second time.

    :param code: source to execute in the new module's namespace
    :param name: module name used as the cache key
    :returns: the module object, or None if executing the code failed
    """
    # Check if 'code' has already been loaded
    if name in config.g_utils_import_dictionary:
        return config.g_utils_import_dictionary[name]

    # Load the 'code' into memory. The module is registered *before* exec so
    # that self-referential imports during exec can resolve (mirrors the
    # sys.modules convention of the real import machinery).
    try:
        module = imp.new_module(name)
        config.g_utils_import_dictionary[name] = module
        exec(code, module.__dict__)
        return module
    except Exception as e:
        # Bug fix: remove the half-initialized module from the cache,
        # otherwise subsequent calls would return a broken module object
        # instead of retrying (or consistently returning None).
        config.g_utils_import_dictionary.pop(name, None)
        print("Error={}".format(str(e)))
        return None
5,331,200
def two(data: np.ndarray) -> int:
    """Compute the submarine life-support rating (diagnostic report, part 2).

    Multiplies the oxygen generator rating by the CO2 scrubber rating. Both
    ratings are obtained by repeatedly filtering the report's rows on the
    most (oxygen) or least (CO2) common bit of each successive column until
    a single row remains. The answer is returned in decimal.
    """
    def _rating(use_most_common: bool) -> int:
        """Filter rows column by column and decode the surviving row."""
        candidates = np.copy(data)
        for col in range(len(data[0])):
            if len(candidates) <= 1:
                break
            column = candidates[:, col]
            ones_majority = (column == 1).sum() >= (column == 0).sum()
            if use_most_common:
                keep_bit = 1 if ones_majority else 0
            else:
                keep_bit = 0 if ones_majority else 1
            candidates = candidates[candidates[:, col] == keep_bit]
        return binary_to_int(candidates[0])

    return _rating(True) * _rating(False)
5,331,201
def test_get_mutable_mark_dirty():
    """
    Ensure that accessing a mutable field type does not mark it dirty
    if the field has never been set. If the field has been set, ensure
    that it is set to dirty.
    """
    class MutableTester(XBlock):
        """Test class with mutable fields."""
        list_field = List(default=[])

    mutable_test = MutableTester(TestRuntime(services={'field-data': DictFieldData({})}), scope_ids=Mock(spec=ScopeIds))

    # Test get/set with a default value.
    # Nothing accessed yet, so nothing should be dirty.
    assert len(mutable_test._dirty_fields) == 0
    # Reading a mutable field marks it dirty (the returned list could be
    # mutated by the caller, so the runtime must assume it changed).
    _test_get = mutable_test.list_field
    assert len(mutable_test._dirty_fields) == 1

    # Explicitly assigning does not add a *second* dirty entry for the field.
    mutable_test.list_field = []
    assert len(mutable_test._dirty_fields) == 1

    # Now test after having explicitly set the field.
    mutable_test.save()  # _dirty_fields shouldn't be cleared here
    assert len(mutable_test._dirty_fields) == 1
    # A subsequent read keeps the explicitly-set field dirty.
    _test_get = mutable_test.list_field
    assert len(mutable_test._dirty_fields) == 1
5,331,202
def get_lr_fit(sess, model, x_train, y_train, x_test, num_steps=100):
    """Fit a multi-class logistic regression classifier, once per episode.

    Args:
      sess: TF session used to run the training and prediction ops.
      model: LR model exposing `inputs`, `labels`, `cost`, `acc`,
        `train_op` and `prediction` tensors/ops.
      x_train: [B, N, ...]. Training data; first axis indexes episodes.
      y_train: [B, N]. Training labels, integer classes.
      x_test: [B, M, ...]. Test data.
      num_steps: Number of training steps to run per episode.

    Returns:
      y_pred: [B, M]. Integer class prediction of test data, one row of
        predictions per episode.
        NOTE(review): the original docstring described x_* as single
        episodes ([N, D]) and y_pred as [M]; the code clearly iterates over
        a leading episode axis — confirm the intended calling convention.
    """
    nbatches = x_train.shape[0]  # number of episodes, not minibatches
    y_pred = np.zeros([x_test.shape[0], x_test.shape[1]])
    for ii in six.moves.xrange(nbatches):
        # Flatten per-example features to 2-D for the linear model.
        x_train_ = x_train[ii].reshape([x_train[ii].shape[0], -1])
        x_test_ = x_test[ii].reshape([x_test[ii].shape[0], -1])
        y_train_ = y_train[ii]

        # Reinitialize variables for a new episode.
        var_to_init = list(
            filter(lambda x: 'LRModel' in x.name, tf.global_variables()))
        sess.run(tf.variables_initializer(var_to_init))

        # Run LR training.
        for step in six.moves.xrange(num_steps):
            cost, acc, _ = sess.run(
                [model.cost, model.acc, model.train_op],
                feed_dict={
                    model.inputs: x_train_,
                    model.labels: y_train_
                })

        # Predict integer classes for this episode's test split.
        y_pred[ii] = np.argmax(
            sess.run(model.prediction, feed_dict={
                model.inputs: x_test_
            }), axis=-1)
    return y_pred
5,331,203
def make_block_trials(ntrials_block):
    """Creates a matrix of pseudo-random balanced trial parameters for a block of trials.

    Parameters
    ----------
    ntrials_block : int
        Number of trials in the block.

    Returns
    -------
    block : 2d array
        Matrix of trial parameters (this is NOT random).
    order : 1d array
        Randomized order to run the trials in.
    """
    ## CREATE VECTORS OF TRIAL PARAMETER SETTINGS FOR A BLOCK OF TRIALS
    # FOR EXAMPLE: COND_VEC = NP.APPEND(NP.ZEROS(NTRIAL_BLOCK/2), NP.ONES(NTRIAL_BLOCK/2))
    # ^ CREATES A VECTOR TO HAVE 50% OF EACH OF TWO TRIAL CONDITIONS

    # Collect run details into block object
    block = Block()
    # ADD BLOCK RUN
    # EXAMPLE: block.CONDITION = COND_VEC

    # Set up array for run order.
    # Bug fix: `ntrials_block` is a count (int), so `len(ntrials_block)`
    # raised TypeError; also `range` objects cannot be shuffled in-place in
    # Python 3, so materialize a list first.
    order = list(range(ntrials_block))
    random.shuffle(order)

    return block, order
5,331,204
def jitter(opts, imax, ibad, mesh=None):
    """
    JITTER call JIGSAW iteratively; try to improve topology.

    At each of up to ``imax`` iterations, nodes whose triangles have
    accumulated topological-degree error >= ``ibad`` are dropped from the
    initial conditions, and JIGSAW is re-run from the remaining nodes.

    NOTE(review): the best mesh found is assigned to the *local* name
    ``mesh`` and the function ends with a bare ``return`` — the caller never
    receives the improved mesh. Confirm whether ``return mesh`` was intended.
    """
    if (not isinstance(opts, jigsaw_jig_t)):
        raise Exception("Incorrect type: OPTS.")

    if (mesh is not None and not isinstance(mesh, jigsaw_msh_t)):
        raise Exception("Incorrect type: MESH.")

    if (mesh is None):
        mesh = jigsaw_msh_t()

#--------- call JIGSAW iteratively; try to improve topology.
    OPTS = copy.deepcopy(opts)

    # ``next`` and ``iter`` shadow builtins — kept as-is for a doc-only edit.
    best = metric(mesh); next = mesh; done = False
    for iter in range(imax):

        if (next.point is not None and next.point.size != +0):
            nvrt = next.point.size
            keep = np.full(
                (nvrt), True, dtype=bool)

        #------------------------------ setup initial conditions
            # Derive "<mesh_file stem>-INIT<ext>" next to the mesh file.
            path = Path(opts.mesh_file).parent
            name = Path(opts.mesh_file).stem
            fext = Path(opts.mesh_file).suffix

            name = str(name)
            fext = str(fext)

            name = name + "-INIT" + fext

            OPTS.init_file = str(path / name)

        if (next.tria3 is not None and next.tria3.size != +0):

        #------------------------------ mark any irregular nodes
            vdeg = trideg2(
                next.point["coord"], next.tria3["index"])

            ierr = np.abs(vdeg - 6)  # err in topo. deg.
            # Over-connected vertices are penalized twice as much.
            ierr[vdeg > 6] = ierr[vdeg > 6] * 2

            ierr = ierr[next.tria3["index"]]

            # Drop all nodes of triangles whose summed error is too large.
            M = np.sum(ierr, axis=1) >= ibad

            keep[next.tria3["index"][M, :]] = False

        if (next.edge2 is not None and next.edge2.size != +0):
            # Never seed from constrained edge nodes.
            keep[next.edge2["index"][:, :]] = False

        if (np.count_nonzero(keep) <= +8):

        #------------------------------ don't delete everything!
            keep = np.full(
                (nvrt), True, dtype=bool)

        # Converged when no node was flagged for removal this pass.
        done = np.all(keep)

        #------------------------------ keep nodes far from seam
        init = jigsaw_msh_t()
        init.point = next.point[keep]

        savemsh(OPTS.init_file, init, OPTS.mesh_tags)

        #------------------------------ call JIGSAW with new ICs
        jigsaw(OPTS, next)  # noqa

        cost = metric(next)
        if (cost > best):

        #------------------------------ keep "best" mesh so far!
            mesh = copy.deepcopy(next)
            best = cost

        if (done): return

    return
5,331,205
def view_user(user_id: int):
    """Render the history page for the user with the given id."""
    user = manager.get_user_by_id(user_id)
    return render_user(user)
5,331,206
def first(c) -> col:
    """Aggregate with ``first`` while keeping the plain column name.

    Unlike ``pyspark.sql.functions.first``, the resulting column is aliased
    to the bare column name instead of ``first(<name>)``.
    """
    name = c if isinstance(c, str) else c._jc.toString()
    return F.first(c).alias(name)
5,331,207
def translate(root_list, use_bag_semantics=False):
    """
    Translate a list of relational algebra trees into SQL statements.

    :param root_list: a list of tree roots
    :param use_bag_semantics: flag for using relational algebra bag semantics
    :return: a list of SQL statements
    """
    translator_cls = Translator if use_bag_semantics else SetTranslator
    translator = translator_cls()
    return [translator.translate(tree).to_sql() for tree in root_list]
5,331,208
def replace_aliases(record):
    """Replace all aliases associated with this DID / GUID.

    :param record: the DID / GUID whose aliases are replaced
    :returns: (flask JSON response with the new aliases, 200)
    :raises UserError: if the request body is not valid JSON or does not
        match ``RECORD_ALIAS_SCHEMA``
    """
    # we set force=True so that if MIME type of request is not application/JSON,
    # get_json will still throw a UserError.
    aliases_json = flask.request.get_json(force=True)
    try:
        jsonschema.validate(aliases_json, RECORD_ALIAS_SCHEMA)
    except jsonschema.ValidationError as err:
        logger.warning(f"Bad request body:\n{err}")
        raise UserError(err)

    # Bug fix: the comprehension variable was named `record`, shadowing the
    # `record` parameter — the driver call below then received the last
    # alias dict instead of the DID.
    aliases = [alias_record["value"] for alias_record in aliases_json["aliases"]]

    # authorization and error handling done in driver
    blueprint.index_driver.replace_aliases_for_did(aliases, record)

    aliases_payload = {"aliases": [{"value": alias} for alias in aliases]}
    return flask.jsonify(aliases_payload), 200
5,331,209
def run_with_output(*args, **kwargs):
    """Run the target multiprocessing function, capturing stdout and/or stderr.

    The callable is taken from the ``LP_TARGET_FUNC`` keyword (required —
    a missing target raises KeyError). Optional ``LP_STDOUT_QUEUE`` /
    ``LP_STDERR_QUEUE`` objects temporarily replace ``sys.stdout`` /
    ``sys.stderr``; each given queue receives a sentinel when the target
    finishes, and the real streams are always restored.
    """
    func = kwargs.pop('LP_TARGET_FUNC')  # KeyError if absent: target required
    stdout_q = kwargs.pop('LP_STDOUT_QUEUE', None)
    stderr_q = kwargs.pop('LP_STDERR_QUEUE', None)

    # Redirect output streams into the provided queues.
    if stdout_q:
        sys.stdout = stdout_q
    if stderr_q:
        sys.stderr = stderr_q

    try:
        func(*args, **kwargs)
    finally:
        # Signal completion and restore the original streams.
        if stdout_q is not None:
            stdout_q.put_sentinel()  # Sentinel to detect when finished
            sys.stdout = sys.__stdout__
        if stderr_q is not None:
            stderr_q.put_sentinel()  # Sentinel to detect when finished
            sys.stderr = sys.__stderr__
5,331,210
def pos_tag(docs, language=None, tagger_instance=None, doc_meta_key=None):
    """
    Apply Part-of-Speech (POS) tagging to list of documents `docs`. Either load a tagger based on supplied `language`
    or use the tagger instance `tagger_instance` which must have a method ``tag()``. A tagger can be loaded via
    :func:`~tmtoolkit.preprocess.load_pos_tagger_for_language`.

    POS tagging so far only works for English and German. The English tagger uses the Penn Treebank tagset
    (https://ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), the German tagger uses STTS
    (http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html).

    :param docs: list of tokenized documents
    :param language: the language for the POS tagger (currently only "english" and "german" are supported) if no
                     `tagger_instance` is given
    :param tagger_instance: a tagger instance to use for tagging if no `language` is given
    :param doc_meta_key: if not None, wrap each document's tag list in a dict under this key
    :return: list of N tag lists (one per document, same length as the document's token list), or — when
             `doc_meta_key` is given — a list of N dicts mapping `doc_meta_key` to the tag list
    """
    require_listlike(docs)

    # Lazily load a tagger for the requested (or default) language.
    if tagger_instance is None:
        tagger_instance, _ = load_pos_tagger_for_language(language or defaults.language)

    results = []
    for tokens in docs:
        if tokens:
            # tag() yields (token, tag) pairs; keep only the tags.
            tagged_pairs = tagger_instance.tag(tokens)
            tags = [tag for _, tag in tagged_pairs]
        else:
            tags = []

        results.append({doc_meta_key: tags} if doc_meta_key else tags)

    return results
5,331,211
def NOBE_GA_SH(G, K, topk):
    """detect SH spanners via NOBE-GA[1].

    Parameters
    ----------
    G : easygraph.Graph
        An unweighted and undirected graph.

    K : int
        Embedding dimension k

    topk : int
        top - k structural hole spanners

    Returns
    -------
    SHS : list
        The top-k structural hole spanners.

    Examples
    --------
    >>> NOBE_GA_SH(G,K=8,topk=5)

    References
    ----------
    .. [1] https://www.researchgate.net/publication/325004496_On_Spectral_Graph_Embedding_A_Non-Backtracking_Perspective_and_Graph_Approximation

    """
    # Embed the graph; fall back to magnitudes for complex embeddings.
    Y = eg.NOBE_GA(G, K)
    if isinstance(Y[0, 0], complex):
        Y = abs(Y)

    # Cluster embedding rows into K communities (fixed seed for determinism).
    labels = KMeans(n_clusters=K, random_state=0).fit(Y).labels_

    # Map each node to its community label and its embedding vector,
    # following the graph's node iteration order.
    com = {}
    vector = {}
    for idx, node in enumerate(G.nodes):
        com[node] = labels[idx]
        vector[node] = Y[idx]

    # Group nodes by community.
    cluster = {}
    for node, label in com.items():
        cluster.setdefault(label, []).append(node)

    # Rank nodes by their RDS score, highest first, and keep the top-k.
    rds = RDS(com, cluster, vector, K)
    ranked = sorted(rds.items(), key=lambda item: item[1], reverse=True)
    return [node for node, _ in ranked[:topk]]
5,331,212
def calculate_correct_answers(model, dataloader, epoch):
    """Calculate correct over total answers.

    Runs the (pipeline-parallel) model over ``dataloader`` in eval mode and
    counts exact-match predictions.

    NOTE(review): ``epoch`` is unused; on non-last pipeline stages the
    function returns None (no explicit return) — confirm callers only use
    the result on the last stage.
    """

    forward_backward_func = get_forward_backward_func()
    # Switch every model chunk to eval mode for the duration of the pass.
    for m in model:
        m.eval()

    def loss_func(labels, output_tensor):
        # Per-microbatch "loss": returns 0 plus the counters we care about.
        logits = output_tensor

        loss_dict = {}
        # Compute the correct answers.
        predicted = torch.argmax(logits, dim=-1)
        corrects = (predicted == labels).float()
        # Add to the counters.
        loss_dict['total'] = labels.size(0)
        loss_dict['correct'] = corrects.sum().item()

        return 0, loss_dict

    #defined inside to capture output_predictions
    def correct_answers_forward_step(batch, model):
        try:
            # ``batch`` may be an iterator (pipeline schedule) or a batch.
            batch_ = next(batch)
        except BaseException:
            batch_ = batch
        images, labels = process_batch(batch_)

        # Forward model.
        output_tensor = model(images)

        return output_tensor, partial(loss_func, labels)

    with torch.no_grad():
        # For all the batches in the dataset.
        total = 0
        correct = 0
        for _, batch in enumerate(dataloader):
            loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model, optimizer=None, timers=None, forward_only=True)
            for loss_dict in loss_dicts:
                total += loss_dict['total']
                correct += loss_dict['correct']

    # Restore training mode before returning.
    for m in model:
        m.train()

    # Reduce.
    if mpu.is_pipeline_last_stage():
        # Sum counters across the data-parallel group.
        unreduced = torch.cuda.LongTensor([correct, total])
        torch.distributed.all_reduce(unreduced, group=mpu.get_data_parallel_group())

        # Print on screen.
        correct_ans = unreduced[0].item()
        total_count = unreduced[1].item()
        return correct_ans, total_count
5,331,213
def set_template_parameters(
    template: Template, template_metadata: TemplateMetadata, input_parameters: Dict[str, str], interactive=False
):
    """Set and verify template parameters' values in the template_metadata.

    Resolution order per parameter: explicit ``input_parameters`` first,
    then interactive prompt, then existing metadata, then the parameter's
    default, then a prompt as last resort. Unresolvable parameters are
    collected and reported in a single error.
    """
    if interactive and not communication.has_prompt():
        raise errors.ParameterError("Cannot use interactive mode with no prompt")

    def validate(var: TemplateParameter, val) -> Tuple[bool, Any]:
        # Returns (is_valid, converted-or-original value); reports
        # conversion errors to the user instead of raising.
        try:
            return True, var.convert(val)
        except ValueError as e:
            communication.info(str(e))
            return False, val

    def read_valid_value(var: TemplateParameter, default_value=None):
        """Prompt the user for a template variable and return a valid value."""
        while True:
            variable_type = f", type: {var.type}" if var.type else ""
            enum_values = f", options: {var.possible_values}" if var.possible_values else ""
            default_value = default_value or to_string(var.default)

            val = communication.prompt(
                f"Enter a value for '{var.name}' ({var.description}{variable_type}{enum_values})",
                default=default_value,
                show_default=var.has_default,
            )

            valid, val = validate(var, val)
            if valid:
                return val

    missing_values = []
    for parameter in sorted(template.parameters, key=lambda v: v.name):
        name = parameter.name
        is_valid = True
        if name in input_parameters:
            # NOTE: Inputs override other values; no prompt for them in
            # interactive mode.
            is_valid, value = validate(parameter, input_parameters[name])
        elif interactive:
            value = read_valid_value(parameter, default_value=template_metadata.metadata.get(name))
        elif name in template_metadata.metadata:
            is_valid, value = validate(parameter, template_metadata.metadata[name])
        elif parameter.has_default:
            # Use default value if no value is available in the metadata
            value = parameter.default
        elif communication.has_prompt():
            value = read_valid_value(parameter)
        else:
            missing_values.append(name)
            continue

        if not is_valid:
            if not communication.has_prompt():
                raise errors.TemplateUpdateError(f"Invalid value '{value}' for variable '{name}'")
            # Invalid value but a prompt is available: ask the user instead.
            template_metadata.metadata[name] = read_valid_value(parameter)
        else:
            template_metadata.metadata[name] = value

    if missing_values:
        missing_values_str = ", ".join(missing_values)
        raise errors.TemplateUpdateError(f"Can't update template, it now requires variable(s): {missing_values_str}")

    # NOTE: Ignore internal variables, i.e. __\w__
    internal_keys = re.compile(r"^__\w+__$")
    metadata_variables = {v for v in template_metadata.metadata if not internal_keys.match(v)} | set(
        input_parameters.keys()
    )
    template_variables = {v.name for v in template.parameters}
    unused_metadata_variables = metadata_variables - template_variables
    if len(unused_metadata_variables) > 0:
        # Warn (don't fail) about values the template no longer consumes.
        unused_str = "\n\t".join(unused_metadata_variables)
        communication.info(f"These parameters are not used by the template and were ignored:\n\t{unused_str}\n")
5,331,214
def testRead():
    """Tests exception raising for invalid property indices and names on
    the read side.
    """
    a = alembic.Abc.IArchive("testPropException.abc")
    t = a.getTop()
    props = t.children[0].getProperties()

    # A known property is found by name.
    p = props.getProperty("myprop")
    assert p.getName() == "myprop"

    # Unknown name raises KeyError.
    # Bug fix: "except KeyError, e" is Python-2-only syntax (a SyntaxError
    # under Python 3); the unused binding is dropped.
    try:
        found = props.getProperty("notfound")
    except KeyError:
        found = False
    assert found == False

    # Out-of-range index raises IndexError.
    try:
        found = props.getProperty(99)
    except IndexError:
        found = False
    assert found == False
5,331,215
def calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None):
    """Calc S/C delta roll, pitch, and yaw for observed star positions relative to reference.

    This function computes a S/C delta roll/pitch/yaw that transforms the
    reference star positions yag/zag into the observed positions
    yag_obs/zag_obs. The units for these values must be in arcsec.

    The ``yag`` and ``zag`` values correspond to the reference star catalog
    positions. These must be a 1-d list or array of length M (number of stars).

    The ``yag_obs`` and ``zag_obs`` values must be either a 1-d or 2-d array with
    shape M (single readout of M stars) or shape N x M (N rows of M stars).

    The ``sigma`` parameter can be None or a 1-d array of length M.

    The algorithm is a simple but fast linear least-squared solution which uses
    a small angle assumption to linearize the rotation matrix from
    [[cos(th) -sin(th)], [sin(th), cos(th)]] to [[1, -th], [th, 1]].  In practice
    anything below 1.0 degree is fine.

    :param yag: reference yag (list or array, arcsec)
    :param zag: reference zag (list or array, arcsec)
    :param yag_obs: observed yag (list or array, arcsec)
    :param zag_obs: observed zag (list or array, arcsec)
    :param sigma: centroid uncertainties (None or list or array, arcsec)

    :returns: roll, pitch, yaw (degrees)
    """
    # np.array() makes local copies, so reshaping below cannot mutate inputs.
    yag = np.array(yag)
    zag = np.array(zag)
    yag_obs = np.array(yag_obs)
    zag_obs = np.array(zag_obs)

    if yag.ndim != 1 or zag.ndim != 1 or yag.shape != zag.shape:
        raise ValueError('yag and zag must be 1-d and equal length')

    # Bug fix: these checks previously tested ``zag`` (already validated
    # above) instead of ``zag_obs``, so a bad zag_obs rank slipped through.
    if (yag_obs.ndim not in (1, 2) or zag_obs.ndim not in (1, 2)
            or yag_obs.shape != zag_obs.shape):
        raise ValueError('yag_obs and zag_obs must be 1-d or 2-d and equal shape')

    n_stars = len(yag)
    if yag_obs.shape[-1] != n_stars or zag_obs.shape[-1] != n_stars:
        raise ValueError('inconsistent number of stars in yag_obs or zag_obs')

    # Promote a single readout to shape (1, n_stars) so one code path works.
    one_d = yag_obs.ndim == 1
    if one_d:
        yag_obs.shape = 1, n_stars
        zag_obs.shape = 1, n_stars

    outs = []
    for yo, zo in zip(yag_obs, zag_obs):
        out = _calc_roll_pitch_yaw(yag, zag, yo, zo, sigma=sigma)
        outs.append(out)

    if one_d:
        roll, pitch, yaw = outs[0]
    else:
        vals = np.array(outs)
        roll, pitch, yaw = vals[:, 0], vals[:, 1], vals[:, 2]

    return roll, pitch, yaw
5,331,216
def make_query_abs(db, table, start_dt, end_dt, dscfg, mode, no_part=False, cols=None):
    """Build a query over an absolute date range.

    Args:
        db (str): Database name.
        table (str): Table name.
        start_dt (date): Start date.
        end_dt (date): End date.
        dscfg (ConfigParser): Data-script configuration.
        mode (str): Query mode ('count' - row count, 'preview' - preview).
        no_part (bool): Table has no partitions. Defaults to False.
        cols: Explicit columns to select.
    """
    # Generalized from ``type(x) is date`` to isinstance so datetime
    # subclasses are accepted too (strftime works identically on them).
    assert isinstance(start_dt, date) and isinstance(end_dt, date)
    # Normalize the date range to YYYYMMDD strings for the query builder.
    start_dt = start_dt.strftime('%Y%m%d')
    end_dt = end_dt.strftime('%Y%m%d')

    return _make_query(db, table, start_dt, end_dt, dscfg, mode, no_part, cols)
5,331,217
def tgl_forward_backward(
    emp_cov, alpha=0.01, beta=1.0, max_iter=100, n_samples=None, verbose=False, tol=1e-4, delta=1e-4, gamma=1.0,
    lamda=1.0, eps=0.5, debug=False, return_history=False, return_n_iter=True, choose="gamma",
    lamda_criterion="b", time_norm=1, compute_objective=True, return_n_linesearch=False, vareps=1e-5,
    stop_at=None, stop_when=1e-4, laplacian_penalty=False, init="empirical",
):
    """Time-varying graphical lasso solver with forward-backward splitting.

    Solves the following problem via FBS:
        min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1}
            + beta sum_{i=2}^T Psi(K_i - K_{i-1})

    where S_i = (1/n_i) X_i^T \times X_i is the empirical covariance of
    data matrix X (training observations by features).

    Parameters
    ----------
    emp_cov : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data.
    alpha, beta : float, optional
        Regularisation parameters.
    max_iter : int, optional
        Maximum number of iterations.
    n_samples : ndarray
        Number of samples available for each time point.
    verbose : bool, default False
        Print info at each iteration.
    tol : float, optional
        Absolute tolerance for convergence.
    delta, gamma, lamda, eps : float, optional
        FBS parameters.
    debug : bool, default False
        Run in debug mode.
    return_history : bool, optional
        Return the history of computed values.
    return_n_iter : bool, optional
        Return the number of iteration before convergence.
    choose : ('gamma', 'lamda', 'fixed', 'both')
        Search iteratively gamma / lamda / none / both.
    lamda_criterion : ('a', 'b', 'c')
        Criterion to choose lamda. See ref for details.
    time_norm : float, optional
        Choose the temporal norm between points.
    compute_objective : bool, default True
        Choose to compute the objective value.
        NOTE(review): this flag is currently unused in the body.
    return_n_linesearch : bool, optional
        Return the number of line-search iterations before convergence.
    vareps : float, optional
        Jitter for the loss.
    stop_at, stop_when : float, optional
        Other convergence criteria, as used in the paper.
    laplacian_penalty : bool, default False
        Use Laplacian penalty.
    init : {'empirical', 'zero', ndarray}
        Choose how to initialize the precision matrix, with the inverse
        empirical covariance, zero matrix or precomputed.

    Returns
    -------
    K, covariance : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    available_choose = ("gamma", "lamda", "fixed", "both")
    if choose not in available_choose:
        raise ValueError("`choose` parameter must be one of %s." % available_choose)

    n_times, _, n_features = emp_cov.shape
    K = init_precision(emp_cov, mode=init)

    # Bind the objective, smooth loss f, its gradient, and the non-smooth
    # penalty g for the selected penalty family.
    if laplacian_penalty:
        obj_partial = partial(
            objective_laplacian, n_samples=n_samples, emp_cov=emp_cov, alpha=alpha, beta=beta, vareps=vareps
        )
        function_f = partial(loss_laplacian, beta=beta, n_samples=n_samples, S=emp_cov, vareps=vareps)
        gradient_f = partial(grad_loss_laplacian, emp_cov=emp_cov, beta=beta, n_samples=n_samples, vareps=vareps)
        function_g = partial(penalty_laplacian, alpha=alpha)
    else:
        psi = partial(vector_p_norm, p=time_norm)
        obj_partial = partial(
            objective, n_samples=n_samples, emp_cov=emp_cov, alpha=alpha, beta=beta, psi=psi, vareps=vareps
        )
        function_f = partial(loss, n_samples=n_samples, S=emp_cov, vareps=vareps)
        gradient_f = partial(grad_loss, emp_cov=emp_cov, n_samples=n_samples, vareps=vareps)
        function_g = partial(penalty, alpha=alpha, beta=beta, psi=psi)

    max_residual = -np.inf
    n_linesearch = 0
    checks = [convergence(obj=obj_partial(precision=K))]
    for iteration_ in range(max_iter):
        k_previous = K.copy()
        x_inv = np.array([linalg.pinvh(x) for x in K])
        grad = gradient_f(K, x_inv=x_inv)

        # Optionally line-search the step size gamma (which also yields the
        # proximal point y for that gamma).
        if choose in ["gamma", "both"]:
            gamma, y = choose_gamma(
                gamma / eps if iteration_ > 0 else gamma,
                K,
                function_f=function_f,
                beta=beta,
                alpha=alpha,
                lamda=lamda,
                grad=grad,
                delta=delta,
                eps=eps,
                max_iter=200,
                p=time_norm,
                x_inv=x_inv,
                choose=choose,
                laplacian_penalty=laplacian_penalty,
            )

        # Forward (gradient) step, then backward (proximal) step.
        x_hat = K - gamma * grad
        if choose not in ["gamma", "both"]:
            if laplacian_penalty:
                y = soft_thresholding_od(x_hat, alpha * gamma)
            else:
                y = prox_FL(x_hat, beta * gamma, alpha * gamma, p=time_norm, symmetric=True)

        # Optionally line-search the relaxation parameter lamda.
        if choose in ("lamda", "both"):
            lamda, n_ls = choose_lamda(
                min(lamda / eps if iteration_ > 0 else lamda, 1),
                K,
                function_f=function_f,
                objective_f=obj_partial,
                gradient_f=gradient_f,
                function_g=function_g,
                gamma=gamma,
                delta=delta,
                eps=eps,
                criterion=lamda_criterion,
                max_iter=200,
                p=time_norm,
                grad=grad,
                prox=y,
                vareps=vareps,
            )
            n_linesearch += n_ls

        # Relaxed update with lamda clipped to [0, 1].
        K = K + min(max(lamda, 0), 1) * (y - K)
        # K, t = fista_step(Y, Y - Y_old, t)

        check = convergence(
            obj=obj_partial(precision=K),
            rnorm=np.linalg.norm(upper_diag_3d(K) - upper_diag_3d(k_previous)),
            snorm=np.linalg.norm(obj_partial(precision=K) - obj_partial(precision=k_previous)),
            e_pri=np.sqrt(upper_diag_3d(K).size) * tol
            + tol * max(np.linalg.norm(upper_diag_3d(K)), np.linalg.norm(upper_diag_3d(k_previous))),
            e_dual=tol,
        )

        if verbose and iteration_ % (50 if verbose < 2 else 1) == 0:
            print("obj: %.4f, rnorm: %.7f, snorm: %.4f," "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        if return_history:
            checks.append(check)

        if np.isnan(check.rnorm) or np.isnan(check.snorm):
            warnings.warn("precision is not positive definite.")

        if stop_at is not None:
            # Converge when the objective reaches a target value.
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break
        else:
            # use this convergence criterion
            subgrad = (x_hat - K) / gamma
            if 0:
                # Disabled alternative residual (kept for reference).
                if laplacian_penalty:
                    grad = grad_loss_laplacian(K, emp_cov, n_samples, vareps=vareps)
                else:
                    grad = grad_loss(K, emp_cov, n_samples, vareps=vareps)
                res_norm = np.linalg.norm(grad + subgrad)

                if iteration_ == 0:
                    normalizer = res_norm + 1e-6
                max_residual = max(np.linalg.norm(grad), np.linalg.norm(subgrad)) + 1e-6
            else:
                res_norm = np.linalg.norm(K - k_previous) / gamma
                max_residual = max(max_residual, res_norm)
                normalizer = max(np.linalg.norm(grad), np.linalg.norm(subgrad)) + 1e-6

            r_rel = res_norm / max_residual
            r_norm = res_norm / normalizer

            if not debug and (r_rel <= tol or r_norm <= tol) and iteration_ > 0:
                # or (
                #     check.rnorm <= check.e_pri and iteration_ > 0):
                break
    else:
        # for/else: loop ran to max_iter without hitting a break.
        warnings.warn("Objective did not converge.")

    covariance_ = np.array([linalg.pinvh(k) for k in K])
    return_list = [K, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    if return_n_linesearch:
        return_list.append(n_linesearch)
    return return_list
5,331,218
def validate_task_rel_proposal(header, propose, rel_address, state):
    """Validate a proposal to add a User to a Task relationship.

    Checks that the User exists, the Task exists, the transaction signer is
    authorized, and the User is not already in the Task's relationship
    specified by rel_address.

    Args:
        header (TransactionHeader): The transaction header.
        propose (ProposeAddTask*): The Task relationship proposal.
        rel_address (str): The Task relationship address produced by the
            Task and the User.
        state (sawtooth_sdk.Context): The way to communicate to the
            validator the state gets and sets.

    Returns:
        The state entries fetched for the involved addresses.

    Raises:
        InvalidTransaction: If the signer is not the user or the user's
            manager, or the User is already in the relationship.
    """
    user_address = addresser.make_user_address(propose.user_id)
    task_address = addresser.make_task_attributes_address(propose.task_id)
    proposal_address = addresser.make_proposal_address(
        object_id=propose.task_id,
        related_id=propose.user_id)

    state_entries = get_state(state, [user_address, task_address, proposal_address, rel_address])

    validate_identifier_is_user(state_entries, identifier=propose.user_id, address=user_address)
    user_entry = get_state_entry(state_entries, user_address)
    user = get_user_from_container(
        return_user_container(user_entry),
        propose.user_id)

    # Only the user themselves or their manager may sign the proposal.
    if header.signer_public_key not in [user.user_id, user.manager_id]:
        raise InvalidTransaction(
            "Txn signer {} is not the user or the user's "
            "manager {}".format(header.signer_public_key, [user.user_id, user.manager_id]))

    validate_identifier_is_task(state_entries, identifier=propose.task_id, address=task_address)

    try:
        task_admins_entry = get_state_entry(state_entries, rel_address)
        task_rel_container = return_task_rel_container(task_admins_entry)
        if is_in_task_rel_container(
                task_rel_container,
                propose.task_id,
                propose.user_id):
            # Bug fix: the message previously said "Role" although this
            # validator handles Task relationships.
            raise InvalidTransaction(
                "User {} is already in the Task {} "
                "relationship".format(propose.user_id, propose.task_id))
    except KeyError:
        # The task rel container doesn't exist so no task relationship exists
        pass

    return state_entries
5,331,219
def error_handler(update: Update, context: CallbackContext):
    """Log the error and send a telegram message to notify the developer.

    :param update: the update being processed when the error occurred; may
        be None for errors raised outside update handling.
    :param context: callback context carrying the error and per-chat/user data.
    """
    # Log the error before we do anything else, so we can see it even if something breaks.
    logger.error(msg="Exception while handling an update:", exc_info=context.error)

    # traceback.format_exception returns the usual python message about an exception, but as a
    # list of strings rather than a single string, so we have to join them together.
    tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)
    tb = ''.join(tb_list)

    # Bug fix: `update` can be None (or another object type) for errors not
    # tied to an update; calling .to_dict() on it crashed the error handler.
    update_payload = update.to_dict() if isinstance(update, Update) else str(update)

    # Build the message with some markup and additional information about what happened.
    # You might need to add some logic to deal with messages longer than the 4096 character limit.
    message = (
        'An exception was raised while handling an update\n'
        '<pre>update = {}</pre>\n\n'
        '<pre>context.chat_data = {}</pre>\n\n'
        '<pre>context.user_data = {}</pre>\n\n'
        '<pre>{}</pre>'
    ).format(
        html.escape(json.dumps(update_payload, indent=2, ensure_ascii=False)),
        html.escape(str(context.chat_data)),
        html.escape(str(context.user_data)),
        html.escape(tb)
    )

    # Finally, send the message
    context.bot.send_message(chat_id=DEVELOPER_CHAT_ID, text=message, parse_mode=ParseMode.HTML)
5,331,220
def all_cells_run(event_str: str, expected_count: int) -> bool:
    """Wait for an event signalling all cells have run.

    `execution_count` should equal number of nonempty cells.

    :param event_str: JSON-encoded kernel event message.
    :param expected_count: number of nonempty cells expected to have run.
    :returns: True only for a successful ``execute_reply`` whose
        execution_count matches; False for any malformed or non-matching
        event.
    """
    try:
        event = json.loads(event_str)
        msg_type = event["msg_type"]
        content = event["content"]
        execution_count = content["execution_count"]
        status = content["status"]
    # Bug fix: ValueError (json.JSONDecodeError) was not caught, so a
    # malformed JSON payload raised instead of returning False.
    except (TypeError, KeyError, ValueError):
        return False
    return all(
        (
            msg_type == "execute_reply",
            execution_count == expected_count,
            status == "ok",
        )
    )
5,331,221
def to_forecasting(
    timeseries: np.ndarray,
    forecast: int = 1,
    axis: int = 0,
    test_size: Union[int, float] = None,
):
    """Split a timeseries for forecasting tasks.

    Transform a timeseries :math:`X` into a series of input values
    :math:`X_t` and a series of output values
    :math:`X_{t+\\mathrm{forecast}}`.

    It is also possible to split the timeseries between training
    timesteps and testing timesteps.

    Parameters
    ----------
    timeseries : np.ndarray
        Timeseries to split.
    forecast : int, optional
        Number of time lag steps between the timeseries :math:`X_t`
        and the timeseries :math:`X_{t+\\mathrm{forecast}}`, by default 1,
        i.e. returns two timeseries with a time difference of 1 timesteps.
    axis : int, optional
        Time axis of the timeseries, by default 0.
        (Bug fix: the annotation previously allowed float, which
        ``np.moveaxis`` rejects.)
    test_size : int or float, optional
        If set, will also split the timeseries into a training phase and
        a testing phase of ``test_size`` timesteps. Can also be specified
        as a float ratio, by default None

    Returns
    -------
    tuple of numpy.ndarray
        :math:`X_t` and :math:`X_{t+\\mathrm{forecast}}`.

        If ``test_size`` is specified, will return:
        :math:`X_t`, :math:`X_t^{test}`,
        :math:`X_{t+\\mathrm{forecast}}`,
        :math:`X_{t+\\mathrm{forecast}}^{test}`.

        The size of the returned timeseries is therefore the size of
        :math:`X` minus the forecasting length ``forecast``.

    Raises
    ------
    ValueError
        If ``test_size`` is a float, it must be in [0, 1[.
    """
    # Move the time axis to the front; view() avoids copying the data.
    series_ = np.moveaxis(timeseries.view(), axis, 0)
    time_len = series_.shape[0]

    if test_size is not None:
        if isinstance(test_size, float) and test_size < 1 and test_size >= 0:
            test_len = round(time_len * test_size)
        elif isinstance(test_size, int):
            test_len = test_size
        else:
            raise ValueError(
                "invalid test_size argument: "
                "test_size can be an integer or a float "
                f"in [0, 1[, but is {test_size}."
            )
    else:
        test_len = 0

    # Lagged input/target pair: y is X shifted forward by `forecast` steps.
    X = series_[:-forecast]
    y = series_[forecast:]

    if test_len > 0:
        # Hold out the last `test_len` timesteps for testing.
        X_t = X[-test_len:]
        y_t = y[-test_len:]
        X = X[:-test_len]
        y = y[:-test_len]

        X = np.moveaxis(X, 0, axis)
        X_t = np.moveaxis(X_t, 0, axis)
        y = np.moveaxis(y, 0, axis)
        y_t = np.moveaxis(y_t, 0, axis)

        return X, X_t, y, y_t

    return np.moveaxis(X, 0, axis), np.moveaxis(y, 0, axis)
5,331,222
def build_task_environment() -> dm_env.Environment:
    """Construct and return the subtask environment for this task."""
    # Base task: simulation model, initialization logic, sensors, effectors.
    task, components = task_builder.build_task()
    del components

    env_builder = subtask_env_builder.SubtaskEnvBuilder()
    env_builder.set_task(task)

    # Composer environment wrapping the task.
    task_env = env_builder.build_base_env()

    # Action space exposed to the agent: one arm-joint space per robot, plus
    # a gripper space frozen at its minimum command (the agent does not
    # control the grippers for this task).
    parent_action_spec = task.effectors_action_spec(physics=task_env.physics)
    robot_action_spaces = []
    for robot in task.robots:
        arm_space = action_spaces.ArmJointActionSpace(
            af.prefix_slicer(parent_action_spec, robot.arm_effector.prefix))
        gripper_space = action_spaces.GripperActionSpace(
            af.prefix_slicer(parent_action_spec, robot.gripper_effector.prefix))
        gripper_space = af.FixedActionSpace(
            gripper_space, gripper_space.spec().minimum)
        robot_action_spaces.append(arm_space)
        robot_action_spaces.append(gripper_space)
    env_builder.set_action_space(
        af.CompositeActionSpace(robot_action_spaces))

    # Cast all observations to float32.
    env_builder.add_preprocessor(observation_transforms.CastPreprocessor())
    # Reward based on the two robots' TCP position observations.
    env_builder.add_preprocessor(
        rewards.L2Reward(obs0='robot0_tcp_pos', obs1='robot1_tcp_pos'))
    # End episodes after 100 steps.
    env_builder.add_preprocessor(subtask_termination.MaxStepsTermination(100))
    return env_builder.build()
5,331,223
def searchInsert(nums, target):
    """
    Return the index of *target* in the sorted list *nums*, or the index at
    which it would be inserted to keep *nums* sorted.

    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    from bisect import bisect_left
    # O(log n) and, unlike the previous append-and-sort approach, does not
    # mutate the caller's list.
    return bisect_left(nums, target)
5,331,224
def drawButton():
    """Draw the four white button rectangles onto the display surface."""
    for rect in (RECT1, RECT2, RECT3, RECT4):
        pygame.draw.rect(DISPLAYSURF, WHITE, rect)
5,331,225
def initialize(slave_address=DEFAULT_SLAVE_ADDRESS, i2c_bus=DEFAULT_I2C_BUS):
    """
    Open the platform-appropriate I2C driver and remember the slave address.

    :param slave_address: 8-bit I2C slave address. For DPP2607, should be
        0x34 or 0x36.
    :param i2c_bus: I2C bus number, for Linux only.
    """
    global _i2c, _slave_address
    # Pick the driver for the current platform, then open it.
    if sys.platform == 'win32':
        import devasys
        _i2c = devasys.DeVaSys(slave_address)
    else:
        import linuxi2c
        _i2c = linuxi2c.LinuxI2C(i2c_bus, slave_address)
    _i2c.open()
    _slave_address = slave_address
5,331,226
def resource(author, tag) -> Resource:
    """Fixture building a sample awesome Resource with the given author/tag."""
    url = "https://raw.githubusercontent.com/MarcSkovMadsen/awesome-streamlit/master/src/pages/gallery/contributions/marc_skov_madsen/sentiment_analyzer/sentiment_analyzer.py"
    return Resource(
        name="Sentiment Algorithm",
        url=url,
        is_awesome=True,
        tags=[tag],
        author=author,
    )
5,331,227
def tick(curtime=''):
    """Update the clock label and reschedule itself every 200 ms."""
    now_text = time.strftime('%H:%M:%S')
    # Only touch the widget when the displayed second actually changes.
    if curtime != now_text:
        curtime = now_text
        clockLabel.config(text=curtime)
    clockLabel.after(200, tick, curtime)
5,331,228
def FilterBlueScreen():
    """Placeholder stub with no implementation.

    TODO: implement (presumably blue-screen / chroma-key filtering, judging
    by the name -- confirm intended behavior before implementing).
    """
5,331,229
def del_list(request, list_id: int, list_slug: str) -> HttpResponse:
    """Delete an entire list. Danger Will Robinson!
    Only staff members should be allowed to access this view.
    """
    task_list = get_object_or_404(TaskList, slug=list_slug)

    # Permission gate: admins may delete any list; other users must belong to
    # the group that owns this list.
    # FIXME: This means any group member can delete lists, which is probably too permissive.
    is_group_member = task_list.group in request.user.groups.all()
    if not (is_group_member or request.user.is_staff):
        raise PermissionDenied

    if request.method == 'POST':
        TaskList.objects.get(id=task_list.id).delete()
        messages.success(request, "{list_name} is gone.".format(list_name=task_list.name))
        return redirect('todo:lists')

    list_tasks = Task.objects.filter(task_list=task_list.id)
    context = {
        "task_list": task_list,
        "task_count_done": list_tasks.filter(completed=True).count(),
        "task_count_undone": list_tasks.filter(completed=False).count(),
        "task_count_total": list_tasks.count(),
    }
    return render(request, 'todo/del_list.html', context)
5,331,230
def new_req(to_id, from_who):  # create a new request
    """Insert a new request row and refresh the recipient's request list.

    :param to_id: to user id
    :param from_who: from user id
    """
    global connect
    global cursor
    # Parameterized query: the previous str.format() version interpolated
    # user-controlled values directly into the SQL and was vulnerable to
    # SQL injection.
    # NOTE(review): uses the DB-API 'qmark' paramstyle (sqlite3); switch the
    # placeholders to %s if the underlying driver uses 'format'/'pyformat'.
    cursor.execute("INSERT INTO Requests VALUES (?, ?, 0)", (to_id, from_who))
    connect.commit()
    get_request(to_id)
5,331,231
def model_softmax(input_data=None, output_targets=None, num_words=3000,
                  num_units=128, num_layers=2, num_tags=5, batchsize=1,
                  train=True):
    """Build an embedding + stacked-LSTM + softmax sequence-tagging graph.

    :param input_data: int tensor of token ids fed to the embedding lookup
        (presumably shaped [batch, time] -- TODO confirm with callers)
    :param output_targets: int tensor of gold tag ids, compared elementwise
        against the [batchsize, time] predictions
    :param num_words: vocabulary size of the embedding table
    :param num_units: LSTM hidden size (also used as the embedding dimension)
    :param num_layers: number of stacked LSTM layers
    :param num_tags: number of output tags
    :param batchsize: batch size used to build the zero initial state
    :param train: True builds training ops (loss/accuracy/optimizer);
        False builds inference ops only
    :return: dict of graph tensors/ops ('prediction' always present; training
        entries only when train=True)
    """
    tensors = {}
    with tf.name_scope('embedding'):
        w = tf.Variable(tf.random_uniform([num_words, num_units], -1.0, 1.0), name="W")
        # Embedded word vectors, shape [?, ?, num_units]
        inputs = tf.nn.embedding_lookup(w, input_data)
    with tf.name_scope('lstm'):
        lstmcell = tf.nn.rnn_cell.BasicLSTMCell
        cell_list = [lstmcell(num_units, state_is_tuple=True) for i in range(num_layers)]
        cell_mul = tf.nn.rnn_cell.MultiRNNCell(cell_list, state_is_tuple=True)
        initial_state = cell_mul.zero_state(batch_size=batchsize, dtype=tf.float32)
        # Sequence outputs, shape [?, ?, num_units]
        outputs, last_state = tf.nn.dynamic_rnn(cell_mul, inputs, initial_state=initial_state)
    with tf.name_scope('softmax'):
        # Collapse (batch, time) so one dense layer maps every step to logits.
        output = tf.reshape(outputs, [-1, num_units])
        weights = tf.Variable(tf.truncated_normal([num_units, num_tags]))
        bias = tf.Variable(tf.zeros(shape=[num_tags]))
        logits = tf.nn.bias_add(tf.matmul(output, weights), bias=bias)
        prediction = tf.reshape(tf.argmax(logits, axis=1, output_type=tf.int32), shape=[batchsize, -1])
    # During training compute the loss from sparse integer labels; during
    # generation only the logits/predictions are needed.
    if train:
        with tf.name_scope('loss'):
            labels = tf.reshape(output_targets, [-1])
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            total_loss = tf.reduce_mean(loss)
            accu = tf.reduce_mean(tf.cast(tf.equal(output_targets, prediction), dtype=tf.float32))
            # NOTE(review): minimizes the unreduced per-step `loss` rather
            # than `total_loss` -- TF sums gradients over elements, so the
            # effective step scales with sequence length; confirm intended.
            train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
        tensors['initial_state'] = initial_state
        tensors['output'] = output
        tensors['last_state'] = last_state
        tensors['train_op'] = train_op
        tensors['prediction'] = prediction
        tensors['loss'] = total_loss
        tensors['accu'] = accu
    else:
        # Keep the output format consistent with the CRF variant.
        tensors['prediction'] = prediction
    return tensors
5,331,232
def get_metric_monthly_rating(metric: AnyStr, tenant_id: AnyStr, namespaces: List[AnyStr]) -> List[Dict]:
    """Get the monthly price for a metric.

    :metric (AnyStr) A string representing the metric.
    :tenant_id (AnyStr) A string representing the tenant, only used by decorators.
    :namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.

    Return the results of the query as a list of dictionary.
    """
    # The expanding bind parameter lets SQLAlchemy render the namespace list
    # as an IN (...) tuple.
    statement = sa.text("""
        SELECT max(frame_price) * 24 *
            (SELECT extract(days FROM date_trunc('month', now()) + interval '1 month - 1 day'))
            AS frame_price
        FROM frames
        WHERE metric = :metric
        AND frame_begin >= date_trunc('month', now())
        AND namespace IN :namespaces
    """).bindparams(bindparam('namespaces', expanding=True))
    return process_query(statement, {
        'metric': metric,
        'tenant_id': tenant_id,
        'namespaces': namespaces,
    })
5,331,233
def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss with masking."""
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    weights = tf.cast(mask, dtype=tf.float32)
    # Normalize so the mask weights average to 1.
    weights = weights / tf.reduce_mean(weights)
    weighted = per_example * tf.transpose(weights)
    return tf.reduce_mean(tf.transpose(weighted))
5,331,234
def u0(x):
    """
    Initial condition u(x, 0) = sin(pi * x)

    Parameters
    ----------
    x : array or float; Real space

    Returns
    -------
    array or float : Initial condition evaluated in the real space
    """
    phase = pi * x
    return sin(phase)
5,331,235
def deploy(upgrade=False): """ The master deploy script. Examples: fab -H ubuntu@54.244.224.30 deploy fab -H root@104.131.132.143 deploy """ # Ubuntu setup if upgrade: update_ubuntu() setup_ubuntu() upgrade_pip() # Fun starts here clone('https://github.com/aodin/adopt.git') setup_env('adopt') server_ln('adopt') create_pg_db('adopt') setup_django('adopt', 'pets') # Restart restart_servers('adopt')
5,331,236
def jsonify(value):
    """
    Convert a value into a JSON string that can be used for JSONB queries in
    Postgres.

    PostgreSQL cannot store U+0000 inside a text value, so the escape
    sequence representing that character is stripped out of the encoded
    JSON, effectively removing the character from all strings.
    """
    encoded = json.dumps(value, ensure_ascii=False)
    return encoded.replace("\\u0000", "")
5,331,237
def add_generated_report_header(report_header):
    """
    Upload report history and return the id of the header that was generated on the server.

    Parameters
    ----------
    report_header:
        Required Parameters: A dictionary of parameters that will be used to describe the report that consist of:
        - report: Name of the report
        - executionTimeMS: The number of milliseconds it took to generate the report
        - scheduled: True if the report was scheduled, false if it was not
        - note: Any notes to be added to the report
        - user: An Entity Header (dictionary of id and text) of the user (which could be a system user) that requested the report.
        - contentType: Mime type of the report, generally this is application/pdf
        - fileName: name of the file (not including the path) of the report
        - reportTitle: title of the report as it was generated

        Optional Parameters
        - reportSummary: report summary as returned from the generated report
        - reportDate: date for the report
        - device: An Entity Header (dictionary of id and text) of the device that this report is for, if this is provided reports for specific devices will be available in the dashboard

    Returns
    -------
    out: string
        Returns the id of the generated report that can be used to upload a report.

    Raises
    ------
    Exception
        If JOB_SERVER_URL is unset, if the POST returns a status > 299, or
        if the server response reports an unsuccessful result.
    """
    # The job-server endpoint is configured via the environment.
    job_server = os.environ.get('JOB_SERVER_URL')
    if(job_server is None):
        raise Exception("Missing environment variable [JOB_SERVER_URL]")
    headers={'Content-Type':'application/json'}
    generated_report_json = json.dumps(report_header)
    url = "%s/api/generatedreport/header" % (job_server)
    encoded_data = generated_report_json.encode('utf-8')
    # Verify TLS certificates against the certifi CA bundle.
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    r = http.request('POST', url, headers=headers, preload_content=False, body=encoded_data)
    responseText = ''
    responseStatus = r.status
    # Stream the response body in small chunks rather than one shot.
    for chunk in r.stream(32):
        responseText += chunk.decode("utf-8")
    responseJSON = json.loads(responseText)
    r.release_conn()
    if responseStatus > 299:
        print('Failed http call, response code: ' + str(responseStatus))
        print('Url: ' + url)
        print(responseJSON)
        print('--------------------------------------------------------------------------------')
        print()
        raise Exception("Could not upload report header to %s" % url)
    if(responseJSON["successful"]):
        return responseJSON["result"]
    else:
        raise Exception(responseJSON["errors"][0]["message"])
5,331,238
def generate_dataset(type = 'nlp', test=1):
    """
    Generates a dataset for the model.

    Returns None for an unrecognised ``type``.
    """
    if type == 'nlp':
        return generate_nlp_dataset(test=test)
    if type == 'non-nlp':
        return generate_non_nlp_dataset()
    return None
5,331,239
def get_pipes_output(database: Optional[pulumi.Input[str]] = None,
                     schema: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPipesResult]:
    """
    ## Example Usage

    ```python
    import pulumi
    import pulumi_snowflake as snowflake

    current = snowflake.get_pipes(database="MYDB",
        schema="MYSCHEMA")
    ```


    :param str database: The database from which to return the schemas from.
    :param str schema: The schema from which to return the pipes from.
    :return: A ``pulumi.Output`` wrapping a ``GetPipesResult``.
    """
    # Body intentionally elided: the implementation is generated/provided by
    # the Pulumi runtime; this signature exists for typing and documentation.
    ...
5,331,240
def plot_tree_on_terminal(G):
    """
    Plots the random tree graph

    Parameters
    ----------
    G: nx.Graph
        The random tree graph
    """
    ascii_art = nx.forest_str(G)
    print(ascii_art)
5,331,241
def search(isamAppliance, comment, check_mode=False, force=False):
    """
    Retrieve snapshots with given comment contained
    """
    ret_obj = isamAppliance.create_return_object()

    matching_ids = []
    for snapshot in get(isamAppliance)['data']:
        if comment in snapshot['comment']:
            logger.debug("Snapshot comment \"{0}\" has this string \"{1}\" in it.".format(snapshot['comment'], comment))
            matching_ids.append(snapshot['id'])

    # Only replace the default payload when something matched; otherwise the
    # return object keeps its empty default.
    if matching_ids:
        ret_obj['data'] = matching_ids
    return ret_obj
5,331,242
def getAsciiFileExtension(proxyType):
    """
    The file extension used for ASCII (non-compiled) proxy source files
    for the proxies of specified type.
    """
    if proxyType == 'Proxymeshes':
        return '.proxy'
    return '.mhclo'
5,331,243
def dist(s1, s2):
    """Given two strings, return the Hamming distance (int)

    Comparison is case-insensitive; any length difference counts one
    mismatch per extra character.
    """
    a = s1.lower()
    b = s2.lower()
    mismatches = sum(1 for x, y in zip(a, b) if x != y)
    return mismatches + abs(len(s1) - len(s2))
5,331,244
def test_config():
    """Test create_app for testing purposes."""
    # Default app must not be in testing mode.
    assert not create_app().testing

    # An app built with test=True and a throwaway DB must be in testing mode.
    handle, db_path = tempfile.mkstemp()
    test_app = create_app(test=True, db_path=db_path)
    assert test_app.testing
    os.close(handle)
    os.unlink(db_path)
5,331,245
def read_bunch(path):
    """
    Read a pickled bunch object from disk.

    :param path: path of the pickle file to load
    :return: the unpickled object
    """
    # Context manager guarantees the handle is closed even if unpickling
    # raises (the previous version leaked the handle in that case).
    with open(path, 'rb') as file:
        return pickle.load(file)
5,331,246
def cluster_analysis(L, cluster_alg, args, kwds):
    """Run a scikit-learn-style clustering algorithm on the input matrix L.

    ``cluster_alg(*args, **kwds)`` must produce an object with a
    ``fit_predict`` method. A label of -1 (as emitted by e.g. DBSCAN) marks
    a vertex as an outlier.

    Returns
    -------
    (partitions, outliers): list of sets of ints, and a set of ints
    """
    labels = cluster_alg(*args, **kwds).fit_predict(L)
    n_clusters = np.max(labels) + 1
    partitions = [set() for _ in range(n_clusters)]
    outliers = set()
    for vertex, label in enumerate(labels):
        if label == -1:
            outliers.add(vertex)
        else:
            partitions[label].add(vertex)
    return partitions, outliers
5,331,247
def col_index_list(info, key, value):
    """Given a list of dicts 'info', return a list of indices corresponding
    to columns in which info[key] == value. Use to build lists of default
    columns, non-exportable columns, etc.

    Returns an empty list when *info* is None.
    """
    # `is None` instead of `!= None`; enumerate instead of index arithmetic.
    if info is None:
        return []
    return [i for i, entry in enumerate(info) if entry.get(key) == value]
5,331,248
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
    """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

    `weights` acts as a coefficient for the loss. If a scalar is provided,
    then the loss is simply scaled by the given value. If `weights` is a
    tensor of size [`batch_size`], then the loss weights apply to each
    corresponding sample.

    Args:
      logits: [batch_size, num_classes] logits outputs of the network .
      labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or
        `int64` in the range `[0, num_classes)`.
      weights: Coefficients for the loss. The tensor must be a scalar or a
        tensor of shape [batch_size] or [batch_size, 1].
      scope: the scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the mean loss value.

    Raises:
      ValueError: If the shapes of `logits`, `labels`, and `weights` are
        incompatible, or if `weights` is None.
    """
    with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                        [logits, labels, weights]) as scope:
        # Flatten labels to rank 1 ([batch_size]); the sparse op rejects the
        # [batch_size, 1] shape that callers are allowed to pass in.
        labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
        losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                             logits=logits,
                                                             name="xentropy")
        # Weighting and the final mean reduction are delegated to the shared
        # helper so behavior matches the other loss functions in this module.
        return compute_weighted_loss(losses, weights, scope=scope)
5,331,249
def curl(url, headers={}, data=None, verbose=0):
    """Use curl to make a request; return the entire reply as a string.

    :param url: URL to fetch.
    :param headers: mapping of extra request headers (read-only; the default
        dict is never mutated).
    :param data: optional POST body; a non-str mapping is URL-encoded.
    :param verbose: at >= 3, echo the curl command line to stderr.
    """
    import os, tempfile
    fd, tempname = tempfile.mkstemp(prefix='scrape')
    # mkstemp returns an open descriptor; close it so it is not leaked --
    # curl writes to the file by name via shell redirection below.
    os.close(fd)
    command = 'curl --include --insecure --silent --max-redirs 0'
    if data:
        if not isinstance(data, str):
            data = urlencode(data)
        command += ' --data ' + shellquote(data)
    for name, value in headers.items():  # py3: .items(), not .iteritems()
        command += ' --header ' + shellquote('%s: %s' % (name, value))
    command += ' ' + shellquote(url)
    if verbose >= 3:
        print('execute:', command, file=sys.stderr)  # py3 print function
    os.system(command + ' > ' + tempname)
    with open(tempname) as reply_file:
        reply = reply_file.read()
    os.remove(tempname)
    return reply
5,331,250
def retr_radihill(smax, masscomp, massstar):
    """
    Return the Hill radius of a companion [AU]

    Arguments
        smax: semi-major axis [AU]
        masscomp: companion mass
        massstar: stellar mass (same units as masscomp)
    """
    mass_ratio = masscomp / 3. / massstar
    radihill = smax * mass_ratio**(1. / 3.)  # [AU]
    return radihill
5,331,251
def load_feature_file(in_feature):
    """Load the feature file into a pandas dataframe.

    The file is resolved relative to the module-level ``feature_path``
    directory; the first CSV column becomes the index.
    """
    return pd.read_csv(feature_path + in_feature, index_col=0)
5,331,252
async def run_command(client, config, log=print): """Begin the action specified by command line arguments and config""" # Always print current height initial_height, speed = struct.unpack( "<Hh", await client.read_gatt_char(UUID_HEIGHT) ) log("Height: {:4.0f}mm".format(rawToMM(initial_height))) target = None if config["monitor"]: # Print changes to height data await subscribe( client, UUID_HEIGHT, partial(get_height_data_from_notification, log=log) ) wait = asyncio.get_event_loop().create_future() await wait elif config["sit"]: # Move to configured sit height target = config["sit_height_raw"] await move_to(client, target, log=log) elif config["stand"]: # Move to configured stand height target = config["stand_height_raw"] await move_to(client, target, log=log) elif config["move_to"]: # Move to custom height target = mmToRaw(config["move_to"]) await move_to(client, target, log=log) elif config["move_to_raw"]: # Move to custom raw height target = config["move_to_raw"] await move_to(client, target, log=log) if target: final_height, speed = struct.unpack( "<Hh", await client.read_gatt_char(UUID_HEIGHT) ) # If we were moving to a target height, wait, then print the actual final height log( "Final height: {:4.0f}mm (Target: {:4.0f}mm)".format( rawToMM(final_height), rawToMM(target) ) )
5,331,253
def test_get_delivery_pricing(get_order, jwt_token, api_url):
    """Integration test for the getDeliveryPricing GraphQL query."""
    order = get_order()
    headers = {"Authorization": jwt_token}
    query = """
    query($input: DeliveryPricingInput!) {
        getDeliveryPricing(input: $input) {
            pricing
        }
    }
    """
    variables = {
        "input": {
            "products": order["products"],
            "address": order["address"]
        }
    }
    print("VARIABLES", variables)
    payload = {"query": query, "variables": variables}
    response = requests.post(api_url, json=payload, headers=headers)
    data = response.json()
    print(data)
    assert "data" in data
    assert "getDeliveryPricing" in data["data"]
    assert "pricing" in data["data"]["getDeliveryPricing"]
5,331,254
def add_observation_noise(obs, noises, stds, only_object_noise=False):
    """Add noise to observations

    `noises`: Standard normal noise of same shape as `obs`
    `stds`: Standard deviation per dimension of `obs` to scale noise with
    `only_object_noise`: if True, perturb only the object-position slice

    Relative quantities (object_rel_pos, object_velp) are recomputed from
    the noisy absolute quantities so they remain mutually consistent.
    """
    assert obs.shape == noises.shape
    # SENSOR_INFO_PNP maps sensor names to index slices of the flat last
    # observation axis -- assumed defined at module level; TODO confirm.
    idxs_object_pos = SENSOR_INFO_PNP["object_pos"]
    # Capture the *clean* gripper velocity before noise is applied; needed
    # below to reconstruct the object's absolute velocity.
    agent_vel = obs[..., SENSOR_INFO_PNP["grip_velp"]]
    obs = obs.copy()
    if only_object_noise:
        obs[..., idxs_object_pos] += (
            noises[..., idxs_object_pos] * stds[..., idxs_object_pos]
        )
    else:
        obs += noises * stds

    # Recompute relative position
    obs[..., SENSOR_INFO_PNP["object_rel_pos"]] = (
        obs[..., SENSOR_INFO_PNP["object_pos"]]
        - obs[..., SENSOR_INFO_PNP["grip_pos"]]
    )

    # Recompute relative speed: first add old agent velocity to get noisy
    # object velocity, then subtract noisy agent velocity to get correct
    # relative speed between noisy measurements
    obs[..., SENSOR_INFO_PNP["object_velp"]] = (
        obs[..., SENSOR_INFO_PNP["object_velp"]]
        + agent_vel
        - obs[..., SENSOR_INFO_PNP["grip_velp"]]
    )
    return obs
5,331,255
def test_domains(file_path="../../domains.json"):
    """
    Reads a list of domains and see if they respond
    """
    # Load and parse the domain list
    with open(file_path, 'r') as domain_file:
        domains = json.loads(domain_file.read())

    # Map each domain to its status-check result
    return {domain: check_status_code(domain) for domain in domains}
5,331,256
async def state(ip):
    """Get the current state of a given bulb."""
    click.echo("Get the state from %s" % ip)
    bulb = wizlight(ip)
    bulb_state = await bulb.updateState()
    click.echo(bulb_state.__dict__["pilotResult"])
5,331,257
def mix_dirichlet_noise(distribution: Dict[Any, float],
                        epsilon: float,
                        alpha: float) -> Dict[Any, float]:
    """Blend each value of *distribution* with Dirichlet noise.

    A single Dirichlet sample with concentration *alpha* per component is
    drawn, and every value v becomes (1 - epsilon) * v + epsilon * noise.
    epsilon = 0.0 leaves the values untouched.

    Parameters
    ----------
    distribution
        Dictionary with floats as values.
    epsilon
        Mixing coefficient for the Dirichlet perturbation.
    alpha
        Concentration parameter of the Dirichlet distribution.

    Returns
    -------
    dict
        The dictionary with perturbed values.
    """
    noise_vec = np.random.dirichlet([alpha] * len(distribution))
    keep = 1 - epsilon
    mixed = {}
    for (key, value), eta in zip(distribution.items(), noise_vec):
        mixed[key] = keep * value + epsilon * eta
    return mixed
5,331,258
def get_video_ID(video_url: str) -> str:
    """Return the 11-character YouTube video ID embedded in *video_url*.

    Handles standard watch URLs (?v=<id>) and, as a fallback, short-form
    links where the ID is the last path segment (youtu.be/<id>,
    /shorts/<id>), which the previous hard-coded ``split("/")[3]`` missed.
    """
    try:
        # Standard form: https://www.youtube.com/watch?v=<id>
        return parse_qs(urlparse(video_url).query)['v'][0]
    except KeyError:
        # No 'v' query parameter: take the last path segment and clip to
        # the 11-character ID length to drop any trailing junk.
        return urlparse(video_url).path.rsplit('/', 1)[-1][:11]
5,331,259
def profiling_csv(stage, phases, durations):
    """
    Dumps the profiling information into a CSV format.

    For example, with
      stage: `x`
      phases: ['a', 'b', 'c']
      durations: [1.42, 2.0, 3.4445]
    The output will be:
    ```
    x,a,1.42
    x,b,2.0
    x,c,3.444
    ```
    """
    assert all(hasattr(p, "name") for p in phases), "expected to have name attribute."
    rows = []
    for phase, elapsed in zip(phases, durations):
        rows.append(f"{stage},{phase.name},{round(elapsed, 3)}")
    return "\n".join(rows)
5,331,260
def int_not_in_range(bounds, inclusive=False):
    """Creates property that must be an int outside bounds[0] and bounds[1].

    Parameters:
        bounds: Subscriptable with len()==2, where bounds[0] is the lower
            bound and bounds[1] is the upper bound. Requires
            bounds[1] > bounds[0].
        inclusive (bool): If set to False, values falling on the upper and
            lower bounds will not be accepted. Can set one bound to be
            inclusive and the other exclusive by setting this to a tuple of
            2 bools, e.g. (True,False) makes the lower bound inclusive while
            the upper bound is not.

    Returns:
        property
    """
    # Delegate to the generic factory, constraining accepted values to int.
    return not_in_range(bounds, inclusive, type_constraint=int)
5,331,261
def plot_concordance_pr(
    pr_df: pd.DataFrame,
    snv: bool,
    colors: Dict[str, str] = None,
    size_prop: str = None,
    bins_to_label: List[int] = None,
) -> Tabs:  # annotation fixed: the function returns a bokeh Tabs (see docstring)
    """
    Generates plots showing Precision/Recall curves for truth samples:
    Two tabs:
    - One displaying the PR curve with ranking computed on the entire data
    - One displaying the PR curve with ranking computed on the truth sample only

    Within each tab, a row of n_truth_samples.

    The input to this function should come out of the `get_binned_concordance_pd`
    function, which creates a DataFrame containing the necessary metris for PR
    plotting and is grouped by 'rank_name', 'truth_sample', 'model' and 'snv'.

    :param DataFrame pr_df: Input Dataframe
    :param bool snv: Whether to plot SNVs or Indels
    :param dict of str -> str colors: Optional colors to use (model name -> desired color)
    :param str size_prop: Either 'radius' or 'area' can be specified. If either is specified, the points will be sized proportionally to the amount of data in that point.
    :param list of int bins_to_label: Bins to label
    :return: Bokeh grid of plots
    :rtype: Tabs
    """
    if colors is None:
        # Get a palette automatically (one Category10 color per model)
        models = sorted(list(set([g[2] for g in pr_df.groups])))
        palette = d3['Category10'][max(3, len(models))]
        colors = {model: palette[i] for i, model in enumerate(models)}

    hover = HoverTool(
        tooltips=[
            ('model', '@model'),
            ('bin', '@bin'),
            ('score (min, max)', '(@min_score, @max_score)'),
            ('n_alleles', '@n_alleles'),
            ('cum_alleles', '@cum_alleles'),
            ('data (x,y)', '($x, $y)'),
        ]
    )

    tabs = []
    # One tab per ranking strategy; group keys are (rank, truth_sample,
    # model, snv) tuples per the docstring.
    for rank in ['truth_sample_rank', 'global_rank']:
        plot_row = []
        for truth_sample in set([g[1] for g in pr_df.groups]):
            p = figure(
                title=truth_sample[0].upper() + truth_sample[1:],
                x_axis_label='Recall',
                y_axis_label='Precision',
                tools=[hover] + [tool for tool in TOOLS.split(',') if tool != 'hover'],
            )
            p.xaxis[0].formatter = NumeralTickFormatter(format='0%')
            p.yaxis[0].formatter = NumeralTickFormatter(format='0.0%')
            circles = []
            for model in set([g[2] for g in pr_df.groups]):
                data = pr_df.get_group((rank, truth_sample, model, snv)).copy()
                data['model'] = [model] * len(data)
                # Point size scales with the amount of data when size_prop set.
                data['size'] = get_point_size_col(data['n_alleles'], size_prop)
                source = ColumnDataSource(data)
                # Keep the renderer so it can be wired into the legend below.
                circles.append(
                    (
                        model,
                        [
                            p.circle(
                                'recall',
                                'precision',
                                size='size',
                                color=colors[model],
                                source=source,
                            )
                        ],
                    )
                )
                if bins_to_label is not None:
                    # Annotate the requested bins with their bin number,
                    # offset slightly to the right and connected by a short
                    # horizontal line.
                    label_data = data.loc[data.bin.isin(bins_to_label)].copy()
                    label_data['x_offset'] = label_data['recall'] + 0.025
                    label_data['y_offset'] = label_data['precision']
                    label_data['bin_str'] = [str(int(t)) for t in label_data['bin']]
                    label_source = ColumnDataSource(label_data)
                    p.add_layout(
                        LabelSet(
                            x='x_offset',
                            y='precision',
                            text='bin_str',
                            text_color=colors[model],
                            source=label_source,
                        )
                    )
                    p.multi_line(
                        xs=[[x, x + 0.05] for x in label_data.recall],
                        ys=[[y, y] for y in label_data.precision],
                        color=colors[model],
                    )
            # click_policy='hide' lets the user toggle models on/off.
            legend = Legend(
                items=circles,
                orientation='horizontal',
                location=(0, 0),
                click_policy='hide',
            )
            p.add_layout(legend, 'above')
            _set_plots_defaults(p)
            plot_row.append(p)
        tabs.append(Panel(child=Row(children=plot_row), title=rank))
    return Tabs(tabs=tabs)
5,331,262
def typehint_metavar(typehint):
    """Generates a metavar for some types.

    Returns None when no metavar applies to the given typehint.
    """
    if typehint == bool:
        return '{true,false}'
    if is_optional(typehint, bool):
        return '{true,false,null}'
    if _issubclass(typehint, Enum):
        names = list(typehint.__members__.keys())
        return '{'+','.join(names)+'}'
    if is_optional(typehint, Enum):
        names = list(typehint.__args__[0].__members__.keys())
        return '{'+','.join(names+['null'])+'}'
    return None
5,331,263
def generate_signed_url(filename):
    """
    Generate a signed url to access publicly

    The link stays valid for one hour from the moment of generation.
    """
    blob = find(filename)
    expires_at = datetime.now() + timedelta(hours=1)
    return blob.generate_signed_url(expires_at)
5,331,264
def delete_old_layer_versions(client, table, region, package, prefix):
    """
    Delete deployed layer versions that no longer appear in DynamoDB.

    Any version ARN found on the Lambda side but absent from the "live"
    entries recorded in DynamoDB for this region/package is deleted.
    Returns the list of deleted ARNs.
    """
    deleted_arns = []
    layer_name = f"{prefix}{package}"

    # Layer versions currently deployed to Lambda.
    deployed_arns = list_layer_version_arns(client=client, layer_name=layer_name)

    # Layer versions still tracked as live in DynamoDB (old ones expire).
    response = table.query(
        KeyConditionExpression=Key("deployed_region-package").eq(f"{region}.{package}"),
        ScanIndexForward=False,
    )
    live_arns = [item['layer_version_arn'] for item in response['Items']]

    # Remove anything deployed but no longer live.
    for layer_version_arn in deployed_arns:
        if layer_version_arn in live_arns:
            continue
        logger.info(f"Found dead layer version {layer_version_arn}...deleting")
        client.delete_layer_version(
            LayerName=layer_name,
            VersionNumber=layer_version_arn.split(":")[-1],
        )
        deleted_arns.append(layer_version_arn)

    return deleted_arns
5,331,265
def _run_simulation(sim_desc):
    """Since _run_simulation() is always run in a separate process, its input
    and output params must be pickle-friendly. Keep that in mind when making
    changes.

    This is what each worker executes. Given a SimulationDescription object,
    calls the sequence & binning code, traps any errors that arise and grabs
    results. Also verifies that the results meet our criteria (e.g. converts
    to tuples/lists if necessary, raises an exception if the ppms, areas and
    phases arrays are not all the same length, etc.)

    If an exception is raised at any point, it sets _worker_exception.

    Returns a result dict. If an exception occurred, the repackaged
    exception is in result["exception"].
    """
    started = util_time.now()

    # I make a copy of dims because I need to return them as part of the
    # result dict, and the pulse seq code might alter or even delete what's
    # attached to the sim_desc.
    dims = sim_desc.dims[:]

    exception = False

    # Execute the user's sequence code.
    # Bare except is deliberate: user code may raise anything, and the error
    # is repackaged into a pickleable form rather than propagated.
    try:
        result = _sequence_function(sim_desc)
    except:
        exception = _repackage_exception(SEQUENCE_CODE_ALIAS)

    if not exception:
        # Sequence code completed OK.
        if result:
            # Sequence code returned the result. There's no need to
            # execute the binning code.
            pass
        else:
            # Execute the user's binning code
            try:
                result = _binning_function(sim_desc)
            except:
                exception = _repackage_exception(BINNING_CODE_ALIAS)

    if exception:
        result = EMPTY_RESULT
    else:
        # Execution completed with no errors. Let's see if what was returned
        # meets our criteria. First, the result must be an N-tuple, where
        # N == RESULT_LENGTH. As of this writing, RESULT_LENGTH == 3.
        result_length = _safe_len(result)
        if result_length != RESULT_LENGTH:
            result = EMPTY_RESULT
            # I force an error here so I can get the exception including a traceback.
            try:
                raise ValueError("Result returned from your code must be a %d-tuple, but has length %d" % \
                                 (RESULT_LENGTH, result_length))
            except ValueError:
                exception = _repackage_exception(GENERIC_CODE_ALIAS)

        # Our second criteria is that each element of the 3-tuple must be the
        # same length.
        lengths = [_safe_len(element) for element in result]
        for length in lengths:
            if length != lengths[0]:
                result = EMPTY_RESULT
                # I force an error here so I can get the exception including a traceback.
                try:
                    raise ValueError("Result elements differ in length: %s" % lengths)
                except ValueError:
                    exception = _repackage_exception(GENERIC_CODE_ALIAS)

    # The user's code is required to return a tuple of iterables. Those
    # iterables might be lists, numpy arrays, PyGAMMA.DoubleVectors or any
    # number of other things. PyGAMMA objects in particular don't pickle, and
    # this function's result needs to be pickleable.
    # So here we ensure that the result contains only ordinary Python objects.
    result = list(map(_tuplify, result))

    # Last but not least, ensure that each value is numeric and a native
    # Python type.
    f = lambda an_object: isinstance(an_object, (float, int))

    # Loop through ppms, areas & phases lists
    for result_chunk in result:
        # map() allows me to test all the items in the list in one shot.
        if not all(map(f, result_chunk)):
            # Ooops, at least one of the results in this list isn't a float,
            # int, or long.
            # I force an error here so I can get the exception including
            # a traceback.
            try:
                raise ValueError("Results must contain only floats, ints or longs")
            except ValueError:
                exception = _repackage_exception(GENERIC_CODE_ALIAS)

    # The result (good or bad) is returned as a dict.
    result = dict(list(zip(("ppms", "areas", "phases"), result)))
    result["started"] = started
    result["completed"] = util_time.now()
    result["metabolite"] = dims[0]
    result["dims"] = dims[1:]
    if exception:
        # Shared flag lets the parent process notice a failure promptly.
        _worker_exception.value = 1
        result["exception"] = exception
    return result
5,331,266
def check_cn_en_match(path="./paddle", diff_file="en_cn_files_diff"):
    """Report EN/CN doc files whose language counterpart is missing.

    Walks `path` recursively; for every file ending in the English suffix
    (``en_suffix``) or Chinese suffix (``cn_suffix``), checks that the
    matching file in the other language exists alongside it. Each missing
    pair is written to `diff_file` as a tab-separated line.
    """
    line_fmt = "{}\t{}\n"
    with open(diff_file, 'w') as report:
        # Header row (typo "not_exits" kept for downstream compatibility).
        report.write(line_fmt.format("exist", "not_exits"))
        for root, _, names in os.walk(path):
            for name in names:
                # Derive the counterpart filename, or skip non-doc files.
                if name.endswith(en_suffix):
                    partner = name.replace(en_suffix, cn_suffix)
                elif name.endswith(cn_suffix):
                    partner = name.replace(cn_suffix, en_suffix)
                else:
                    continue
                if not os.path.exists(os.path.join(root, partner)):
                    report.write(line_fmt.format(
                        os.path.join(root, name),
                        os.path.join(root, partner)))
5,331,267
def _sample_fq_pair( file1: str, file2: str, fraction: float, output1: str, output2: str): """ Randomly subsample the input fastq file pair Args: file1: path-like The input fastq file 1 file2: path-like The input fastq file 2 fraction: Fraction of reads or sequences to be retrieved output1: path-like The output fastq file 1 output2: path-like The output fastq file 2 """ fh_in1 = open(file1, 'r') fh_in2 = open(file2, 'r') fh_out1 = open(output1, 'w') fh_out2 = open(output2, 'w') # Count the total number of lines in the fastq file i = 0 while fh_in1.readline() != '': i += 1 # Go back to the beginning of the file, because it needs to go through the file again fh_in1.seek(0) N = int(i / 4) # The total count of reads n = int(N * fraction) # The count of reads to be retrieved # A random array of int corresponding to which reads to be retrieved rand_array = random.sample(list(range(N)), n) rand_array.sort() # Prepare the iterating variable pos (current line position) and next_pos pos = -4 # 4 lines for each read in fastq, so if the next read is 10th read, # then it starts at 40th line next_pos = rand_array.pop(0) * 4 while len(rand_array) > 0: # Read 4 lines at a time lines1 = [fh_in1.readline() for _ in range(4)] # .1.fq lines2 = [fh_in2.readline() for _ in range(4)] # .2.fq # Every time read 4 lines, update the current position pos pos += 4 if pos == next_pos: # If pos is at the next position, write four lines for l in lines1: fh_out1.write(l) # .1.fq for l in lines2: fh_out2.write(l) # .2.fq # Update the next position next_pos = rand_array.pop(0) * 4 # If pos is not at the next position, then just continue onto the next read fh_in1.close() fh_in2.close() fh_out1.close() fh_out2.close()
5,331,268
def test_array_and_stringlike_roundtrip(strtype):
    """
    Test that string representations of long-double roundtrip both
    for array casting and scalar coercion, see also gh-15608.
    """
    # A value just above 1 that is representable in longdouble but not double.
    value = 1 + LD_INFO.eps

    if strtype in (np.bytes_, bytes):
        encoded = strtype(repr(value).encode("ascii"))
    else:
        encoded = strtype(repr(value))

    # Scalar coercion from the string-like recovers the exact value.
    assert value == np.longdouble(encoded)

    # Array casting from the string dtype roundtrips as well.
    str_arr = np.asarray([value] * 3, dtype=strtype)
    assert (value == str_arr.astype(np.longdouble)).all()

    # Array coercion and casting to string agree with the scalar repr.
    assert (str_arr == encoded).all()
    assert (np.asarray([value] * 3).astype(strtype) == encoded).all()
5,331,269
def view_filestorage_file(self, request):
    """Renders the given filestorage file in the browser.

    Looks up the storage named by ``self.storage`` on the application and
    returns the system path of ``self.path`` within that storage.
    """
    storage = getattr(request.app, self.storage)
    return storage.getsyspath(self.path)
5,331,270
def enum_choice_list(data):
    """ Creates the argparse choices and type kwargs for a supplied enum
    type or list of strings.

    Returns an empty dict for falsy input; otherwise a dict with a
    case-insensitive ``choices`` list and a ``type`` callable that maps a
    value to its canonical-cased choice (or returns it unchanged if no
    choice matches).
    """
    if not data:
        return {}

    # Enum members expose .value; plain string lists are used as-is.
    try:
        choices = [member.value for member in data]
    except AttributeError:
        choices = data

    def _type(value):
        # Falsy values (None, "") pass through untouched.
        if not value:
            return value
        lowered = value.lower()
        return next((c for c in choices if c.lower() == lowered), value)

    return {
        'choices': CaseInsensitiveList(choices),
        'type': _type,
    }
5,331,271
def chartset(request): """ Conjunto de caracteres que determian la pagina request: respuesta de la url""" print "--------------- Obteniendo charset -------------------" try: charset = request.encoding except AttributeError as error_atributo: charset = "NA" print "charset: " + str(error_atributo) return charset
5,331,272
def generate_UUID():
    """ Generate a UUID and return it

    Returns a freshly generated random (version 4) UUID as a string.
    """
    new_id = uuid.uuid4()
    return "{}".format(new_id)
5,331,273
def streaming_recall_at_thresholds(predictions, labels, thresholds,
                                   ignore_mask=None,
                                   metrics_collections=None,
                                   updates_collections=None, name=None):
  """Computes various recall values for different `thresholds` on `predictions`.

  The `streaming_recall_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds.
  `recall[i]` is defined as the number of values in `predictions` above
  `thresholds[i]` whose corresponding entry in `labels` is `True`
  (`true_positives[i]`) divided by the number of True values in `labels`
  (`true_positives[i] + false_negatives[i]`).

  If `ignore_mask` is not None then only values whose corresponding value in
  `ignore_mask` is `False` are considered.

  `recall` are returned along with an `update_op` whose value equals that of
  `recall`.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A binary `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    ignore_mask: An optional, binary tensor whose size matches `predictions`.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_op_scope name.

  Returns:
    recall: A float tensor of shape [len(thresholds)].
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `recall`.

  Raises:
    ValueError: If the shape of `predictions` and `labels` do not match or if
      `ignore_mask` is not `None` and its shape doesn't match `predictions`
      or if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_op_scope([predictions, labels], name,
                                        'recall_at_thresholds'):
    # _tp_fn_tn_fp returns the four count variables plus their update ops;
    # only TP / FN (and their update ops) are needed for recall.
    (true_positives, false_negatives, _, _,
     true_positives_compute_op, false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(
         predictions, labels, thresholds, ignore_mask)

    # avoid division by zero
    epsilon = 1e-7
    def compute_recall(name):
      # Elementwise recall per threshold: TP / (TP + FN + epsilon).
      recall = math_ops.div(true_positives,
                            epsilon + true_positives + false_negatives,
                            name='recall_' + name)
      return recall

    recall = compute_recall('value')
    # The update op must run the counter updates first so that it reports
    # recall computed over the newly accumulated counts.
    with ops.control_dependencies([true_positives_compute_op,
                                   false_negatives_compute_op]):
      update_op = compute_recall('update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, recall)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return recall, update_op
5,331,274
def cmd_flush(bot, trigger):
    """
    Resets the cached RatNames. Helps with Bugged rat names on !assign
    aliases: flush, resetnames, rn, flushnames, fn

    :param bot: IRC bot instance, used to announce completion in-channel
    :param trigger: command trigger metadata (unused here)
    """
    flushNames()
    bot.say('Cached names flushed!')
5,331,275
def fib(n):
    """Returns the nth Fibonacci number.

    Uses the 1-based variant of the sequence, matching the original
    implementation: fib(0) == fib(1) == 1, fib(2) == 2, fib(3) == 3, ...

    Replaces the original naive double recursion (exponential time,
    RecursionError for large n) with an O(n) iterative computation.

    Raises:
        ValueError: if n is negative (the recursive original would
            recurse forever for n < 0).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a
5,331,276
def fuzz_cachew_impl():
    """
    Insert random sleeps in cachew_impl to increase likelihood of concurrency issues

    Generator-based fixture: patches cachew_wrapper on entry, yields to the
    caller, and unpatches on resume. The patch injects a random 0-2s sleep
    just before the cache-hash comparison.
    """
    import patchy  # type: ignore[import]
    from .. import cachew_wrapper
    # NOTE(review): the hunk below targets specific line numbers (~740) in
    # cachew_wrapper and will need updating if that file shifts.
    patch = '''\
@@ -740,6 +740,11 @@

         logger.debug('old hash: %s', prev_hash)

+        from random import random
+        rs = random() * 2
+        print("sleeping for: ", rs)
+        from time import sleep; sleep(rs)
+
         if h == prev_hash:
             logger.debug('hash matched: loading from cache')
             rows = conn.execute(values_table.select())
'''
    patchy.patch(cachew_wrapper, patch)
    yield
    # Restore the original implementation once the caller is done.
    patchy.unpatch(cachew_wrapper, patch)
5,331,277
def CheckStaleSettings():
  """Check various things to make sure they don't get stale.

  Verifies that every path listed in SPECIAL_TESTS and SLOW_TESTS still
  exists, and that every symlink under bin/ still points at an existing
  scripts/<name>.py. Dies with a summary error if anything is stale.
  """
  stale_found = False

  for test in SPECIAL_TESTS:
    if not os.path.exists(test):
      stale_found = True
      logging.error('SPECIAL_TESTS is stale: delete old %s', test)

  for test in SLOW_TESTS:
    if not os.path.exists(test):
      stale_found = True
      logging.error('SLOW_TESTS is stale: delete old %s', test)

  # Sanity check wrapper scripts.
  for link_path in glob.glob('bin/*'):
    if not os.path.islink(link_path):
      continue
    script = os.path.join('scripts', os.path.basename(link_path) + '.py')
    if not os.path.exists(script):
      stale_found = True
      logging.error('Stale symlink should be removed: %s', link_path)

  if stale_found:
    cros_build_lib.Die('Please fix the above problems first')
5,331,278
def test_remove_label_raises_error_if_label_not_in_matcher(
    matcher: RegexMatcher,
) -> None:
    """It raises a ValueError if trying to remove a label not present."""
    # "TEST" was never added to the fixture matcher, so removal must fail.
    with pytest.raises(ValueError):
        matcher.remove("TEST")
5,331,279
def load_saved_users(args) -> list:
    """Load the saved user identifiers from the users CSV file.

    :param args: namespace providing ``data_dir``, ``users_tweets_dir``
        and ``users_file`` path components
    :return: list of values from the first (header-less) CSV column
    """
    csv_path = os.path.join(args.data_dir, args.users_tweets_dir, args.users_file)
    frame = pd.read_csv(csv_path, header=None)
    return frame[0].tolist()
5,331,280
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp, biom_table_dir,
                                     ref_db_fp, threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    Splits the input table into a reference-hit (16s) table and a
    reference-non-hit (artifact) table, writing both as .biom and .fa
    files into `biom_table_dir`.

    Parameters
    ----------
    table : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')

    # First pass: strip artifact sequences from the fasta file; the
    # surviving sequences land in clean_fp.
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(
        fasta_filename, ref_fp, working_dir=biom_table_dir,
        ref_db_fp=ref_db_fp, negate=False, threads=threads,
        verbose=verbose, sim_thresh=sim_thresh,
        coverage_thresh=coverage_thresh)

    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))

    # Collect the set of sequences that survived the filtering.
    kept_seqs = {seq for _, seq in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(kept_seqs))

    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)

    # Artifact-only table: observations NOT in the kept set (invert=True),
    # built as a copy so the original table can be filtered in place below.
    artifact_table = table.filter(list(kept_seqs), axis='observation',
                                  inplace=False, invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir,
                                   'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)

    # 16s-only table: keep just the surviving observations (in place).
    table.filter(list(kept_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)

    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
5,331,281
def template_review(context, mapping):
    """:phabreview: Object describing the review for this changeset.
    Has attributes `url` and `id`.

    Returns None (implicitly) when the changeset description carries no
    Differential Revision marker.
    """
    ctx = context.resource(mapping, b'ctx')
    m = _differentialrevisiondescre.search(ctx.description())
    if m:
        return templateutil.hybriddict({
            b'url': m.group(b'url'),
            # Bug fix: bytes objects have no .format() on Python 3, so the
            # original b"D{}".format(...) raised AttributeError. Plain
            # concatenation builds the same b"D<id>" value.
            b'id': b'D' + m.group(b'id'),
        })
5,331,282
def addRandomEdges(graph: nx.Graph, nEdges: int) -> tuple:
    """
    Adds random edges to a given graph.

    Returns a tuple of (new graph with the edges added, list of the added
    edges). The input graph is not modified.

    Bug fix: np.random.randint(0, n-1) has an *exclusive* upper bound, so
    the original could never select the last node as an endpoint; the
    bound is now n, making every node eligible.

    NOTE(review): like the original, this loops until a fresh edge is
    found, so it will spin forever if nEdges exceeds the number of
    available new edges — confirm callers keep nEdges small.
    """
    nodes = list(graph.nodes)
    n = len(nodes)
    edges = []
    for _ in range(nEdges):
        newEdge = False
        while not newEdge:
            i_u, i_v = np.random.randint(0, n), np.random.randint(0, n)
            edge = (nodes[i_u], nodes[i_v])
            if edge not in graph.edges(data=False) and edge not in edges:
                newEdge = True
                edges.append(edge)
    g = graph.copy()
    g.add_edges_from(edges)
    return g, edges
5,331,283
def get_s3_buckets_for_account(account, region='us-east-1'):
    """
    Get S3 buckets for a specific account.

    Returns a list of dicts (one per bucket) with name, account number,
    account alias and global-access flag; empty when the cross-account
    role cannot be assumed or the account has no buckets.

    :param account: AWS account
    :param region: AWS region
    """
    # A fresh session per call keeps this thread-safe.
    session = boto3.session.Session()
    assume = rolesession.assume_crossact_audit_role(
        session, account['accountNum'], region)

    bucket_records = []
    if not assume:
        return bucket_records

    s3_client = assume.client('s3')
    buckets = s3_client.list_buckets().get('Buckets')
    if buckets:
        for bucket in buckets:
            bucket_records.append(dict(
                BucketName=bucket['Name'],
                AccountNum=account['accountNum'],
                AccountAlias=account.get('alias'),
                GlobalAccess=is_s3_bucket_global(assume, bucket)))
    return bucket_records
5,331,284
def save_greens_hetero_links_plus_one(links, unwrap=0, Kprops_bareWLC=None):
    """Each link in links is the smallest linker length in the heterogenous
    chain. Assumes each chain has random sampling of link or (link + 1)bp.

    Computes G(R;L) via Fourier inversion for both the kinked-WLC and bare
    WLC propagators and saves each as a .npy file under
    csvs/Bprops/{unwrap}unwraps/heterogenous/.

    Parameters
    ----------
    links : iterable of int
        linker lengths; min(links) selects which precomputed B-propagator
        file is loaded and names the output files
    unwrap : int
        unwrapping amount, used only in the input/output file paths
    Kprops_bareWLC : optional
        precomputed bare-WLC K propagators; tabulated here if None
    """
    # Dense K grid: linear + logarithmic points merged and deduplicated.
    Klin = np.linspace(0, 10**5, 20000)
    Klog = np.logspace(-3, 5, 10000)
    Kvals = np.unique(np.concatenate((Klin, Klog)))
    #convert to little k -- units of inverse bp (this results in kmax = 332)
    kvals = Kvals / (2*wlc.default_lp)
    if Kprops_bareWLC is None:
        # Expensive tabulation; callers can pass a cached copy instead.
        Kprops_bareWLC = wlc.tabulate_bareWLC_propagators(Kvals)
        print('Tabulated K propagators for bare WLC!')
    link = min(links)
    #for each chain, do fourier inversion integral and save output as .npy file in Bprops directory
    # Precomputed kinked-WLC B(k) propagator for this linker pair.
    Bprop = np.load(f'csvs/Bprops/{unwrap}unwraps/heterogenous/B0_k_given_N_links{link}or{link+1}_50nucs_30000Ks.npy')
    rvals = np.linspace(0.0, 1.0, 1000)
    qprop = wlc.bareWLC_gprop(kvals, links, unwrap, props=Kprops_bareWLC) #default: 1000 rvals
    integral = wlc.BRN_fourier_integrand_splines(kvals, links, unwrap, Bprop=Bprop, rvals=rvals) #default: 1000 rvals
    qintegral = wlc.BRN_fourier_integrand_splines(kvals, links, unwrap, Bprop=qprop, rvals=rvals) #default: 1000 rvals
    #integral takes ~10 min to run, so prob worth saving
    np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/kinkedWLC_greens_links{link}or{link+1}_{len(rvals)}rvals_50nucs.npy', integral, allow_pickle=False)
    np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/bareWLC_greens_links{link}or{link+1}_{len(rvals)}rvals_50nucs.npy', qintegral, allow_pickle=False)
    print(f'Saved G(R;L) for {link} or {link+1} link, {unwrap} unwrap!')
5,331,285
def test_structuring_enums(converter, choice, enum):
    # type: (Converter, Any, Any) -> None
    """Test structuring enums by their values."""
    # Pick an arbitrary member and verify that structuring its raw value
    # round-trips back to the member itself.
    val = choice(list(enum))

    assert converter.structure(val.value, enum) == val
5,331,286
def _nfc_tag_join(go, cli):
    """Write a static-handover NFC tag on `cli`, read it on `go`, and wait
    for `cli` to join the group. Returns the group formation result.

    Extracted from test_nfc_p2p_static_handover_join_tagdev_client, which
    previously duplicated this sequence verbatim for each client.
    """
    res = cli.request("P2P_LISTEN")
    if "FAIL" in res:
        raise Exception("Failed to start Listen mode")
    pw = cli.request("WPS_NFC_TOKEN NDEF").rstrip()
    if "FAIL" in pw:
        raise Exception("Failed to generate password token")
    res = cli.request("P2P_SET nfc_tag 1").rstrip()
    if "FAIL" in res:
        raise Exception("Failed to enable NFC Tag for P2P static handover")
    sel = cli.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
    if "FAIL" in sel:
        raise Exception("Failed to generate NFC connection handover select")

    logger.info("Read NFC Tag on the GO to trigger invitation")
    res = go.request("WPS_NFC_TAG_READ " + sel)
    if "FAIL" in res:
        raise Exception("Failed to provide NFC tag contents to wpa_supplicant")
    ev = cli.wait_event(grpform_events, timeout=30)
    if ev is None:
        raise Exception("Joining the group timed out")
    return cli.group_form_result(ev)

def test_nfc_p2p_static_handover_join_tagdev_client(dev):
    """NFC static handover to join a P2P group (NFC Tag device is the P2P Client)"""
    set_ip_addr_info(dev[0])
    logger.info("Start autonomous GO")
    dev[0].p2p_start_go()
    dev[1].request("SET ignore_old_scan_res 1")
    dev[2].request("SET ignore_old_scan_res 1")

    logger.info("Write NFC Tag on the P2P Client")
    res = _nfc_tag_join(dev[0], dev[1])
    hwsim_utils.test_connectivity_p2p(dev[0], dev[1])
    check_ip_addr(res)

    logger.info("Write NFC Tag on another P2P Client")
    res = _nfc_tag_join(dev[0], dev[2])
    hwsim_utils.test_connectivity_p2p(dev[0], dev[2])
    check_ip_addr(res)
5,331,287
def create_uid_email(username=None, hostname=None):
    """Create an email address suitable for a UID on a GnuPG key.

    :param str username: The username portion of an email address.  If None,
                         defaults to the username of the running Python
                         process.

    :param str hostname: The FQDN portion of an email address. If None, the
                         hostname is obtained from gethostname(2).

    :rtype: str
    :returns: A string formatted as <username>@<hostname>.
    """
    # Spaces are never legal in either half of the address.
    if hostname:
        hostname = hostname.replace(' ', '_')

    if not username:
        # Fall back to the process owner's login name.
        try:
            username = os.environ['LOGNAME']
        except KeyError:
            username = os.environ['USERNAME']
        if not hostname:
            hostname = gethostname()
        return "%s@%s" % (username.replace(' ', '_'), hostname)

    username = username.replace(' ', '_')
    # NOTE(review): this branch fires only when username *starts* with '@'
    # (find() == 0); verify that matches the intended "already an email"
    # check upstream.
    if (not hostname) and (username.find('@') == 0):
        return "%s@%s" % (username, gethostname())
    if hostname:
        return "%s@%s" % (username, hostname)
    return username
5,331,288
def get_users():
    """Load all users from the database.

    Returns a JSON payload of {'users': [...]} with HTTP 200 on success,
    or {'error': ...} with HTTP 500 if the Cassandra query fails.
    """
    session = get_cassandra_session()
    future = session.execute_async("SELECT user_id, username, email FROM users")
    try:
        rows = future.result()
    except Exception:
        # Bug fix: the original called the nonexistent log.exeception()
        # (AttributeError) and then fell through to use the undefined
        # `rows` (NameError). Log the traceback and return an error
        # response instead.
        log.exception('Failed to load users from Cassandra')
        return jsonify({'error': 'failed to load users'}), 500

    users = [
        {
            'user_id': row.user_id,
            'username': row.username,
            'email': row.email,
        }
        for row in rows
    ]
    return jsonify({'users': users}), 200
5,331,289
def fastcorrelate(
    input1,
    input2,
    usefft=True,
    zeropadding=0,
    weighting="None",
    displayplots=False,
    debug=False,
):
    """Perform a fast correlation between two arrays.

    Parameters
    ----------
    input1, input2 : array-like
        Signals to correlate.
    usefft : bool
        When True, correlate via an array-flipped FFT convolution;
        otherwise fall back to np.correlate.
    zeropadding : int
        0 for no padding, a positive count of padding points, or any
        negative value for automatic padding (double each input length).
    weighting : str
        "None" for plain correlation; anything else routes through
        convolve_weighted_fft (FFT path only).
    displayplots, debug : bool
        Diagnostic flags.

    Returns
    -------
    corr : ndarray
        Full cross-correlation, length len(input1) + len(input2) - 1 on
        the FFT path.

    Notes
    -----
    From http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python.
    NOTE(review): the auto-pad start offset (len1+len2)//2 equals the
    correct shift only when both inputs have equal length — confirm
    auto-padding is only used for equal-length inputs.
    """
    len1 = len(input1)
    len2 = len(input2)
    outlen = len1 + len2 - 1

    if zeropadding == 0:
        # No padding at all.
        padded1 = input1
        padded2 = input2
        startpt = 0
    else:
        if zeropadding < 0:
            # Automatic padding: double each input length.
            size1, size2 = len1 * 2, len2 * 2
            startpt = (len1 + len2) // 2
        else:
            # Explicit padding by `zeropadding` points.
            size1, size2 = len1 + zeropadding, len2 + zeropadding
            startpt = zeropadding
        padded1 = np.zeros((size1), dtype=float)
        padded2 = np.zeros((size2), dtype=float)
        padded1[0:len1] = input1
        padded2[0:len2] = input2

    if debug:
        print(f"FASTCORRELATE - padding: {zeropadding}, startpt: {startpt}, outlen: {outlen}")

    if not usefft:
        return np.correlate(padded1, padded2, mode="full")

    # Array-flipped convolution == correlation.
    if weighting == "None":
        return signal.fftconvolve(padded1, padded2[::-1], mode="full")[
            startpt : startpt + outlen
        ]
    return convolve_weighted_fft(
        padded1,
        padded2[::-1],
        mode="full",
        weighting=weighting,
        displayplots=displayplots,
    )[startpt : startpt + outlen]
5,331,290
def test_bottom_up_coordinate_grabbing(qtbot, p1, p2):
    """
    Test coordinate grabbing when grabbing from the 'bottom' of the screen
    to the 'top' of the screen.

    Clicks first at p1 (lower) then p2 (upper) and checks that the widget
    normalizes them: top_corner tracks p2 and bottom_corner tracks p1
    regardless of click order.

    :param QtBot qtbot:
    :return: None
    """
    co_widget = CoordinateWidget()
    qtbot.addWidget(co_widget)
    # Click bottom point first, then the top point.
    qtbot.mouseClick(co_widget, QtCore.Qt.LeftButton, pos=p1)
    qtbot.mouseClick(co_widget, QtCore.Qt.LeftButton, pos=p2)
    # approx(..., 1): widget coordinates may differ by rounding.
    assert co_widget.top_corner.x() == pytest.approx(p2.x(), 1)
    assert co_widget.top_corner.y() == pytest.approx(p2.y(), 1)
    assert co_widget.bottom_corner.x() == pytest.approx(p1.x(), 1)
    assert co_widget.bottom_corner.y() == pytest.approx(p1.y(), 1)
5,331,291
def vgconv(xinput,yinput,fwhm, ppr=None): """convolution with a Gaussian in log lambda scale for a constant resolving power Parameters ---------- xinput: numpy float array wavelengths yinput: numpy array of floats fluxes fwhm: float FWHM of the Gaussian (km/s) ppr: float, optional Points per resolution element to downsample the convolved spectrum (default None, to keep the original sampling) Returns ------- x: numpy float array wavelengths after convolution, will be a subset of xinput when that is equidistant in log lambda, otherwise a subset of the resampled version y: numpy array of floats fluxes after convolution """ #resampling to ln(lambda) if need be xx = np.diff(np.log(xinput)) if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda nel = len(xinput) minx = np.log(xinput[0]) maxx = np.log(xinput[-1]) x = np.linspace(minx,maxx,nel) step = x[1] - x[0] x = np.exp(x) #y = np.interp( x, xinput, yinput) y = interp_spl( x, xinput, yinput) else: x = xinput y = yinput step = np.log(xinput[1])-np.log(xinput[0]) fwhm = fwhm/clight # inverse of the resolving power sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5)) npoints = 2*int(3*fwhm/2./step)+1 half = npoints * step /2. xx = np.linspace(-half,half,npoints) kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2) kernel = kernel/np.sum(kernel) y = np.convolve(y,kernel,'valid') edge = int(npoints/2) x = x[edge:-edge] #print(xinput.size,x.size,y.size) if ppr != None: fac = int(fwhm / step / ppr) print(fwhm,step,ppr,fac) subset = np.arange(x.size / fac, dtype=int) * fac x = x[subset] y = y[subset] return(x,y)
5,331,292
def attach_component_to_entity(entity_id, component_name):
    # type: (azlmbr.entity.EntityId, str) -> azlmbr.entity.EntityComponentIdPair
    """
    Adds the component if not added already.
    :param entity_id: EntityId of the entity to attach the component to
    :param component_name: name of the component
    :return: If successful, returns the EntityComponentIdPair, otherwise returns None.
    """
    type_ids = editor.EditorComponentAPIBus(
        bus.Broadcast, 'FindComponentTypeIdsByEntityType', [component_name], 0)
    general.log(f"Components found = {len(type_ids)}")

    # The lookup must resolve to exactly one component type.
    if len(type_ids) < 1:
        general.log(f"ERROR: A component class with name {component_name} doesn't exist")
        return None
    if len(type_ids) > 1:
        general.log(f"ERROR: Found more than one component classes with same name: {component_name}")
        return None

    # Already attached? Return the existing pair rather than adding twice.
    existing = editor.EditorComponentAPIBus(
        bus.Broadcast, 'GetComponentOfType', entity_id, type_ids[0])
    if existing.IsSuccess():
        # In this case the value is not a list.
        return existing.GetValue()

    added = editor.EditorComponentAPIBus(
        bus.Broadcast, 'AddComponentsOfType', entity_id, type_ids)
    if added.IsSuccess():
        general.log(f"{component_name} Component added to entity.")
        return added.GetValue()[0]

    general.log(f"ERROR: Failed to add component [{component_name}] to entity")
    return None
5,331,293
def get_raw_samples(sampling_strategy: sample_entry.Strategy, step: int) -> np.ndarray:
    """
    Collects raw samples from database associated with sampling strategy.
    If the raw samples do not exists in the database new ones will be
    created by calling the Sobol function.
    """
    sampling_strategy.reload()

    # Reuse an already-uploaded raw sample for this sequence step, if any.
    existing = next(
        (raw for raw in sampling_strategy.samples_raw
         if raw.sequence_number == step),
        None)
    if existing is not None:
        logger.debug(f'Found existing raw sample with sequence #{step}')
        return np.array(existing.samples_raw)

    logger.debug(f'Creating new raw sample with sequence #{step}')
    n_dimensions = len(sampling_strategy.strategy['distributions'].keys())
    samples_raw = sobol(m=sampling_strategy.strategy['settings']['raw sample size'],
                        dimension=n_dimensions,
                        sets=1)
    # Persist the new sample and link it to the strategy before returning.
    samples_raw_id = sampling_interactions.upload_raw_samples(samples_raw, step)
    sampling_interactions.add_raw_samples_to_strategy(sampling_strategy, samples_raw_id)

    return samples_raw
5,331,294
def make_hdf(
        idir: str,
        ofile: str
        ) -> None:
    """Concatenate a directory of RouteLink CSV files into one HDF5 table.

    (The previous docstring described a NetCDF-to-CSV conversion, but the
    code reads ``*.csv`` and writes a single pandas HDF5 store.)

    Parameters
    ----------
    idir: str
        Input directory containing RouteLink files in CSV format.
    ofile: str
        Output HDF5 file to store the concatenated pandas.DataFrame.

    Returns
    -------
    None
    """
    # Load every CSV; keep usgs_site_code as string to preserve leading zeros.
    frames = [
        pd.read_csv(ifile, comment="#", dtype={"usgs_site_code": str},
                    parse_dates=["time"])
        for ifile in Path(idir).glob("*.csv")
    ]

    # Concat into a single frame and write it out (light compression).
    data = pd.concat(frames, ignore_index=True)
    data.to_hdf(ofile, key="data", format="table", complevel=1)
5,331,295
def mix_to_dat(probspec,isStringIO=True):
    """
    Reads a YAML mix file and generates all of the GMPL dat components
    associated with the mix inputs.

    (Python 2 module: uses ``print >>`` redirection and StringIO.StringIO.)

    Inputs:
        probspec - problem spec dict; 'reqd_files'/'filename_mix' names the
            YAML mix file, and optionally 'opt_files'/'filename_ttbounds'
            names a tour-type bounds override file
        isStringIO - when True, return the generated dat text via
            StringIO.getvalue()

    Output:
        GMPL dat text, e.g.

        param tt_shiftlen_min_dys_weeks:=
        1 6 1 3
        1 6 2 5
        1 6 3 5
        1 6 4 5
        ...
    """
    # Open the mix file and load it into a YAML object
    fn_mix = probspec['reqd_files']['filename_mix']
    fin = open(fn_mix,"r")
    ttspec = yaml.load(fin)

    mixout = StringIO.StringIO()

##    print ttspec
##    print ttspec['tourtypes']
##    print ttspec['tourtypes'][0]
##    print ttspec['tourtypes'][0]['min_days_week']

    # Get set of shift lengths and order them ascending by length
    lenset = set([])
    for m in ttspec['tourtypes']:
        for s in m['shiftlengths']:
            lenset.add(s['numbins'])

    lengths = list(lenset)
    lengths.sort()
    len_param = list_to_param('lengths', lengths)

    # Number of shift lengths
    n_lengths = size(lengths)
    numlen_param = scalar_to_param('n_lengths', n_lengths)

    # Number of tour types
    n_ttypes = size(ttspec['tourtypes'])
    numttypes_param = scalar_to_param('n_tts', n_ttypes)

    # Tour type length sets
    lenxset = get_length_x_from_mix(ttspec)
    lenxset_set = list_to_indexedset('tt_length_x', lenxset)

    # Midnight threshold for weekend assignments
    midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
    midthresh_param = list_to_param('midnight_thresh', midthresholds)

    # Parttime flag and bound
    ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
    ptflags_param = list_to_param('tt_parttime', ptflags)
    ptfrac = ttspec['max_parttime_frac']
    ptfrac_param = scalar_to_param('max_parttime_frac', ptfrac)

    # Global start window width
    width = ttspec['g_start_window_width']
    width_param = scalar_to_param('g_start_window_width', width)

    # Lower and upper bounds on number scheduled: an optional ttbounds
    # file overrides the values in the mix file.
    if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
        fn_ttbnds = probspec['opt_files']['filename_ttbounds']
        fin_ttbnds = open(fn_ttbnds,"r")
        ttbndsspec = yaml.load(fin_ttbnds)
        tt_lb = [m['tt_lb'] for m in ttbndsspec['tourtypes']]
        tt_lb_param = list_to_param('tt_lb', tt_lb)
        tt_ub = [m['tt_ub'] for m in ttbndsspec['tourtypes']]
        tt_ub_param = list_to_param('tt_ub', tt_ub)
    else:
        tt_lb = [m['tt_lb'] for m in ttspec['tourtypes']]
        tt_lb_param = list_to_param('tt_lb', tt_lb)
        tt_ub = [m['tt_ub'] for m in ttspec['tourtypes']]
        tt_ub_param = list_to_param('tt_ub', tt_ub)

    # Cost multiplier
    tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
    tt_cost_multiplier_param = list_to_param('tt_cost_multiplier', tt_cost_multiplier)

    # Min and max cumulative days and prds worked over the weeks
    tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')

    tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')

    tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')

    tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')

    # Min and max days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')

    tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')

    tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')

    tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')

    # Min and max days and prds worked each week
    tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')

    tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')

    tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')

    tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')

    # Min and max cumulative days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')

    tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')

    tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')

    tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')

    # Put the parameter pieces together into a single StringIO object
    print >>mixout, numlen_param
    print >>mixout, len_param
    print >>mixout, numttypes_param
    print >>mixout, lenxset_set
    print >>mixout, midthresh_param
    print >>mixout, tt_lb_param
    print >>mixout, tt_ub_param
    print >>mixout, tt_cost_multiplier_param
    print >>mixout, ptflags_param
    print >>mixout, ptfrac_param
    print >>mixout, width_param

    print >>mixout, tt_min_cumul_dys_weeks_param
    print >>mixout, tt_max_cumul_dys_weeks_param
    print >>mixout, tt_min_cumul_prds_weeks_param
    print >>mixout, tt_max_cumul_prds_weeks_param

    print >>mixout, tt_min_dys_weeks_param
    print >>mixout, tt_max_dys_weeks_param
    print >>mixout, tt_min_prds_weeks_param
    print >>mixout, tt_max_prds_weeks_param

    print >>mixout, tt_shiftlen_min_dys_weeks_param
    print >>mixout, tt_shiftlen_max_dys_weeks_param
    print >>mixout, tt_shiftlen_min_prds_weeks_param
    print >>mixout, tt_shiftlen_max_prds_weeks_param

    print >>mixout, tt_shiftlen_min_cumul_dys_weeks_param
    print >>mixout, tt_shiftlen_max_cumul_dys_weeks_param
    print >>mixout, tt_shiftlen_min_cumul_prds_weeks_param
    print >>mixout, tt_shiftlen_max_cumul_prds_weeks_param

    # print mixout.getvalue()

    if isStringIO:
        return mixout.getvalue()
    else:
        # NOTE(review): mixout.read() here returns '' because the stream
        # position is at the end after the writes (no seek(0)) — confirm
        # whether callers ever use isStringIO=False.
        smixout = mixout.read()
        return smixout
5,331,296
def clear_path(path: Path):
    """
    Clears folder, including deleting sub folders.

    Files and symlinks are unlinked; directories are removed recursively.
    Failures are reported to stdout but do not abort the remaining entries.
    """
    for entry_name in os.listdir(path):
        entry = path.joinpath(entry_name)
        try:
            if os.path.isfile(entry) or os.path.islink(entry):
                os.unlink(entry)
            elif os.path.isdir(entry):
                shutil.rmtree(entry)
        except Exception as e:
            # Best-effort cleanup: report and keep going.
            print('Failed to delete %s. Reason: %s' % (entry, e))
5,331,297
def taskAcq(timestamp,img):
    """
    Task the workers in acquisition mode by splitting the camera image
    into segments and dealing them out to the workers

    (Python 2 module. Mutates the module globals workerLastTasked and
    outstandingRequests; one sub-image message is queued per entry in
    the module-level `regions` list, dealt round-robin over taskQueue.)

    TODO: Check for any busy and skip tasking
    """
    global workerLastTasked,outstandingRequests
    n = 0

    # Keep a place for the results
    task = request(timestamp,len(regions))
    print task.timestamp,task.numberAssignments
    outstandingRequests[timestamp] = task

    # Split the image and send it to the workersn
    for r in regions:
        # Region entries are fractional: (left_frac, top_frac, size_frac).
        w,h,c = img.shape
        left = int(r[0] * w)
        top = int(r[1] * h)
        width = int(r[2] * w)
        height = int(r[2] * h)
        rect = (left,top,width,height)
        # NOTE(review): the slice uses width on the row axis and height on
        # the column axis (img[top:top+width, left:left+height]) — confirm
        # this axis pairing is intentional.
        img1 = img[top:top+width,left:left+height]
        msg = [timestamp,img1,rect,ft.detectThreshold]
        q = taskQueue[n]
        q.put(msg)
        workerLastTasked = n
        # Round-robin to the next worker queue.
        n = n + 1;
        if n >= len(taskQueue):
            n = 0
5,331,298
def get_setting(setting_name: str, default: Any=None) -> Any:
    """
    Convenience wrapper to get the value of a setting.

    Raises if called before the global configuration has been initialised.
    """
    configuration = get_configuration()
    if configuration:
        return configuration.get_setting_live(setting_name, default)
    raise Exception('get_setting() called before configuration was initialised')  # pragma: no cover
5,331,299