content
stringlengths
22
815k
id
int64
0
4.91M
def create_role(session, project_name, project_model_nexus):
    """Create local Nexus roles for a project.

    Translates every privilege listed in the project model into role
    privileges, then creates the role in Nexus (or updates it when a role
    with that name already exists).

    :param session: an opened session of NexusSession
    :param project_name: name of project
    :param project_model_nexus: nexus part of project map dict
    """
    existing_roles = Role(session).list()
    privileges_for_role = {}
    for repo in project_model_nexus['repository']:
        repo_name = repo['name']
        repo_type = repo['repoType']
        # Nexus names the maven format "maven2" internally.
        if repo_type == 'maven':
            repo_type = 'maven2'
        for role_key, role_privileges in project_model_nexus['privileges'].items():
            full_role_name = f'rb-{project_name}-{role_key}'
            for privilege in role_privileges:
                privileges_for_role = privilege_translation(privilege,
                                                            privileges_for_role,
                                                            full_role_name,
                                                            repo_name,
                                                            repo_type)
    # TODO: deletion of roles that are absent from the project model is
    # still required here.
    for role_name, role_privileges in privileges_for_role.items():
        if role_name in existing_roles:
            Role(session).update(name=role_name, privileges=role_privileges)
        else:
            Role(session).create(name=role_name, privileges=role_privileges)
29,100
def init():
    """Initialize the module-level GRN state.

    Reads the ``400.gml`` GRN graph and the pickled node-centrality mapping
    from the ``grn_endpoint`` directory, then populates the module globals:

    - ``grn_graph``: the graph with integer node labels starting at 0
    - ``n_motif``: per-node centrality values (loaded from pickle)
    - ``e_motif``: per-edge value, the min of its endpoints' centralities
    - ``PI``: maximum ``e_motif`` value seen over all edges
    - ``GRN_edges``: set to ``True`` for every edge of the graph
    - ``mapping``: filled via ``mapping_function`` (nodes ordered by
      non-increasing centrality)
    """
    global grn_graph
    parent_path = os.getcwd()
    file_prefix = '400'
    file_name = file_prefix + '.gml'
    grn_file_path = os.path.join(parent_path, 'grn_endpoint', file_name)
    grn_graph = nx.read_gml(grn_file_path)
    grn_graph = nx.convert_node_labels_to_integers(grn_graph, first_label=0)
    global n_motif
    global e_motif
    global mapping
    global PI
    centrality_file = file_prefix + '_centrality.p'
    centrality_file_path = os.path.join(
        parent_path, 'grn_endpoint', centrality_file)
    # Bugfix: use a context manager so the pickle file handle is closed
    # (the original `pickle.load(open(...))` leaked the descriptor).
    with open(centrality_file_path, "rb") as centrality_fh:
        n_motif = pickle.load(centrality_fh)
    for edge in grn_graph.edges:
        node1, node2 = edge
        # An edge is only as central as its least central endpoint.
        e_motif[edge] = min(n_motif[node1], n_motif[node2])
        PI = max(PI, e_motif[edge])
    non_increasing_grn_nodes = [node[0] for node in sorted(n_motif.items(),
                                                           key=lambda node: node[1],
                                                           reverse=True)]
    mapping_function(non_increasing_grn_nodes, grn_graph)
    for edge in grn_graph.edges:
        GRN_edges[edge] = True
29,101
def test_remove_autosave_file(editor_bot, mocker, qtbot):
    """Test that remove_autosave_file() removes the autosave file.

    Also, test that it updates `name_mapping`.
    """
    editor_stack, editor = editor_bot
    autosave = editor_stack.autosave

    # Trigger an autosave so the file and the mapping entry exist.
    editor.set_text('spam\n')
    autosave.maybe_autosave(0)
    autosave_filename = os.path.join(get_conf_path('autosave'), 'foo.py')
    assert os.access(autosave_filename, os.R_OK)
    assert autosave.name_mapping == {'foo.py': autosave_filename}

    # Removing the autosave must delete the file and clear the mapping.
    autosave.remove_autosave_file(editor_stack.data[0].filename)
    assert not os.access(autosave_filename, os.R_OK)
    assert autosave.name_mapping == {}
29,102
def threshold_xr_via_auc(ds, df, res_factor=3, if_nodata='any'):
    """
    Takes a xarray dataset/array of gdv likelihood values and thresholds them
    according to a pandas dataframe (df) of field occurrence points. Scipy
    roc curve and auc is generated to perform thresholding. Pandas dataframe
    must include absences along with presences or the roc curve cannot be
    performed.

    Parameters
    ----------
    ds : xarray dataset/array
        A dataset with x, y and time dims with likelihood values.
    df : pandas dataframe
        A dataframe of field occurrences with x, y values and
        presence, absence column.
    res_factor : int
        Controls the tolerance of occurence points intersection with
        nearest pixels. In other words, number of pixels that a occurrence
        point can be 'out'.
    if_nodata : str
        Whether to exclude a point from the auc threshold method if any
        or all values are nan. Default is any.

    Returns
    ----------
    ds_thresh : xarray dataset or array.
    """

    # imports check
    try:
        from sklearn.metrics import roc_curve, roc_auc_score
    except:
        raise ImportError('Could not import sklearn.')

    # notify
    print('Thresholding dataset via occurrence records and AUC.')

    # check xr type, dims
    if not isinstance(ds, (xr.Dataset, xr.DataArray)):
        raise TypeError('Dataset not an xarray type.')
    elif 'x' not in list(ds.dims) or 'y' not in list(ds.dims):
        raise ValueError('No x or y dimensions in dataset.')

    # we need a dataset, try and convert from array
    was_da = False
    if isinstance(ds, xr.DataArray):
        try:
            was_da = True
            ds = ds.to_dataset(dim='variable')
        except:
            raise TypeError('Failed to convert xarray DataArray to Dataset.')

    # check if pandas type, columns, actual field
    if not isinstance(df, pd.DataFrame):
        raise TypeError('Occurrence records is not a pandas type.')
    elif 'x' not in df or 'y' not in df:
        raise ValueError('No x, y fields in occurrence records.')
    elif 'actual' not in df:
        raise ValueError('No actual field in occurrence records.')

    # check if nodatavals is in dataset
    if not hasattr(ds, 'nodatavals') or ds.nodatavals == 'unknown':
        raise AttributeError('Dataset does not have a nodatavalue attribute.')

    # check if res factor and if_nodata valid
    # bugfix: original used `and`, which let non-positive ints and
    # float factors >= 1 slip through the validation.
    if not isinstance(res_factor, int) or res_factor < 1:
        raise TypeError('Resolution factor must be an integer of 1 or greater.')
    elif if_nodata not in ['any', 'all']:
        raise TypeError('If nodata policy must be either any or all.')

    # split ds into arrays depending on dims
    da_list = [ds]
    if 'time' in ds.dims:
        da_list = [ds.sel(time=dt) for dt in ds['time']]

    # loop each slice, threshold to auc
    thresh_list = []
    for da in da_list:

        # take a copy
        da = da.copy(deep=True)

        # intersect points with current da
        df_data = df[['x', 'y', 'actual']].copy()
        df_data = tools.intersect_records_with_xr(ds=da,
                                                  df_records=df_data,
                                                  extract=True,
                                                  res_factor=res_factor,
                                                  if_nodata=if_nodata)

        # remove no data
        df_data = tools.remove_nodata_records(df_data,
                                              nodata_value=ds.nodatavals)

        # check if dataframe has 1s and 0s only
        unq = df_data['actual'].unique()
        if not np.any(unq == 1) or not np.any(unq == 0):
            raise ValueError('Occurrence records do not contain 1s and/or 0s.')
        elif len(unq) != 2:
            raise ValueError('Occurrence records contain more than just 1s and/or 0s.')

        # rename column, add column of actuals (1s)
        df_data = df_data.rename(columns={'like': 'predicted'})

        # get fpr, tpr, thresh, auc and optimal threshold
        fpr, tpr, thresholds = roc_curve(df_data['actual'], df_data['predicted'])
        auc = roc_auc_score(df_data['actual'], df_data['predicted'])
        cut_off = thresholds[np.argmax(tpr - fpr)]

        # threshold da to cutoff and append
        da = da.where(da > cut_off)
        thresh_list.append(da)

        # notify
        if 'time' in ds.dims:
            print('AUC: {0} for time: {1}.'.format(round(auc, 3), da['time'].values))
        else:
            print('AUC: {0} for whole dataset.'.format(round(auc, 3)))

        # debug output of the curve and chosen cut-off
        for e in fpr:
            print(e)
        print('\n')
        for e in tpr:
            print(e)
        print('\n')
        print(auc)
        print('\n')
        print(cut_off)

        # show
        print('- ' * 30)
        plt.show()
        print('- ' * 30)
        print('')

    # concat array back together
    if len(thresh_list) > 1:
        ds_thresh = xr.concat(thresh_list, dim='time').sortby('time')
    else:
        ds_thresh = thresh_list[0]

    if was_da:
        ds_thresh = ds_thresh.to_array()

    # notify and return
    print('Thresholded dataset successfully.')
    return ds_thresh
29,103
def check_travis_python_versions(python_versions_in_travis, all_results):
    """Add list of python versions to the results.

    NOTE(review): ``module_dict_key`` is a module-level name; the entry for
    it is assumed to already exist in ``all_results``.
    """
    module_results = all_results[module_dict_key]
    module_results["python_versions"] = python_versions_in_travis
29,104
def plot_pie_all(day):
    """Plot two pie charts of the distribution of vehicles by terminal,
    parking and pass through."""
    plt.figure(figsize=(20, 8))
    labels = ['A', 'B', 'C', 'D', 'E', 'Pass Through', 'Parking']
    inter_labels = ['A', 'B', 'C', 'D', 'E', 'pass', 'parking']

    # Broad breakdown: all terminals combined vs pass-through vs parking.
    broad = [day['terminal_tot'].sum(axis=0),
             day['pass'].sum(axis=0),
             day['parking'].sum(axis=0)]
    broad_label = ['Terminal', 'Pass Through', 'Parking']
    explode_broad = np.zeros(3)
    explode_broad[np.argmax(broad, axis=0)] = .1
    explode_broad[np.argmin(broad, axis=0)] = .1

    colors = ['#8c510a', '#d8b365', '#f6e8c3', '#f5f5f5',
              '#c7eae5', '#5ab4ac', '#01665e']
    # Per-category totals, one slice per label.
    sizes = [day[col].sum(axis=0) for col in inter_labels]
    explode = np.zeros(len(sizes))
    # Pull out the largest and smallest slices for emphasis.
    explode[np.argmax(sizes, axis=0)] = .1
    explode[np.argmin(sizes, axis=0)] = .1

    plt.subplot(1, 2, 1)
    plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90, colors=colors)
    plt.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.

    plt.subplot(1, 2, 2)
    plt.pie(broad, explode=explode_broad, labels=broad_label, autopct='%1.1f%%',
            shadow=True, startangle=90, colors=colors)
    plt.show()
29,105
def kd_or_scan(func=None, array=None, extra_data=None):
    """Decorator to allow functions to be called with a scan number or kd object.

    When the wrapped function receives an integer scan number, the scan is
    first read via ``read_scan`` and the resulting object is passed on;
    any other argument is forwarded unchanged.

    :param func: function to wrap (None when used with keyword arguments,
        in which case a partially-applied decorator is returned)
    :param array: forwarded to ``read_scan``
    :param extra_data: forwarded to ``read_scan``
    """
    if func is None:
        # Called as @kd_or_scan(array=..., extra_data=...): return the
        # decorator with the keyword arguments bound.
        return partial(kd_or_scan, array=array, extra_data=extra_data)

    @wraps(func)
    def wrapper(scan, *args, **kwargs):
        # If scan number given, read the scan into the object and pass it
        # to the function. Bugfix: `np.int` was removed in NumPy 1.24;
        # `np.integer` matches every NumPy integer dtype (incl. int64).
        if isinstance(scan, (int, np.integer)):
            scan = read_scan(scan, array=array, extra_data=extra_data)
        return func(scan, *args, **kwargs)

    return wrapper
29,106
def matching_intervals(original: DomainNode, approx: DomainNode, conf: float) -> bool:
    """Check whether two interval sets match with respect to a confidence interval.

    Returns True only if every variable of ``original`` has matching bounds
    in ``approx``.
    """
    return all(
        matching_bounds(original.domains[var], approx.domains[var], conf)
        for var in original.variables
    )
29,107
def _make_label_sigmoid_cross_entropy_loss(logits, present_labels, split):
    """ Helper function to create label loss

    Parameters
    ----------
    logits: tensor of shape [batch_size, num_verts, num_labels]
    present_labels: tensor of shape [batch_size, num_verts, num_labels];
        labels of labelled verts
    split: tensor of shape [batch_size, num_verts], 0 if censored,
        1 if not censored

    Returns
    -------
    The cross-entropy loss corresponding to the label.
    """
    # Batched input carries its batch size in dim 0; unbatched counts as 1.
    if len(logits.shape) == 3:
        batch_size = tf.cast(tf.shape(input=logits)[0], dtype=tf.float32)
    else:
        batch_size = 1

    per_vert_losses = tf.compat.v1.losses.sigmoid_cross_entropy(
        present_labels,
        logits=logits,
        weights=tf.expand_dims(split, -1),
        reduction=tf.compat.v1.losses.Reduction.NONE)

    # sum rather than (tf default of) mean because ¯\_(ツ)_/¯
    summed_loss = tf.reduce_sum(input_tensor=per_vert_losses)
    return summed_loss / batch_size
29,108
def train(model, optimizer, loss_fn, dataloader, metrics, params):
    """Train the model on `num_steps` batches

    Args:
        model: (torch.nn.Module) the neural network
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: a function that takes batch_output and batch_labels and
            computes the loss for the batch
        dataloader: (DataLoader) a torch.utils.data.DataLoader object that
            fetches training data
        metrics: (dict) a dictionary of functions that compute a metric using
            the output and labels of each batch
        params: (Params) hyperparameters
    """
    # set model to training mode
    model.train()

    # summary for current training loop and a running average object for loss
    summ = []
    loss_avg = utils.RunningAverage()

    with tqdm(disable=False) as t:
        for i, (train_batch, labels_batch) in enumerate(dataloader):
            # move to GPU if available
            if params.cuda:
                train_batch = train_batch.cuda(non_blocking=True)
                labels_batch = labels_batch.cuda(non_blocking=True)
            # convert to torch Variables
            train_batch = Variable(train_batch)
            labels_batch = Variable(labels_batch)

            # forward pass and loss
            output_batch = model(train_batch)
            loss = loss_fn(output_batch, labels_batch)

            # clear previous gradients, backprop, update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Evaluate summaries only once in a while
            if i % params.save_summary_steps == 0:
                # extract data from torch Variable, move to cpu, numpy arrays
                output_np = output_batch.data.cpu().numpy()
                labels_np = labels_batch.data.cpu().numpy()

                # compute all metrics on this batch
                summary_batch = {name: fn(output_np, labels_np)
                                 for name, fn in metrics.items()}
                summary_batch['loss'] = loss.item()
                summ.append(summary_batch)

            # update the average loss
            loss_avg.update(loss.item())
            t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
            t.update()

    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ])
                    for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
                                for k, v in metrics_mean.items())
    logging.info("- Train metrics: " + metrics_string)
29,109
def _merge_meta(base, child): """Merge the base and the child meta attributes. List entries, such as ``indexes`` are concatenated. ``abstract`` value is set to ``True`` only if defined as such in the child class. Args: base (dict): ``meta`` attribute from the base class. child (dict): ``meta`` attribute from the child class. Returns: dict: Merged metadata. """ base = copy.deepcopy(base) child.setdefault('abstract', False) for key, value in child.items(): if isinstance(value, list): base.setdefault(key, []).extend(value) else: base[key] = value return base
29,110
def _parse_quotes(quotes_dict: Dict[str, Dict[str, Dict[str, Any]]]) -> "RegionalQuotes":
    """ Parse quote data for a :class:`~.DetailedProduct`.

    The input is a nested mapping of GSP -> payment method -> fuel ->
    raw quote fields; each innermost dict is turned into a ``Quote``.

    :param quotes_dict:
    """
    parsed: RegionalQuotes = RegionalQuotes()
    for gsp, payment_methods in quotes_dict.items():
        parsed[gsp] = {
            method: {fuel: Quote(**raw) for fuel, raw in fuels.items()}
            for method, fuels in payment_methods.items()
        }
    return parsed
29,111
def train_validation(train_df, valid_df, epochs=100, batch_size=512, plot=False, nn_args={}):
    """ Wrapper for training on the complete training data and evaluating
    the performance on the hold-out set.

    Parameter:
    -------------------
    train_df: df, train df with features
    valid_df: df, validation df with features

    Returns:
    -------------------
    res_df: metrics
    nnmodel: neural network model
    """
    # format the dataframes for ML
    X_train_full, Seqs_train_full, y_train_full = process_df(train_df)
    X_valid_full, Seqs_valid_full, y_valid_full = process_df(valid_df)

    # encode class values as integers
    encoder = LabelEncoder()
    encoder.fit(y_train_full)

    # output dims depending on the number of fractions
    output_dims = len(np.unique(train_df.Fraction))
    input_dims = X_train_full.shape[1]
    nnmodel = models.SAX_Model(output_dim=output_dims, input_dim=input_dims, **nn_args)
    print(nnmodel.summary())

    # fit the model to the complete training data
    history = nnmodel.fit(np.array(X_train_full),
                          np_utils.to_categorical(encoder.transform(y_train_full)),
                          epochs=epochs, batch_size=batch_size)

    # predictions; argmax + 1 maps class index back to fraction number
    yhat_train_prob = nnmodel.predict(np.array(X_train_full))
    yhat_train_disc = yhat_train_prob.argmax(axis=1) + 1
    yhat_val_prob = nnmodel.predict(np.array(X_valid_full))
    yhat_val_disc = yhat_val_prob.argmax(axis=1) + 1

    # evaluate
    res_train = pd.DataFrame(eval_predictions_complex(y_train_full, yhat_train_disc, "keras_Train"))
    res_valid = pd.DataFrame(eval_predictions_complex(y_valid_full, yhat_val_disc, "keras_Valid"))
    res_df = pd.concat([res_train.transpose(), res_valid.transpose()])
    res_df.columns = eval_predictions_complex(None, None, None, True)

    if plot:
        x = np.arange(-4, 30, 1)
        ax1 = sns.jointplot(x=y_valid_full, y=yhat_val_disc, kind="kde",
                            xlim=(-4, 30), ylim=(-4, 30))
        ax1.set_axis_labels(xlabel="True Fraction", ylabel="Prediction")
        ax1.ax_joint.plot(x, x, '-k')

    print("Results on the validation data:")
    print(res_df)
    return (res_df, nnmodel, history)
29,112
def _log_to_temp_dir(mocker, tmp_path):
    """Stub the logging dir to tmp_path."""
    # Redirect the application's LOGGING_DIR into the pytest tmp dir for
    # the duration of the test.
    mocker.patch.object(youtube_monitor_action.__main__,
                        "LOGGING_DIR",
                        tmp_path / "logs")
    yield
29,113
def main():
    """
    The programme will find the part in the given strand with the highest
    similarity with the strand inputted.
    """
    search_strand = input('Please give me a DNA sequence to search: ').upper()
    match_strand = input('What DNA sequence would you like to match? ').upper()
    best, similarity = homology(search_strand, match_strand)
    print('The best match is ' + best)
    print('The similarity is ' + str(similarity * 100) + '%')
29,114
def retrieve_email() -> str:
    """
    Uses the Git command to retrieve the current configured user email address.

    :return: The global configured user email.
    """
    completed = subprocess.run(
        ["git", "config", "--get", "user.email"],
        capture_output=True,
        text=True,
    )
    # git terminates the value with a newline; strip only that.
    return completed.stdout.strip("\n")
29,115
def bootstrap_compute(
    hind,
    verif,
    hist=None,
    alignment="same_verifs",
    metric="pearson_r",
    comparison="m2e",
    dim="init",
    reference=["uninitialized", "persistence"],
    resample_dim="member",
    sig=95,
    iterations=500,
    pers_sig=None,
    compute=compute_hindcast,
    resample_uninit=bootstrap_uninitialized_ensemble,
    reference_compute=compute_persistence,
    **metric_kwargs,
):
    """Bootstrap compute with replacement.

    Args:
        hind (xr.Dataset): prediction ensemble.
        verif (xr.Dataset): Verification data.
        hist (xr.Dataset): historical/uninitialized simulation.
        metric (str): `metric`. Defaults to 'pearson_r'.
        comparison (str): `comparison`. Defaults to 'm2e'.
        dim (str or list): dimension(s) to apply metric over. default: 'init'.
        reference (str, list of str): Type of reference forecasts with which
            to verify. One or more of ['persistence', 'uninitialized'].
            If None or empty, returns no p value.
        resample_dim (str): dimension to resample from. default: 'member'::

            - 'member': select a different set of members from hind
            - 'init': select a different set of initializations from hind

        sig (int): Significance level for uninitialized and initialized
            skill. Defaults to 95.
        pers_sig (int): Significance level for persistence skill confidence
            levels. Defaults to sig.
        iterations (int): number of resampling iterations (bootstrap with
            replacement). Defaults to 500.
        compute (func): function to compute skill. Choose from
            [:py:func:`climpred.prediction.compute_perfect_model`,
            :py:func:`climpred.prediction.compute_hindcast`].
        resample_uninit (func): function to create an uninitialized ensemble
            from a control simulation or uninitialized large ensemble.
        reference_compute (func): function to compute a reference forecast
            skill with. Default:
            :py:func:`climpred.prediction.compute_persistence`.
        ** metric_kwargs (dict): additional keywords to be passed to metric
            (see the arguments required for a given metric in :ref:`Metrics`).

    Returns:
        results: (xr.Dataset): bootstrapped results for the three different
            skills:

            - `initialized` for the initialized hindcast `hind` and
              describes skill due to initialization and external forcing
            - `uninitialized` for the uninitialized/historical and
              approximates skill from external forcing
            - `persistence` for the persistence forecast computed by
              `compute_persistence`

            the different results:

            - `verify skill`: skill values
            - `p`: p value
            - `low_ci` and `high_ci`: high and low ends of confidence
              intervals based on significance threshold `sig`

    Reference:
        * Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P. Gonzalez,
          V. Kharin, et al. "A Verification Framework for
          Interannual-to-Decadal Predictions Experiments." Climate Dynamics
          40, no. 1-2 (January 1, 2013): 245-72. https://doi.org/10/f4jjvf.

    See also:
        * climpred.bootstrap.bootstrap_hindcast
        * climpred.bootstrap.bootstrap_perfect_model
    """
    warn_if_chunking_would_increase_performance(hind, crit_size_in_MB=5)

    # normalize arguments
    if pers_sig is None:
        pers_sig = sig
    if isinstance(dim, str):
        dim = [dim]
    if isinstance(reference, str):
        reference = [reference]
    if reference is None:
        reference = []

    # confidence-interval bounds for initialized/uninitialized and persistence
    p = (100 - sig) / 100
    ci_low = p / 2
    ci_high = 1 - p / 2
    p_pers = (100 - pers_sig) / 100
    ci_low_pers = p_pers / 2
    ci_high_pers = 1 - p_pers / 2

    # get metric/comparison function name, not the alias
    metric = METRIC_ALIASES.get(metric, metric)
    comparison = COMPARISON_ALIASES.get(comparison, comparison)
    # get class Metric(metric)
    metric = get_metric_class(metric, ALL_METRICS)
    # get comparison function
    comparison = get_comparison_class(comparison, ALL_COMPARISONS)

    # Perfect Model requires `same_inits` setup
    isHindcast = True if comparison.name in HINDCAST_COMPARISONS else False
    reference_alignment = alignment if isHindcast else "same_inits"
    chunking_dims = [d for d in hind.dims if d not in CLIMPRED_DIMS]

    # carry alignment for compute_reference separately
    metric_kwargs_reference = metric_kwargs.copy()
    metric_kwargs_reference["alignment"] = reference_alignment
    # carry alignment in metric_kwargs
    if isHindcast:
        metric_kwargs["alignment"] = alignment

    if hist is None:  # PM path, use verif = control
        hist = verif

    # slower path for hindcast and resample_dim init
    if resample_dim == "init" and isHindcast:
        warnings.warn("resample_dim=`init` will be slower than resample_dim=`member`.")
        (
            bootstrapped_init_skill,
            bootstrapped_uninit_skill,
            bootstrapped_pers_skill,
        ) = _bootstrap_hindcast_over_init_dim(
            hind,
            hist,
            verif,
            dim,
            reference,
            resample_dim,
            iterations,
            metric,
            comparison,
            compute,
            reference_compute,
            resample_uninit,
            **metric_kwargs,
        )
    else:
        # faster: first _resample_iterations_idx, then compute skill
        resample_func = _get_resample_func(hind)
        if not isHindcast:
            if "uninitialized" in reference:
                # create more members than needed in PM to make the
                # uninitialized distribution more robust
                members_to_sample_from = 50
                repeat = members_to_sample_from // hind.member.size + 1
                uninit_hind = xr.concat(
                    [resample_uninit(hind, hist) for i in range(repeat)],
                    dim="member",
                    **CONCAT_KWARGS,
                )
                uninit_hind["member"] = np.arange(1, 1 + uninit_hind.member.size)
                if dask.is_dask_collection(uninit_hind):
                    # too minimize tasks: ensure uninit_hind get pre-computed
                    # alternativly .chunk({'member':-1})
                    uninit_hind = uninit_hind.compute().chunk()
                # resample uninit always over member and select only
                # hind.member.size members
                bootstrapped_uninit = resample_func(
                    uninit_hind,
                    iterations,
                    "member",
                    replace=False,
                    dim_max=hind["member"].size,
                )
                bootstrapped_uninit["lead"] = hind["lead"]
                # effectively only when _resample_iteration_idx which doesnt
                # use dim_max
                bootstrapped_uninit = bootstrapped_uninit.isel(
                    member=slice(None, hind.member.size)
                )
                if dask.is_dask_collection(bootstrapped_uninit):
                    bootstrapped_uninit = bootstrapped_uninit.chunk({"member": -1})
                    bootstrapped_uninit = _maybe_auto_chunk(
                        bootstrapped_uninit, ["iteration"] + chunking_dims
                    )
        else:  # hindcast
            if "uninitialized" in reference:
                uninit_hind = resample_uninit(hind, hist)
                if dask.is_dask_collection(uninit_hind):
                    # too minimize tasks: ensure uninit_hind get pre-computed
                    # maybe not needed
                    uninit_hind = uninit_hind.compute().chunk()
                bootstrapped_uninit = resample_func(
                    uninit_hind, iterations, resample_dim
                )
                bootstrapped_uninit = bootstrapped_uninit.isel(
                    member=slice(None, hind.member.size)
                )
                bootstrapped_uninit["lead"] = hind["lead"]
                if dask.is_dask_collection(bootstrapped_uninit):
                    bootstrapped_uninit = _maybe_auto_chunk(
                        bootstrapped_uninit.chunk({"lead": 1}),
                        ["iteration"] + chunking_dims,
                    )

        if "uninitialized" in reference:
            bootstrapped_uninit_skill = compute(
                bootstrapped_uninit,
                verif,
                metric=metric,
                comparison="m2o" if isHindcast else comparison,
                dim=dim,
                add_attrs=False,
                **metric_kwargs,
            )
            # take mean if 'm2o' comparison forced before
            if isHindcast and comparison != __m2o:
                bootstrapped_uninit_skill = bootstrapped_uninit_skill.mean("member")

        bootstrapped_hind = resample_func(hind, iterations, resample_dim)
        if dask.is_dask_collection(bootstrapped_hind):
            bootstrapped_hind = bootstrapped_hind.chunk({"member": -1})

        bootstrapped_init_skill = compute(
            bootstrapped_hind,
            verif,
            metric=metric,
            comparison=comparison,
            add_attrs=False,
            dim=dim,
            **metric_kwargs,
        )

        if "persistence" in reference:
            if not metric.probabilistic:
                pers_skill = reference_compute(
                    hind,
                    verif,
                    metric=metric,
                    dim=dim,
                    **metric_kwargs_reference,
                )
                # bootstrap pers
                if resample_dim == "init":
                    bootstrapped_pers_skill = reference_compute(
                        bootstrapped_hind,
                        verif,
                        metric=metric,
                        **metric_kwargs_reference,
                    )
                else:  # member
                    _, bootstrapped_pers_skill = xr.broadcast(
                        bootstrapped_init_skill, pers_skill, exclude=CLIMPRED_DIMS
                    )
            else:
                bootstrapped_pers_skill = bootstrapped_init_skill.isnull()

    # calc mean skill without any resampling
    init_skill = compute(
        hind,
        verif,
        metric=metric,
        comparison=comparison,
        dim=dim,
        **metric_kwargs,
    )

    if "uninitialized" in reference:
        # uninit skill as mean resampled uninit skill
        uninit_skill = bootstrapped_uninit_skill.mean("iteration")
    if "persistence" in reference:
        if not metric.probabilistic:
            pers_skill = reference_compute(
                hind, verif, metric=metric, dim=dim, **metric_kwargs_reference
            )
        else:
            pers_skill = init_skill.isnull()
        # align to prepare for concat
        if set(bootstrapped_pers_skill.coords) != set(bootstrapped_init_skill.coords):
            if (
                "time" in bootstrapped_pers_skill.dims
                and "init" in bootstrapped_init_skill.dims
            ):
                bootstrapped_pers_skill = bootstrapped_pers_skill.rename(
                    {"time": "init"}
                )
            # allow member to be broadcasted
            bootstrapped_init_skill, bootstrapped_pers_skill = xr.broadcast(
                bootstrapped_init_skill,
                bootstrapped_pers_skill,
                exclude=("init", "lead", "time"),
            )

    # get confidence intervals CI
    init_ci = _distribution_to_ci(bootstrapped_init_skill, ci_low, ci_high)
    if "uninitialized" in reference:
        uninit_ci = _distribution_to_ci(bootstrapped_uninit_skill, ci_low, ci_high)
    # probabilistic metrics wont have persistence forecast
    # therefore only get CI if persistence was computed
    if "persistence" in reference:
        if "iteration" in bootstrapped_pers_skill.dims:
            pers_ci = _distribution_to_ci(
                bootstrapped_pers_skill, ci_low_pers, ci_high_pers
            )
        else:
            # otherwise set all persistence outputs to false
            pers_ci = init_ci == -999

    # pvalue whether uninit or pers better than init forecast
    if "uninitialized" in reference:
        p_uninit_over_init = _pvalue_from_distributions(
            bootstrapped_uninit_skill, bootstrapped_init_skill, metric=metric
        )
    if "persistence" in reference:
        p_pers_over_init = _pvalue_from_distributions(
            bootstrapped_pers_skill, bootstrapped_init_skill, metric=metric
        )

    # wrap results together in one xr object
    if reference == []:
        results = xr.concat(
            [
                init_skill,
                init_ci.isel(quantile=0, drop=True),
                init_ci.isel(quantile=1, drop=True),
            ],
            dim="results",
        )
        results["results"] = ["verify skill", "low_ci", "high_ci"]
        results["skill"] = ["initialized"]
        results = results.squeeze()
    elif reference == ["persistence"]:
        skill = xr.concat([init_skill, pers_skill], dim="skill", **CONCAT_KWARGS)
        skill["skill"] = ["initialized", "persistence"]
        # ci for each skill
        ci = xr.concat([init_ci, pers_ci], "skill", coords="minimal").rename(
            {"quantile": "results"}
        )
        ci["skill"] = ["initialized", "persistence"]
        results = xr.concat([skill, p_pers_over_init], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
    elif reference == ["uninitialized"]:
        skill = xr.concat([init_skill, uninit_skill], dim="skill", **CONCAT_KWARGS)
        skill["skill"] = ["initialized", "uninitialized"]
        # ci for each skill
        ci = xr.concat([init_ci, uninit_ci], "skill", coords="minimal").rename(
            {"quantile": "results"}
        )
        ci["skill"] = ["initialized", "uninitialized"]
        results = xr.concat(
            [skill, p_uninit_over_init], dim="results", **CONCAT_KWARGS
        )
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
    elif set(reference) == set(["uninitialized", "persistence"]):
        skill = xr.concat(
            [init_skill, uninit_skill, pers_skill], dim="skill", **CONCAT_KWARGS
        )
        skill["skill"] = ["initialized", "uninitialized", "persistence"]
        # probability that i beats init
        p = xr.concat(
            [p_uninit_over_init, p_pers_over_init], dim="skill", **CONCAT_KWARGS
        )
        p["skill"] = ["uninitialized", "persistence"]
        # ci for each skill
        ci = xr.concat(
            [init_ci, uninit_ci, pers_ci], "skill", coords="minimal"
        ).rename({"quantile": "results"})
        ci["skill"] = ["initialized", "uninitialized", "persistence"]
        results = xr.concat([skill, p], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
    else:
        raise ValueError("results not created")

    # Attach climpred compute information to skill
    metadata_dict = {
        "confidence_interval_levels": f"{ci_high}-{ci_low}",
        "bootstrap_iterations": iterations,
        "reference": reference,
    }
    if reference is not None:
        metadata_dict[
            "p"
        ] = "probability that reference performs better than initialized"
    metadata_dict.update(metric_kwargs)
    results = assign_attrs(
        results,
        hind,
        alignment=alignment,
        metric=metric,
        comparison=comparison,
        dim=dim,
        function_name=inspect.stack()[0][3],  # take function.__name__
        metadata_dict=metadata_dict,
    )
    # Ensure that the lead units get carried along for the calculation. The
    # attribute tends to get dropped along the way due to ``xarray``
    # functionality.
    results["lead"] = hind["lead"]
    if "units" in hind["lead"].attrs and "units" not in results["lead"].attrs:
        results["lead"].attrs["units"] = hind["lead"].attrs["units"]
    return results
29,116
def main():
    """CLI Main"""
    git_author_name = derive_git_author()
    user = derive_user()
    branch = None
    try:
        branch = derive_branch()
        perform_precommit_sanity_checks(git_author_name, user, branch)
    # This is purposefully broad: we want to catch any possible error here so
    # that an email can be sent to SDD explaining what happened. The entire
    # traceback will be captured
    # pylint: disable=broad-except
    except Exception as error:
        full_traceback = traceback.format_exc()
        error_msg = f"ERROR: {error}\nNO CODE HAS BEEN COMMITTED!"
        print(error_msg, file=sys.stderr)
        subject = (
            f"GBT Config Warden: User '{user}' has attempted to commit "
            f"files to {GBT_CONFIG_PATH}"
        )
        email_text = (
            f"User '{user}' attempted to commit code as author '{git_author_name}' to active branch "
            f"'{branch}' of '{GBT_CONFIG_PATH}' on {NOW}. They were prevented from doing so, "
            f"and shown the following error message:\n"
            f"{'-'*80}\n{error_msg}\n{'='*80}\n\n"
            f"Below is debug info:\n\n"
            f"Global variables:\n"
            f" PRIMARY_BRANCH: {PRIMARY_BRANCH}\n"
            f" WHITELISTED_USERS: {WHITELISTED_USERS}\n"
            f" BLACKLISTED_AUTHORS: {BLACKLISTED_AUTHORS}\n"
            f"\n\nFull traceback:\n"
            f"{'-'*80}\n{full_traceback}\n{'='*80}\n\n"
            "Reminder: to make changes to the above variables, you need to edit the git config!\n"
            "See \"$ git config --local --list | grep '^gbtconfig\\.'\" for more details."
        )
        email(subject=subject, text=email_text)
        sys.exit(1)
29,117
def description_for_number(numobj, lang, script=None, region=None):
    """Return a text description of a PhoneNumber object for the given language.

    The description may be the name of the country the number is from
    and/or the name of the geographical area it belongs to.  The number's
    validity is checked explicitly via its detected type.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text
          description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
          which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
          ISO script code as defined in ISO 15924, separated by an
          underscore (e.g. "Hant")
    region --  A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a text description in the given language code, for the given
    phone number, or an empty string if no description is available.
    """
    num_type = number_type(numobj)
    if num_type == PhoneNumberType.UNKNOWN:
        # Invalid/unclassifiable number: nothing sensible to describe.
        return ""
    if not is_number_type_geographical(num_type, numobj.country_code):
        # Non-geographic numbers only get a country-level description.
        return country_name_for_number(numobj, lang, script, region)
    return description_for_valid_number(numobj, lang, script, region)
29,118
def _construct_new_particles(samples, old_particles): """Construct new array of particles given the drawing results over the old particles. Args: + *samples* (np.ndarray): NxM array that contains the drawing results, where N is number of observations and M number of particles. + *old_particles* (np.ndarray): 3xNxM array that stores old particles. Returns: + new particles (np.ndarray): 3xNxM array of newly assembled particles (for each observation, there will be repeated particles). """ N, M = samples.shape ret_arr = 5*np.ones((3,N,M)) m_outer = np.zeros(N) while 0 < np.amax(samples): indices = np.nonzero(samples) last_n = -1 for i, n in enumerate(indices[0]): if last_n < n: if last_n >= 0: m_outer[last_n] += m_inner m_inner = 0 ret_arr[:,n,int(m_outer[n]+m_inner)] = old_particles[ :,n, indices[1][i] ] m_inner += 1 last_n = n m_outer[last_n] += m_inner samples[indices] -= 1 return ret_arr
29,119
def test_branches(runner):
    """The `branches` command runs and exits successfully."""
    outcome = runner.invoke(cli, ["branches"])
    assert outcome.exit_code == 0
29,120
def trunc(x, y, w, h):
    """Clamp a point into the box spanning (0, 0) to (w - 1, h - 1).

    Args:
        x: the x-coordinate of a point
        y: the y-coordinate of a point
        w: the width of the truncation box
        h: the height of the truncation box.

    Returns:
        The (x, y) pair clamped component-wise into the box.
    """
    clamped_x = max(0, min(x, w - 1))
    clamped_y = max(0, min(y, h - 1))
    return clamped_x, clamped_y
29,121
def GetPlayFabIDsFromNintendoSwitchDeviceIds(request, callback, customData = None, extraHeaders = None):
    """Retrieve the unique PlayFab identifiers for a set of Nintendo Switch
    Device identifiers.

    https://docs.microsoft.com/rest/api/playfab/server/account-management/getplayfabidsfromnintendoswitchdeviceids
    """
    if not PlayFabSettings.DeveloperSecretKey:
        raise PlayFabErrors.PlayFabException("Must have DeveloperSecretKey set to call this method")

    def _forward(playFabResult, error):
        # Only invoke the caller's callback when one was supplied.
        if callback:
            callback(playFabResult, error)

    PlayFabHTTP.DoPost("/Server/GetPlayFabIDsFromNintendoSwitchDeviceIds", request, "X-SecretKey", PlayFabSettings.DeveloperSecretKey, _forward, customData, extraHeaders)
29,122
def testInputLog(log_file): """ Test the user input for issues in the DNS query logs """ # if the path is a file if os.path.isfile(log_file): pass else: print("WARNING: Bad Input - Use a DNS (text) log file which has one domain per row without any other data or punctuation.") print("Exiting...") sys.exit(0) # Return NULL return None
29,123
def test_4_1_9_audit_rule_file_exists(host):
    """
    CIS Ubuntu 20.04 v1.0.0 - Rule # 4.1.9
    Tests if /etc/audit/rules.d/4.1.9.rules file exists
    """
    # The previous bare attribute access was a no-op, so the test could
    # never fail; assert makes the check effective.
    assert host.file(RULE_FILE_419).exists
29,124
def exact_match(true_labels, predicts):
    """ exact_match

    The strictest multi-label metric: the fraction of samples whose label
    vectors are predicted entirely correctly.

    Parameters
    ----------
    true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the true labels for all the classification tasks and for
        n_samples.

    predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the predictions for all the classification tasks and for
        n_samples.

    Returns
    -------
    float
        The exact match percentage between the given sets.

    Examples
    --------
    >>> from skmultiflow.evaluation.metrics.metrics import exact_match
    >>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
    >>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
    >>> exact_match(true_labels, predictions)
    0.5

    """
    if not hasattr(true_labels, 'shape'):
        true_labels = np.asarray(true_labels)
    if not hasattr(predicts, 'shape'):
        predicts = np.asarray(predicts)
    n_samples = true_labels.shape[0]
    # A sample counts only when every one of its labels matches.
    fully_correct = np.all(true_labels == predicts, axis=1)
    return np.sum(fully_correct) * 1. / n_samples
29,125
def dimred3(dat):
    """Reduce *dat* (one row per element of some vector space) to three
    dimensions using SVD-based PCA; convenience wrapper around ``dimred``."""
    return dimred(dat, 3)
29,126
def stedflow():
    """
    stedflow()


    Defined at ../src/stedflow.f lines 67-133

    Auto-generated thin wrapper: delegates directly to the compiled
    Fortran routine through the f90wrap bindings; takes no arguments and
    returns nothing.
    """
    _min3p.f90wrap_stedflow()
29,127
def render_orchestrator_registrations( driver: Driver = None, collab_id: str = None, project_id: str = None ): """ Renders out retrieved registration metadata in a custom form Args: driver (Driver): A connected Synergos driver to communicate with the selected orchestrator. collab_id (str): ID of selected collaboration to be rendered project_id (str): ID of selected project to be rendered """ # Type 1 view: Orchestrator's Perspective if driver and collab_id and project_id: registry_data = driver.registrations.read_all( collab_id=collab_id, project_id=project_id ).get('data', []) participant_ids = [reg['key']['participant_id'] for reg in registry_data] # Type 2 view: Insufficiant keys -> Render nothing else: registry_data = [] participant_ids = [] selected_participant_id = st.selectbox( label="Participant ID:", options=participant_ids, help="""Select an participant to view.""" ) if registry_data: selected_registry = [ reg for reg in registry_data if reg['key']['participant_id'] == selected_participant_id ].pop() else: selected_registry = {} with st.beta_container(): render_participant( driver=driver, participant_id=selected_participant_id ) with st.beta_expander("Registration Details"): reg_renderer.display(selected_registry) with st.beta_expander("Tag Details"): tags = selected_registry.get('relations', {}).get('Tag', []) tag_details = tags.pop() if tags else {} tag_renderer.display(tag_details) with st.beta_expander("Alignment Details"): alignments = selected_registry.get('relations', {}).get('Alignment', []) alignment_details = alignments.pop() if alignments else {} align_renderer.display(alignment_details) return selected_participant_id
29,128
def check_nan(data, new_data):
    """Return True when NaN positions are preserved between the two arrays,
    i.e. *data* and *new_data* have NaNs at exactly the same indices."""
    nan_mask_old = np.isnan(data)
    nan_mask_new = np.isnan(new_data)
    return bool(np.all(nan_mask_new == nan_mask_old))
29,129
def Var(poly, dist=None, **kws):
    """
    Element by element 2nd order statistics.

    Args:
        poly (chaospy.poly.ndpoly, Dist):
            Input to take variance on.
        dist (Dist):
            Defines the space the variance is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            Element for element variance along ``poly``, where
            ``variation.shape == poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.Var(dist)
        array([1., 4.])
        >>> x, y = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, x, y, 10*x*y])
        >>> chaospy.Var(poly, dist)
        array([  0.,   1.,   4., 800.])
    """
    if dist is None:
        dist, poly = poly, polynomials.variable(len(poly))
    poly = polynomials.setdim(poly, len(dist))
    # `isconstant` is a method: the previous `if not poly.isconstant:`
    # tested the (always truthy) bound method, making the branch dead;
    # it also returned the squared value, whereas Var of a constant is 0.
    if poly.isconstant():
        return poly.tonumpy()*0.
    poly = poly-E(poly, dist, **kws)
    poly = polynomials.square(poly)
    return E(poly, dist, **kws)
29,130
def view_n_image_rels(relationships, n, image_dir="data/VisualGenome/VG_100K/"):
    """Displays a relationship from `n` distinct images.

    Args:
        relationships: list of relationships from VG (e.g. relationships.json)
        n: images to show
        image_dir: directory where images live
    """
    from utils.visual_genome import get_vg_obj_name

    # Shuffle order that we see the images
    relationships_idx = list(range(len(relationships)))
    random.shuffle(relationships_idx)

    n_seen = 0
    for idx in relationships_idx:
        a = relationships[idx]
        rels = a["relationships"]
        if len(rels) > 0:
            n_seen += 1
            # Only the first relationship of each image is shown (the
            # inner loop breaks after one iteration).
            for rel in rels:
                sub = BBoxPrim.from_vg_obj(rel["subject"])
                obj = BBoxPrim.from_vg_obj(rel["object"])
                sub_name = get_vg_obj_name(rel["subject"])
                obj_name = get_vg_obj_name(rel["object"])
                pred = rel["predicate"]
                print(f"{sub_name} <{pred}> {obj_name}")
                image_fn = os.path.join(image_dir, f"{a['image_id']}.jpg")
                show_image(image_fn, [sub], [obj])
                break

        if n_seen >= n:
            break
29,131
def op_par_loop_parse(text):
    """Parsing for op_par_loop calls

    Scans *text* for every `op_par_loop(` occurrence and extracts, per
    call site: its location, first two argument names, the iteration
    set, and the parsed list of op_arg_dat/op_arg_gbl/op_opt_arg_dat
    arguments.

    NOTE(review): this is Python 2 code (`sys.maxint`, print statement);
    keep it on a Python 2 interpreter or port deliberately.
    """
    loop_args = []

    search = "op_par_loop"
    i = text.find(search)
    while i > -1:
        # Argument substring runs from the opening paren to the closing
        # semicolon of the call.
        arg_string = text[text.find('(', i) + 1:text.find(';', i + 11)]

        # parse arguments in par loop
        temp_args = []
        num_args = 0

        # parse each op_arg_dat
        search2 = "op_arg_dat"
        search3 = "op_arg_gbl"
        search4 = "op_opt_arg_dat"
        j = arg_string.find(search2)
        k = arg_string.find(search3)
        l = arg_string.find(search4)

        while j > -1 or k > -1 or l > -1:
            # Process whichever of the three argument kinds occurs next.
            index = min(j if (j > -1) else sys.maxint,k if (k > -1) else sys.maxint,l if (l > -1) else sys.maxint )

            if index == j:
                temp_dat = get_arg_dat(arg_string, j)
                # append this struct to a temporary list/array
                temp_args.append(temp_dat)
                num_args = num_args + 1
                j = arg_string.find(search2, j + 11)
            elif index == k:
                temp_gbl = get_arg_gbl(arg_string, k)
                # append this struct to a temporary list/array
                temp_args.append(temp_gbl)
                num_args = num_args + 1
                k = arg_string.find(search3, k + 11)
            elif  index == l:
                temp_dat = get_opt_arg_dat(arg_string, l)
                # append this struct to a temporary list/array
                temp_args.append(temp_dat)
                num_args = num_args + 1
                l = arg_string.find(search4, l + 15)

        temp = {'loc': i,
                'name1': arg_string.split(',')[0].strip(),
                'name2': arg_string.split(',')[1].strip(),
                'set': arg_string.split(',')[2].strip(),
                'args': temp_args,
                'nargs': num_args}

        loop_args.append(temp)
        i = text.find(search, i + 10)
    print '\n\n'
    return (loop_args)
29,132
def ex7a():
    """Do not accept non-numeric inputs"""
    # NOTE(review): the `input_numeric` callable itself is passed, not its
    # result -- presumably `area_of_rectangle` invokes it to prompt for a
    # validated number; confirm against its definition.
    area_of_rectangle(input_numeric, 'feet')
29,133
def bin_to_hex(bin_str: str) -> str:
    """Convert a binary string to a hex string.

    The returned hex string will contain the prefix '0x' only if given a
    binary string with the prefix '0b'.

    Args:
        bin_str (str): Binary string (e.g. '0b1001')

    Returns:
        str: Hexadecimal string zero-padded to the number of whole nibbles
            in the digit portion (prefix excluded)

    Example:
        >>> bin_str = '0b1010101111001101'
        >>> bin_to_hex(bin_str)
        '0xabcd'
        >>> bin_to_hex(bin_str[2:])  # remove '0b'
        'abcd'
    """
    if not isinstance(bin_str, str):
        raise TypeError(f'Expecting type str. given {bin_str.__class__.__name__}.')
    # Bug fix: the prefix check previously sliced `bin_str[2:]` (the
    # digits) instead of `bin_str[:2]`, so '0x' was never emitted and the
    # '0b' characters were counted as binary digits.
    has_prefix = bin_str[:2].lower() == '0b'
    digits = bin_str[2:] if has_prefix else bin_str
    literal = '0x' if has_prefix else ''
    num_nibbles = len(digits) // BITS_PER_NIBBLE
    digits = digits[:num_nibbles * BITS_PER_NIBBLE]  # truncate to whole number of nibbles
    return literal + hex(int(digits, 2))[2:].zfill(num_nibbles)
29,134
def exp_value_interpolate_bp(prod_inst, util_opti,
                             b_ssv_sd, k_ssv_sd, epsilon_ssv_sd,
                             b_ssv, k_ssv, epsilon_ssv,
                             b_ssv_zr, k_ssv_zr, epsilon_ssv_zr,
                             states_vfi_dim, shocks_vfi_dim):
    """interpolate value function and expected value function.

    Need three matrix here:
    1. state matrix x shock matrix where optimal choices were solved at
        - previously, shock for this = 0, but now shock vector might not be zero
    2. state matrix x shock matrix where shocks are drawn monte carlo way to allow
    for averaging, integrating over shocks for each x row
    3. state matrix alone, shock = 0, each of the x row in matrix x

    Returns:
        dict: keys 'evu' (expected utility), 'kae' (zero-shock capital
        grid) and 'b' (zero-shock bond grid).
    """

    'A Get States to Integrate over'
    # Build the three state grids (solved-at, monte-carlo, zero-shock).
    k_alpha_ae_sd, b_ssv_sd, \
        k_alpha_ae, b_ssv, \
        k_alpha_ae_zr, b_ssv_zr = \
        inter_states_bp(prod_inst,
                        util_opti,
                        b_ssv_sd, k_ssv_sd, epsilon_ssv_sd,
                        b_ssv, k_ssv, epsilon_ssv,
                        b_ssv_zr, k_ssv_zr, epsilon_ssv_zr,
                        states_vfi_dim, shocks_vfi_dim)

    'B. invoke'
    # Interpolate expected utility over the monte-carlo shock draws.
    util_emax = \
        exp_value_interpolate_main(u1=util_opti, x1=k_alpha_ae_sd, y1=b_ssv_sd,
                                   x2=k_alpha_ae, y2=b_ssv,
                                   x2_noshk=k_alpha_ae_zr, y2_noshk=b_ssv_zr,
                                   states_dim=states_vfi_dim,
                                   shocks_dim=shocks_vfi_dim,
                                   return_uxy=False)

    'C. collect'
    interpolant_exp_v = {'evu': util_emax,
                         'kae': k_alpha_ae_zr,
                         'b': b_ssv_zr}

    return interpolant_exp_v
29,135
def remove_non_protein(
    molecule: oechem.OEGraphMol,
    exceptions: Union[None, List[str]] = None,
    remove_water: bool = False,
) -> oechem.OEGraphMol:
    """
    Remove non-protein atoms from an OpenEye molecule.

    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.

    exceptions: None or list of str
        Exceptions that should not be removed. The list is not modified.

    remove_water: bool
        If water should be removed.

    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the filtered structure.
    """
    # Copy the exception list: the previous implementation appended "HOH"
    # to the caller's list, mutating it across calls.
    if exceptions is None:
        exceptions = []
    else:
        exceptions = list(exceptions)
    if remove_water is False:
        exceptions.append("HOH")

    # do not change input mol
    selection = molecule.CreateCopy()

    for atom in selection.GetAtoms():
        residue = oechem.OEAtomGetResidue(atom)
        if residue.IsHetAtom():
            if residue.GetName() not in exceptions:
                selection.DeleteAtom(atom)

    return selection
29,136
def test_2():
    """Adding two empty list-nodes yields an empty list-node."""
    left = ListNode()
    right = ListNode()
    assert add_two_numbers(left, right) == ListNode()
29,137
def configure_connection(instance, name='eventstreams', credentials=None):
    """Configures IBM Streams for a certain connection.


    Creates an application configuration object containing the required
    properties with connection information.


    Example for creating a configuration for a Streams instance with
    connection details::

        from icpd_core import icpd_util
        from streamsx.rest_primitives import Instance
        import streamsx.eventstreams as es

        cfg = icpd_util.get_service_instance_details(name='your-streams-instance')
        cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
        instance = Instance.of_service(cfg)
        app_cfg = es.configure_connection(instance, credentials='my_crdentials_json')

    Args:
        instance(streamsx.rest_primitives.Instance): IBM Streams instance object.
        name(str): Name of the application configuration, default name is 'eventstreams'.
        credentials(str|dict): The service credentials for Eventstreams.
    Returns:
        Name of the application configuration.

    Raises:
        TypeError: If ``credentials`` is ``None``.

    .. warning:: The function can be used only in IBM Cloud Pak for Data.

    .. versionadded:: 1.1
    """
    description = 'Eventstreams credentials'
    properties = {}
    if credentials is None:
        # Previously `TypeError(credentials)` was raised, producing the
        # unhelpful message "None"; state what is actually wrong.
        raise TypeError("credentials must not be None")

    # Dict credentials are serialized; strings are stored verbatim.
    if isinstance(credentials, dict):
        properties['eventstreams.creds'] = json.dumps(credentials)
    else:
        properties['eventstreams.creds'] = credentials

    # check if application configuration exists
    app_config = instance.get_application_configurations(name=name)
    if app_config:
        print('update application configuration: ' + name)
        app_config[0].update(properties)
    else:
        print('create application configuration: ' + name)
        instance.create_application_configuration(name, properties, description)
    return name
29,138
def setup(app):
    """Sphinx extension hook.

    Registers a `py:class` role so that every referenced Python class is
    rendered as a link without its full package path, which keeps base-class
    listings much prettier.
    """
    app.add_role_to_domain("py", "class", truncate_class_role)
    extension_metadata = {"parallel_read_safe": True}
    return extension_metadata
29,139
def test_except_project_name_handler(project_name, ctrl_init, svc_client_templates_creation, mocker):
    """Test template create project controller exception raised.

    Constructing the controller with an invalid project name must raise a
    ValidationError mentioning the bad `git_url`.
    """
    from renku.service.controllers.templates_create_project import TemplatesCreateProjectCtrl

    cache, user_data = ctrl_init
    svc_client, headers, payload, rm_remote = svc_client_templates_creation
    # Override the fixture payload with the parametrized (invalid) name.
    payload["project_name"] = project_name

    with pytest.raises(ValidationError) as exc_info:
        TemplatesCreateProjectCtrl(cache, user_data, payload)

    assert "Invalid `git_url`" in str(exc_info.value)
29,140
def transects_to_gdf(transects):
    """
    Saves the shore-normal transects as a gpd.GeoDataFrame

    KV WRL 2018

    Arguments:
    -----------
    transects: dict
        contains the coordinates of the transects

    Returns:
    -----------
    gdf_all: gpd.GeoDataFrame
        one row per transect, with a 'name' column holding the dict key

    """
    # Build all geometries first and construct the GeoDataFrame once:
    # DataFrame.append is deprecated (removed in pandas 2.0) and repeated
    # appends are quadratic.
    names = list(transects.keys())
    geoms = [geometry.LineString(transects[key]) for key in names]
    gdf_all = gpd.GeoDataFrame(geometry=gpd.GeoSeries(geoms))
    gdf_all['name'] = names

    return gdf_all
29,141
def deduce_final_configuration(fetched_config):
    """ Fills some variables in configuration based on those already extracted.

    Args:
        fetched_config (dict): Configuration variables extracted from a living
            environment,

    Returns:
        dict: Final configuration from live environment (the input dict is
            left unmodified).
    """
    derived_config = dict(fetched_config)
    derived_config[THRIFT_SERVER_URL] = _get_thrift_server_url(derived_config)
    derived_config[HIVE_SERVER_URL] = _get_hive_server_url(derived_config)
    return derived_config
29,142
def total_benchmark_return_nb(benchmark_value: tp.Array2d) -> tp.Array1d:
    """Get total market return per column/group.

    NOTE(review): the ``_nb`` suffix suggests a Numba-compiled kernel
    (decorator not visible here) -- the explicit loop is intentional;
    confirm before refactoring.
    """
    out = np.empty(benchmark_value.shape[1], dtype=np.float_)
    for col in range(benchmark_value.shape[1]):
        # Return from the first to the last value of each column.
        out[col] = returns_nb.get_return_nb(benchmark_value[0, col], benchmark_value[-1, col])
    return out
29,143
def young_modulus(data):
    """
    Given a stress-strain dataset, returns Young's Modulus.

    NOTE(review): `data` appears to be an Nx2 array of (strain, stress)
    rows -- column 0 is used as strain and column 1 as stress below;
    confirm against the callers of `yield_stress`.
    """
    yielding = yield_stress(data)[0]
    """Finds the yield index"""
    yield_index = 0
    for index, point in enumerate(data):
        if (point == yielding).all():
            yield_index = index
            break
    """Finds data in elastic region"""
    elastic = data[:yield_index+1]
    """
    Finds the upper yield point (lower yield point is the *yielding* variable).
    We're taking the first element ([0]) because it returns the first element
    that meets the criteria in parentheses. It's a two-dimensional array so we
    have to do this twice.
    """
    upperyieldpoint_index = np.where(elastic==max(elastic[:,1]))[0][0]
    upperyieldpoint = elastic[upperyieldpoint_index]
    """We estimate the region until the first upper yield point with a linear model"""
    lin_elastic_region = elastic[:upperyieldpoint_index+1]
    """The slope of this region is Young's Modulus"""
    # Slope = delta(stress) / delta(strain) across the linear region.
    return (lin_elastic_region[-1,1]-lin_elastic_region[0,1])/(lin_elastic_region[-1,0]-lin_elastic_region[0,0])
29,144
def generateODTableDf(database: pd.DataFrame, save: bool = True) -> pd.DataFrame:
    """Build the per-section origin-destination (OD) table.

    Args:
        database (pd.DataFrame): the initialised raw dataset.
        save (bool, optional): whether to additionally write the table out
            as a CSV file. Defaults to True.

    Returns:
        pd.DataFrame: OD table indexed and columned by origin stations.
    """
    od_matrix: np.ndarray = fetchTable4OD(database, originStations)
    od_frame: pd.DataFrame = pd.DataFrame(
        od_matrix, columns=originStations, index=originStations
    )
    if save:
        od_frame.to_csv(SEPERATOR.join([".", "result", "raw", "OD表.csv"]))
    return od_frame
29,145
def check_datatype(many: bool):
    """Decorator factory validating the payload types of DB-style methods.

    Args:
        many: True for statements expecting a sequence of documents
            (e.g. insert_many); False for single-dict statements.

    Returns:
        A decorator that raises ``TypeError`` when the wrapped method is
        called with arguments of the wrong type.
    """
    def wrapper(func):
        def inner_wrapper(self, _filter=None, _data=None, **kwargs):
            # `None` sentinel instead of the previous `_filter={}` mutable
            # default, which was shared across every call of the method.
            if _filter is None:
                _filter = {}
            if _data is None:
                # statements without two args - find, insert etc
                if many:
                    # statements that expect a list of dictionaries: insert_many
                    if isinstance(_filter, typing.Sequence):
                        return func(self, _filter, **kwargs)
                    raise TypeError("Unexpected Datatype.")
                if isinstance(_filter, dict):
                    return func(self, _filter, **kwargs)
                raise TypeError("Unexpected Datatype.")
            # update statements: both filter and data must be dicts
            if isinstance(_filter, dict) and isinstance(_data, dict):
                return func(self, _filter, _data, **kwargs)
            raise TypeError("Unexpected Datatype.")
        return inner_wrapper
    return wrapper
29,146
def login():
    """Login Page.

    Auto-logs-in users carrying valid remember-me cookies; otherwise
    renders the login form and, on submission, validates credentials and
    establishes the session.
    """
    # Cookie-based fast path: restore the session and skip the form.
    if request.cookies.get('user_id') and request.cookies.get('username'):
        session['user_id'] = request.cookies.get('user_id')
        session['username'] = request.cookies.get('username')
        update_last_login(session['user_id'])
        return render_template('main/index.html', username=session['username'])
    login_form = LoginForm()
    if login_form.validate_on_submit():
        username = request.form['username']
        password = (request.form['password'])
        user_id = check_user_exist(username, password)
        if user_id:
            # login_user builds the response (incl. cookies) itself.
            response = login_user(user_id, username)
            return response
        else:
            flash('Username/Password Incorrect!')
    return render_template('auth/login.html', form=login_form)
29,147
def suite_test():
    """ suite_test()

    Run every test in the suite and exit the process with a status code
    reflecting success (0) or failure (1).
    """
    outcome = unittest.TextTestRunner(verbosity=2).run(suite())
    sys.exit(not outcome.wasSuccessful())
29,148
def _get_index_train_test_path(split_num, train = True):
    """
    Method to generate the path containing the training/test split for the given
    split number (generally from 1 to 20).
    @param split_num      Split number for which the data has to be generated
    @param train          Is true if the data is training data. Else false.
    @return path          Path of the file containing the requried data
    """
    stem = "index_train_" if train else "index_test_"
    return _DATA_DIRECTORY_PATH + stem + str(split_num) + ".txt"
29,149
def reboot_nodes(batch_client, config, all_start_task_failed, node_ids):
    # type: (batch.BatchServiceClient, dict, bool, list) -> None
    """Reboot nodes in a pool
    :param batch_client: The batch client to use.
    :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
    :param dict config: configuration dict
    :param bool all_start_task_failed: reboot all start task failed nodes
    :param list node_ids: list of node ids to reboot
    """
    pool_id = settings.pool_id(config)
    nodes_to_reboot = []
    if all_start_task_failed:
        # Discover candidates by querying the service for nodes whose
        # start task failed; each one still requires user confirmation.
        nodes = list(
            batch_client.compute_node.list(
                pool_id=pool_id,
                compute_node_list_options=batchmodels.ComputeNodeListOptions(
                    filter='state eq \'starttaskfailed\'',
                ),
            ))
        for node in nodes:
            if not util.confirm_action(
                    config, 'reboot node {} from {} pool'.format(
                        node.id, pool_id)):
                continue
            nodes_to_reboot.append(node.id)
    else:
        if util.is_none_or_empty(node_ids):
            raise ValueError('node ids to reboot is empty or invalid')
        for node_id in node_ids:
            if not util.confirm_action(
                    config, 'reboot node {} from {} pool'.format(
                        node_id, pool_id)):
                continue
            nodes_to_reboot.append(node_id)
    # Nothing confirmed: bail out without creating a thread pool.
    if util.is_none_or_empty(nodes_to_reboot):
        return
    # Reboot confirmed nodes concurrently.
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=_max_workers(nodes_to_reboot)) as executor:
        for node_id in nodes_to_reboot:
            executor.submit(
                _reboot_node, batch_client, pool_id, node_id, False)
29,150
def configureDefaultOptions():
    """Select default options based on the file format and force field."""
    # Force-field flavor flags drive several defaults below.
    implicitWater = False
    if session['fileType'] == 'pdb' and session['waterModel'] == 'implicit':
        implicitWater = True
    isAmoeba = session['fileType'] == 'pdb' and 'amoeba' in session['forcefield']
    isDrude = session['fileType'] == 'pdb' and session['forcefield'].startswith('charmm_polar')
    # NPT only makes sense with explicit solvent; implicit water uses NVT.
    session['ensemble'] = 'nvt' if implicitWater else 'npt'
    session['platform'] = 'CUDA'
    session['precision'] = 'single'
    session['cutoff'] = '2.0' if implicitWater else '1.0'
    session['ewaldTol'] = '0.0005'
    session['constraintTol'] = '0.000001'
    session['hmr'] = True
    session['hmrMass'] = '1.5'
    # Polarizable force fields need a smaller timestep.
    if isAmoeba:
        session['dt'] = '0.002'
    elif isDrude:
        session['dt'] = '0.001'
    else:
        session['dt'] = '0.004'
    session['steps'] = '1000000'
    session['equilibrationSteps'] = '1000'
    session['temperature'] = '300'
    session['friction'] = '1.0'
    session['pressure'] = '1.0'
    session['barostatInterval'] = '25'
    session['nonbondedMethod'] = 'CutoffNonPeriodic' if implicitWater else 'PME'
    session['writeDCD'] = True
    session['dcdFilename'] = 'trajectory.dcd'
    session['dcdInterval'] = '10000'
    session['writeData'] = True
    session['dataFilename'] = 'log.txt'
    session['dataInterval'] = '1000'
    session['dataFields'] = ['step', 'speed' ,'progress', 'potentialEnergy', 'temperature']
    session['writeCheckpoint'] = True
    session['checkpointFilename'] = 'checkpoint.chk'
    session['checkpointInterval'] = '10000'
    session['writeSimulationXml'] = False
    session['systemXmlFilename'] = 'system.xml'
    session['integratorXmlFilename'] = 'integrator.xml'
    session['writeFinalState'] = False
    session['finalStateFileType'] = 'stateXML'
    session['finalStateFilename'] = "final_state.xml"
    # AMOEBA handles bonded terms without constraints.
    if isAmoeba:
        session['constraints'] = 'none'
    else:
        session['constraints'] = 'hbonds'
29,151
def my_filter(predicate, lst):
    """Return a new list of those items x in lst such that predicate(x)
    is True, in their relative order in lst.

    Precondition: lst is a list; predicate maps items to booleans.

    Examples:
    * my_filter(whatever, []) = []
    * If you have
          def pos(n):
              return n > 0
      then
          my_filter(pos, [3, 1, -4, 0, 5, 9, -2, 7]) = [3, 1, 5, 9, 7]
      Equivalently,
          my_filter(lambda n: n>0, [3, 1, -4, 0, 5, 9, -2, 7]) = [3, 1, 5, 9, 7]

    Write your own recursion over the list;
    do not use list comprehension here.
    """
    # Previously unimplemented (`pass`, returning None). Recursive
    # definition per the docstring: filter the tail, then keep or drop
    # the head.
    if not lst:
        return []
    rest = my_filter(predicate, lst[1:])
    return [lst[0]] + rest if predicate(lst[0]) else rest
29,152
def mock_socket() -> MagicMock:
    """Provide a MagicMock standing in for a WebSocket connection."""
    return MagicMock(spec=WebSocket)
29,153
def check_function(
    func: ast.FunctionDef, ignore_ambiguous_signatures: bool = True
) -> Iterator[Tuple[ast.AST, List[str], List[str]]]:
    """Check the documented and actual arguments for a function.

    Parameters
    ----------
    func : ast.FunctionDef
        The function to check
    ignore_ambiguous_signatures : bool, optional
        Whether to ignore extra docstring parameters if the function
        signature is ambiguous (the default is True).

    Yields
    ------
    ast.FunctionDef
        The function
    List[str]
        Parameters in the signature but not in the docstring.
    List[str]
        Parameters in the docstring but not in the signature.

    Notes
    -----
    This is a generator that yields exactly one
    (function, underdocumented, overdocumented) tuple.
    """
    signature_args, ambiguous = get_signature_params(func)
    docced_args = get_doc_params(func)

    underdocumented, overdocumented = compare_args(
        signature_args, docced_args, ignore_ambiguous_signatures and ambiguous
    )
    yield func, underdocumented, overdocumented
29,154
def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix, **kwargs):
    """Extract object-tracking annotations from a mask image and upload them.

    out_path: str
        Directory holding the output mask files.
    in_image: BiaflowsCytomineInput
        Wrapped Cytomine image; its mask file is read from out_path.
    project_id: int
        Cytomine project to attach tracks/annotations to.
    track_prefix: str
        Prefix used when naming created tracks.
    kwargs: dict

    Returns a (TrackCollection, AnnotationCollection) pair. Raises
    ValueError for masks with fewer than 3 or more than 4 dimensions.
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim < 3:
        raise ValueError("Object tracking should be at least 3D (only {} spatial dimension(s) found)".format(ndim))

    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if ndim == 3:
        # 2D + time: one slice group per tracked object.
        slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True)
        time_to_image = get_depth_to_slice(image)

        for slice_group in slices:
            curr_tracks, curr_annots = create_tracking_from_slice_group(
                image, slice_group,
                slice2point=lambda _slice: _slice.polygon.centroid,
                depth2slice=time_to_image, id_project=project_id,
                upload_object=True, upload_group_id=True,
                track_prefix=track_prefix + "-object"
            )
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)
    elif ndim == 4:
        # 3D + time: one track per label across (time, depth) slices.
        objects = mask_to_objects_3dt(mask=data)
        depths_to_image = get_depth_to_slice(image, depth=("time", "depth"))
        # TODO add tracking lines one way or another
        for time_steps in objects:
            label = time_steps[0][0].label
            track = Track(name="{}-{}".format(track_prefix, label), id_image=image.id,
                          color=DEFAULT_COLOR).save()
            Property(track, key="label", value=label).save()

            annotations.extend([
                Annotation(
                    location=change_referential(p=slice.polygon, height=image.height).wkt,
                    id_image=image.id,
                    id_project=project_id,
                    id_tracks=[track.id],
                    slice=depths_to_image[(slice.time, slice.depth)].id
                ) for slices in time_steps for slice in slices
            ])

            tracks.append(track)

    else:
        raise ValueError("Annotation extraction for object tracking does not support masks with more than 4 dims...")

    return tracks, annotations
29,155
def gen_device(dtype, ip, mac, desc, cloud):
    """Convenience function that generates devices based on they type.

    Looks up the numeric device type `dtype` in the table below and
    instantiates the matching device class; unknown types fall back to a
    generic BroadlinkDevice.
    """
    # Map of device class -> list of known numeric device type codes.
    devices = {
        # sp1: [0],
        sp2: [
            0x2711,  # SP2
            0x2719,
            0x7919,
            0x271A,
            0x791A,  # Honeywell SP2
            0x2720,  # SPMini
            0x753E,  # SP3
            0x7D00,  # OEM branded SP3
            0x947A,
            0x9479,  # SP3S
            0x2728,  # SPMini2
            0x2733,
            0x273E,  # OEM branded SPMini
            0x7530,
            0x7546,
            0x7918,  # OEM branded SPMini2
            0x7D0D,  # TMall OEM SPMini3
            0x2736,  # SPMiniPlus
        ],
        rm: [
            0x2712,  # RM2
            0x2737,  # RM Mini
            0x273D,  # RM Pro Phicomm
            0x2783,  # RM2 Home Plus
            0x277C,  # RM2 Home Plus GDT
            0x278F,  # RM Mini Shate
            0x27C2,  # RM Mini 3
            0x27D1,  # new RM Mini3
            0x27DE,  # RM Mini 3 (C)
        ],
        rm4: [
            0x51DA,  # RM4 Mini
            0x5F36,  # RM Mini 3
            0x6070,  # RM4c Mini
            0x610E,  # RM4 Mini
            0x610F,  # RM4c
            0x62BC,  # RM4 Mini
            0x62BE,  # RM4c
            0x6364,  # RM4S
            0x648D,  # RM4 mini
            0x6539,  # RM4c Mini
            0x653A,  # RM4 mini
        ],
        rmp: [
            0x272A,  # RM2 Pro Plus
            0x2787,  # RM2 Pro Plus2
            0x279D,  # RM2 Pro Plus3
            0x27A9,  # RM2 Pro Plus_300
            0x278B,  # RM2 Pro Plus BL
            0x2797,  # RM2 Pro Plus HYC
            0x27A1,  # RM2 Pro Plus R1
            0x27A6,  # RM2 Pro PP
        ],
        rm4p: [
            0x6026,  # RM4 Pro
            0x61A2,  # RM4 pro
            0x649B,  # RM4 pro
            0x653C,  # RM4 pro
        ],
        a1: [0x2714],  # A1
        mp1: [
            0x4EB5,  # MP1
            0x4EF7,  # Honyar oem mp1
            0x4F1B,  # MP1-1K3S2U
            0x4F65,  # MP1-1K3S2U
        ],
        # hysen: [0x4EAD],  # Hysen controller
        # S1C: [0x2722],  # S1 (SmartOne Alarm Kit)
        # dooya: [0x4E4D]  # Dooya DT360E (DOOYA_CURTAIN_V2)
    }

    # Look for the class associated to devtype in devices; the
    # single-element unpack yields None when no class matches.
    [device_class] = [dev for dev in devices if dtype in devices[dev]] or [None]
    if device_class is None:
        print("Unknow device type 0x%x" % dtype)
        return BroadlinkDevice(dtype, name=desc, cloud=cloud)
    return device_class(ip=ip, mac=mac, devtype=dtype, name=desc, cloud=cloud)
29,156
def get_dbfs_file_output(limit_file_size: Optional[pulumi.Input[bool]] = None,
                         path: Optional[pulumi.Input[str]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDbfsFileResult]:
    """
    ## Example Usage

    ```python
    import pulumi
    import pulumi_databricks as databricks

    report = databricks.get_dbfs_file(limit_file_size=10240,
        path="dbfs:/reports/some.csv")
    ```
    ## Related Resources

    The following resources are used in the same context:

    * End to end workspace management guide
    * get_dbfs_file_paths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
    * DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
    * Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`.


    :param bool limit_file_size: Do lot load content for files smaller than this in bytes
    :param str path: Path on DBFS for the file to get content of
    """
    # Pulumi-codegen stub: the Output-typed variant is implemented by the
    # generated plumbing; the `...` body is intentional.
    ...
29,157
def berks(berks_bin, path, action='update'):
    """
    Execute various berks commands

    :rtype : tuple
    :param berks_bin: path to berks bin
    :param path: path to change directory to before running berks commands
        (berks is a dir context aware tool); passed as the child's cwd
    :param action: berks action to run, e.g. berks install
    :return: tpl. output, errors, returncode
    """
    import shlex

    # Run without a shell: the previous implementation interpolated `path`
    # and `action` into a `shell=True` command string, which is
    # injection-prone. `cwd` replaces the `cd`; shlex splits the action
    # into arguments. A relative `berks_bin` is still resolved against
    # `path`, as with the old `cd`.
    cmd = [berks_bin] + shlex.split(action)
    p = subprocess.Popen(cmd,
                         cwd=path,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)
    output, errors = p.communicate()
    return output, errors, p.returncode
29,158
def container_clone(request, pk):
    """
    Make a clone of the container.
    Todo: show params on OPTIONS call.
    Todo: permissions
    :param pk pk of the container that needs to be cloned
    :param name
    :param description
    """
    params = {}
    data = request.data
    # `name` is mandatory for the clone; bail out early with a hint.
    if not data.get('name'):
        return Response({"error": "please provide name for the clone: {\"name\" : \"some name \"}"})
    params['name'] = data.get('name')
    if data.get('description'):
        params['description'] = data.get('description')

    origin = get_container(pk)
    # validate permissions
    validate_object_permission(ContainerDetailPermission, request, origin)

    if origin:
        clone = origin.clone(**params)
        clone.save()
        serializer = ContainerSerializer(clone)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    else:
        return Response({"error": "Container not found!", "data": data})
29,159
async def test_form_user_discovery_manual_and_auto_password_fetch(hass):
    """Test discovery skipped and we can auto fetch the password."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # Step 1: start the user flow with discovery mocked out.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    # Step 2: submit no host, which routes the flow to manual entry.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: None},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "manual"

    # Step 3: provide host and blid manually.
    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["errors"] is None

    # Step 4: the password is fetched automatically and the entry is created.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == "myroomba"
    assert result4["result"].unique_id == "BLID"
    assert result4["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    # Setup must have been invoked exactly once for the new entry.
    assert len(mock_setup_entry.mock_calls) == 1
29,160
def parse_multi_id_graph(graph, ids):
    """
    Parse a graph with 1 to 3 ids and return
    individual graphs with their own braced IDs.

    NOTE(review): relies on the module-level LEVEL_STATE object; setting
    ``LEVEL_STATE.next_token`` before each ``next_id()`` call appears to
    drive the ID sequence — the exact semantics live outside this block.
    """
    new_graphs = ''
    # First paragraph: everything before the marker of the second id.
    LEVEL_STATE.next_token = ids[0]
    pid1 = LEVEL_STATE.next_id()
    split1 = graph.partition('({})'.format(ids[1]))
    text1 = combine_bolds(split1[0])
    pid2_marker = split1[1]
    remainder = bold_first_italics(split1[2])
    new_graphs += "\n{" + pid1 + "}\n"
    new_graphs += text1 + '\n'
    # Second paragraph.
    LEVEL_STATE.next_token = ids[1]
    pid2 = LEVEL_STATE.next_id()
    new_graphs += "\n{" + pid2 + "}\n"
    if len(ids) == 2:
        # Only two ids: the rest of the graph belongs to the second one.
        text2 = combine_bolds(" ".join([pid2_marker, remainder]))
        new_graphs += text2 + '\n'
        return new_graphs
    else:
        # Three ids: split the remainder again at the third id marker.
        split2 = remainder.partition('({})'.format(ids[2]))
        pid3_marker = split2[1]
        remainder2 = bold_first_italics(split2[2])
        text2 = combine_bolds(" ".join([pid2_marker, split2[0]]))
        new_graphs += text2 + '\n'
        LEVEL_STATE.next_token = ids[2]
        pid3 = LEVEL_STATE.next_id()
        new_graphs += "\n{" + pid3 + "}\n"
        text3 = combine_bolds(" ".join([pid3_marker, remainder2]))
        new_graphs += text3 + '\n'
        return new_graphs
29,161
def filter_multimappers(align_file, data):
    """
    Remove multimapping reads from a bowtie2 alignment file.

    It does not seem like bowtie2 has a corollary to the -m 1 flag in
    bowtie, there are some options that are close but don't do the same
    thing. Bowtie2 sets the XS flag for reads mapping in more than one
    place, so we can just filter on that. This will not work for other
    aligners.

    :param align_file: path to a BAM/SAM alignment produced by bowtie2
    :param data: bcbio data dictionary (config, cores, variant regions)
    :return: path to the filtered ".unique" BAM file
    """
    config = dd.get_config(data)
    # sambamba needs -S for SAM input; BAM needs no extra flag.
    type_flag = "" if bam.is_bam(align_file) else "S"
    base, ext = os.path.splitext(align_file)
    out_file = base + ".unique" + ext
    # Optionally restrict to the configured variant regions BED file.
    bed_file = dd.get_variant_regions(data)
    bed_cmd = '-L {0}'.format(bed_file) if bed_file else " "
    if utils.file_exists(out_file):
        return out_file
    # XS == null keeps only uniquely-mapped reads; for paired data also
    # require a properly paired mate.
    base_filter = '-F "[XS] == null and not unmapped {paired_filter} and not duplicate" '
    if bam.is_paired(align_file):
        paired_filter = "and paired and proper_pair"
    else:
        paired_filter = ""
    filter_string = base_filter.format(paired_filter=paired_filter)
    sambamba = config_utils.get_program("sambamba", config)
    num_cores = dd.get_num_cores(data)
    # Write through a transactional temp file so partial output never
    # masquerades as the finished result.
    with file_transaction(out_file) as tx_out_file:
        cmd = ('{sambamba} view -h{type_flag} '
               '--nthreads {num_cores} '
               '-f bam {bed_cmd} '
               '{filter_string} '
               '{align_file} '
               '> {tx_out_file}')
        message = "Removing multimapped reads from %s." % align_file
        do.run(cmd.format(**locals()), message)
    bam.index(out_file, config)
    return out_file
29,162
def try_run(obj, names):
    """Given a list of possible method names, try to run them with the
    provided object. Keep going until something works. Used to run
    setup/teardown methods for module, package, and function tests.

    NOTE(review): uses inspect.getargspec, which was removed in
    Python 3.11 — confirm the supported interpreter range.
    """
    for name in names:
        func = getattr(obj, name, None)
        if func is not None:
            if type(obj) == types.ModuleType:
                # py.test compatibility
                try:
                    args, varargs, varkw, defaults = inspect.getargspec(func)
                except TypeError:
                    # Not a function. If it's callable, call it anyway
                    if hasattr(func, '__call__'):
                        func = func.__call__
                    try:
                        args, varargs, varkw, defaults = \
                            inspect.getargspec(func)
                        args.pop(0)  # pop the self off
                    except TypeError:
                        raise TypeError("Attribute %s of %r is not a python "
                                        "function. Only functions or callables"
                                        " may be used as fixtures." %
                                        (name, obj))
                # Module-level fixtures that accept an argument are passed
                # the module itself (py.test style).
                if len(args):
                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
                    return func(obj)
            log.debug("call fixture %s.%s", obj, name)
            return func()
29,163
def download_file(url, path=None, clobber=False):
    """Download ``url`` to a local file, streaming the body in chunks.

    thanks to:
    https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py

    Parameters
    ----------
    url : str
        URL to fetch.
    path : str
        local path to download to. When None, the file name is taken from
        the URL and joined with the module-level ``directory`` (defined
        elsewhere in this file — TODO confirm it is set before use).
    clobber : bool
        when False (default), an existing file is kept and not re-downloaded.

    Returns
    -------
    str
        The local file name the content was written to.
    """
    if path is None:
        local_filename = os.path.join(directory, url.split("/")[-1])
    else:
        local_filename = path
    if os.path.exists(local_filename) and not clobber:
        getLogger().info("{} exists; not downloading.".format(local_filename))
        return local_filename

    # NOTE the stream=True parameter: the body is fetched lazily in chunks
    # instead of being loaded into memory at once. Fixes over the previous
    # version: the response is now closed via the context manager (the
    # connection used to leak), and HTTP errors raise instead of silently
    # saving an error page as the downloaded file.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
    return local_filename
29,164
def write_word_concept_transducer_same_prob(data):
    """
    Write the word to concept transducer as a .txt file of transitions, to be
    later compiled. This function will make it so that <unk> words will have
    an equal probability of being mapped to all the different concepts.

    Fix: the output file is now opened with a ``with`` block so the handle is
    closed even if a write or the cost computation raises.

    :param data: Data class containing infos on the corpus; must expose
        ``counter_word_concept`` (counts keyed by "word concept" strings),
        ``counter_concepts`` (counts per concept) and ``lexicon_concepts``
        (list of all concepts).
    """
    with open('word_concept.txt', 'w') as wt_file:
        # word-tag costs based on counts: cost is the negative log of the
        # conditional probability P(word | concept).
        for pair in data.counter_word_concept:
            word, concept = pair.split()
            cost = -math.log(data.counter_word_concept[pair] / data.counter_concepts[concept])
            wt_file.write("0 0 %s %s %f\n" % (word, concept, cost))

        # <UNK>-tag maps with uniform cost with every concept
        for concept in data.lexicon_concepts:
            wt_file.write("0 0 <unk> %s %f\n" % (concept, 1. / len(data.lexicon_concepts)))

        # Final state marker for the transducer.
        wt_file.write("0\n")
29,165
def param_to_secopt(param):
    """Convert a parameter name to INI section and option.
    Split on the first dot. If not dot exists, return name
    as option, and None for section.

    :param param: dotted parameter name, e.g. ``"section.option"``
    :return: ``(section, option)`` tuple; section is None when there is no dot
    """
    section, dot, option = param.partition('.')
    if not dot:
        # No separator found: the whole name is the option.
        return (None, param)
    return (section, option)
29,166
async def on_reaction_remove(reaction, user):
    """
    Called when a message has a reaction removed from it.

    The message is stored in ``reaction.message``. For older messages,
    it's possible that this event might not get triggered.

    Args:
        reaction: A Reaction object of the current state of the reaction.
        user: An User or Member object of the user who removed the reaction.
    """
    # Log the removal; f-string output matches the original space-separated print.
    print(f"{user} removed {reaction} from {reaction.message}")
29,167
def load_contracts(
    web3: web3.Web3, contracts_file: str, contracts_names: List[str]
) -> Dict[str, web3.contract.Contract]:
    """
    Given a list of contract names, returns a dict of contract names and contracts.

    :param web3: connected Web3 instance used to build the contract objects
    :param contracts_file: path to a JSON file keyed by contract name, where each
        entry carries ``address`` and ``abi`` fields
    :param contracts_names: names of the contracts to load from the file
    :return: mapping of contract name -> contract object
    :raises KeyError: if a requested name is missing from the JSON file
    :raises InvalidAddress: if an entry's address is rejected by web3
    """
    # The previous try/except here only caught (KeyError, InvalidAddress)
    # and immediately re-raised the same exception, so it was dead weight;
    # both exceptions still propagate to the caller unchanged.
    contracts = {}
    with open(contracts_file) as infile:
        source_json = json.load(infile)
    for contract_name in contracts_names:
        contracts[contract_name] = web3.eth.contract(
            address=source_json[contract_name]["address"],
            abi=source_json[contract_name]["abi"],
        )
    return contracts
29,168
async def test_query_no_db(hass: HomeAssistant) -> None:
    """Test the SQL sensor."""
    # `SELECT 5 as value` touches no table, so no db_url / schema is needed.
    config = {
        "sensor": {
            "platform": "sql",
            "queries": [
                {
                    "name": "count_tables",
                    "query": "SELECT 5 as value",
                    "column": "value",
                }
            ],
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    # The sensor state mirrors the value of the selected column as a string.
    state = hass.states.get("sensor.count_tables")
    assert state.state == "5"
29,169
def main():
    """Called after using python mosaic.py"""
    parser = argparse.ArgumentParser(description="Generate a mosaic from spotify playlist.")
    parser.add_argument("spotifyid", help="Spotify API Client ID")
    parser.add_argument("spotifysecret", help="Spotify API Client Secret")
    parser.add_argument("playlist", help="Spotify playlist URI")
    parser.add_argument("-t", "--tiles", help="number of artworks per mosaic row", type=int, default=2)
    parser.add_argument("-o", "--out", help="output file", default="mosaic.jpg")
    parser.add_argument("-s", "--shuffle", help="randomize the order of artworks", action="store_true")
    parser.add_argument("-r", "--resolution", help="select the resolution of one artwork", type=int,
                        default=640, choices=[64, 300, 640])
    parser.add_argument("-l", "--log", help="enable logging", action="store_true")
    args = parser.parse_args()

    # Logging is always configured; -l only raises the verbosity threshold.
    level = logging.INFO if args.log else logging.CRITICAL
    logging.basicConfig(level=level,
                        format="%(levelname)s %(asctime)s: %(message)s",
                        datefmt="%H:%M:%S")

    logging.info("Creating class instance.")
    mosaic = SpotifyMosaic({
        "id": args.spotifyid,
        "secret": args.spotifysecret
    })

    logging.info("Running main function.")
    mosaic.create(args.playlist,
                  size=args.tiles,
                  output=args.out,
                  shuffle=args.shuffle,
                  resolution=args.resolution)
29,170
def removecandidate(_id=''):
    """ Remove a candidate from the candidate list
        Use with the lexicon's identifiers
        /removecandidate?identifier=katt..nn.1
    """
    lexicon = request.args.get('lexicon', C.config['default'])
    lexconf = lexconfig.get_lexiconconf(lexicon)
    try:
        identifier = request.args.get('identifier', '')
        # ask karp for the identifier
        q = 'extended||and|%s.search|equals|%s' % ('identifier', identifier)
        res = helpers.karp_query('query', query={'q': q},
                                 mode=lexconf['candidateMode'],
                                 resource=lexconf['candidatelexiconName'])
        # Resolve the Elasticsearch document id of the first hit.
        _id = helpers.es_first_id(res)
    except Exception as e1:
        # Any lookup failure (no hit, karp error) is reported as an
        # unknown candidate to the client.
        logging.error(e1)
        raise e.MflException("Could not find candidate %s" % identifier,
                             code="unknown_candidate")
    # delete it
    ans = helpers.karp_delete(_id, lexconf['candidatelexiconName'])
    return jsonify({"deleted": ans})
29,171
def outlier_dates_correction(series, coef=2.0):
    """Corrects the dates that are outliers.

    It receives all the dates in which samples were collected, for example
    for a patient, and tries to (i) identify outliers and (ii) correct them
    with the best possible date.

    Cleanup: removed large commented-out experiments, a dead ``outliers``
    computation that was immediately overwritten, dead string statements and
    unused locals; the executed logic is unchanged.

    Parameters
    ----------
    series: series with datetime64[ns]
    coef: kept for backward compatibility — it was only used by the dead
        (overwritten) mean/std outlier computation and no longer affects
        the result.

    Returns
    -------
    datetime64[ns] series with corrected dates.
    """
    # Too few samples to distinguish an outlier from a legitimate spread.
    if len(series) < 3:
        return series

    # Reference mean computed only from dates at most 3 days from the
    # previous sample; dates more than 10 days from that mean are outliers.
    ddiff = series.diff().dt.days.abs()
    mean = series[ddiff <= 3].mean()
    dff = (series - mean).abs()
    outliers = dff.dt.days > 10

    if outliers.any():
        # Recompute the reference mean from the inliers only.
        mean = series[~outliers].mean()
        # Candidate corrections: keep as-is, swap day/month, +/- one year.
        r = series[outliers] \
            .transform([lambda x: x, swap_day_month, one_year_more, one_year_less])
        # Pick, per outlier, the candidate closest to the inlier mean.
        idx = (r - mean).abs().idxmin(axis=1)
        # NOTE(review): original comment warned "When two outliers it
        # breaks!" — squeeze() on multiple rows may misalign; confirm.
        series[outliers] = r[idx].squeeze()

    return series
29,172
def svn_log_entry_dup(*args):
    """svn_log_entry_dup(svn_log_entry_t log_entry, apr_pool_t pool) -> svn_log_entry_t"""
    # Auto-generated (SWIG-style) wrapper: forwards directly to the C binding.
    return _core.svn_log_entry_dup(*args)
29,173
def input_stream() -> IO:
    """Input stream fixture."""
    # Sample initialization program: one mask line followed by memory writes.
    lines = [
        "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X",
        "mem[8] = 11",
        "mem[7] = 101",
        "mem[8] = 0",
    ]
    return StringIO("\n".join(lines))
29,174
def get_lr_scheduler(optimizer: Optimizer, cfg: CfgNode, start_epoch: int = 0): """Returns LR scheduler module""" # Get mode if cfg.TRAIN.LOSS.TYPE in ["categorical_crossentropy", "focal_loss"]: mode = "min" else: raise NotImplementedError if cfg.TRAIN.SCHEDULER.TYPE == "ReduceLROnPlateau": scheduler = ReduceLROnPlateau( optimizer, mode, factor=cfg.TRAIN.SCHEDULER.FACTOR, patience=cfg.TRAIN.SCHEDULER.PATIENCE, verbose=True, ) elif cfg.TRAIN.SCHEDULER.TYPE == "StepLR": scheduler = StepLR( optimizer, step_size=cfg.TRAIN.SCHEDULER.PATIENCE, gamma=cfg.TRAIN.SCHEDULER.FACTOR, last_epoch=start_epoch - 1, ) elif cfg.TRAIN.SCHEDULER.TYPE == "None": scheduler = None else: raise NotImplementedError logger.info(f"Used scheduler: {scheduler}") return scheduler
29,175
async def create_and_open_pool(pool_name, pool_genesis_txn_file):
    """
    Creates a new local pool ledger configuration.
    Then open that pool and return the pool handle that can be used later
    to connect pool nodes.

    :param pool_name: Name of the pool ledger configuration.
    :param pool_genesis_txn_file: Pool configuration json. if NULL, then
    default config will be used.
    :return: The pool handle was created.
    """
    utils.print_header("\nCreate Ledger\n")
    await create_pool_ledger_config(pool_name, pool_genesis_txn_file)

    utils.print_header("\nOpen pool ledger\n")
    # None -> open with the default runtime pool configuration.
    pool_handle = await pool.open_pool_ledger(pool_name, None)
    return pool_handle
29,176
def rotate_char(c, n):
    """Rotate a single character n places in the alphabet

    n is an integer
    """
    # Pick the ASCII code of 'a' or 'A' depending on case; anything that is
    # not a letter passes through unchanged.
    if c.islower():
        alpha_base = ord('a')
    elif c.isupper():
        alpha_base = ord('A')
    else:
        # Don't rotate character if it's not a letter
        return c
    # Shift within the 26-letter alphabet; modulo 26 wraps past 'z' (or
    # before 'a' for negative n).
    return chr(alpha_base + (ord(c) - alpha_base + n) % 26)
29,177
def subset_language(vocabulary, vectors, wordlist, N=32768):
    """
    Subset the vocabulary/vectors to those in a wordlist.
    The wordlist is a list arranged in order of 'preference'.
    Note: we hope the vocabulary is contained in the wordlist, but it might not be.
    N is the number of words we require.
    If the wordlist contains fewer than N words, (but the vocabulary has >= N),
    we supplement the result from the vocabulary randomly.
    Also, we want to make sure the order of vocabulary is random (because some structure
    could negatively influence the optimisation procedure later).
    """
    # NOTE(review): Python 2 code (print statements, xrange); keep on py2.
    keep_indices = []           # indices of vocabulary/vectors to keep
    added = 0
    if type(wordlist) == str:
        # load from path
        print 'Loading wordlist from', wordlist
        wordlist = np.loadtxt(wordlist, dtype=str)
    else:
        assert type(wordlist) == list or type(wordlist) == np.ndarray
    print 'Subsetting vocabulary.'
    # Walk the wordlist in preference order, keeping the first N matches.
    for word in wordlist:
        print word
        if added == N:
            break
        try:
            word_index = vocabulary.index(word)
            keep_indices.append(word_index)
            added += 1
        except ValueError:
            continue
    print 'Acquired', len(keep_indices), 'words.'
    miss = N - len(keep_indices)
    if miss > 0:
        # Top up with random, not-yet-kept vocabulary indices.
        # NOTE(review): np.random.choice(..., 1) returns a length-1 array,
        # which is then appended and later used to index a list — this
        # works only on older numpy/py2 semantics; confirm.
        print 'Supplementing with', miss, 'random words.'
        for i in xrange(miss):
            random_index = np.random.choice(len(vocabulary), 1)
            while random_index in keep_indices:
                random_index = np.random.choice(len(vocabulary), 1)
            keep_indices.append(random_index)
    print 'Shuffling.'
    # shuffle
    np.random.shuffle(keep_indices)
    # populate new arrays
    print 'Populating subsetted arrays.'
    vectors_subset = np.array([vectors[i] for i in keep_indices])
    vocabulary_subset = [vocabulary[i] for i in keep_indices]
    return vocabulary_subset, vectors_subset
29,178
def compile_subject(*, subject_id, date_of_birth, sex):
    """Compiles the NWB Subject object.

    All arguments are keyword-only and forwarded verbatim to ``Subject``.

    :param subject_id: unique identifier for the subject
    :param date_of_birth: the subject's date of birth
    :param sex: the subject's sex
    :return: a populated ``Subject`` instance
    """
    return Subject(subject_id=subject_id, date_of_birth=date_of_birth, sex=sex)
29,179
def plot_from_data(data_list, exp2dataset=None, exp_to_plot=None, figsize=None,
                   xaxis='TotalEnvInteracts', value_list=['Performance',], color_list=None,
                   linestyle_list=None, label_list=None, count=False, font_scale=1.5,
                   smooth=1, estimator='mean', no_legend=False, legend_loc='best',
                   title=None, save_name=None, save_path = None, xlimit=-1, y_limit=None,
                   label_font_size=24, xlabel=None, ylabel=None, y_log_scale=False):
    """
    either give a data list or give a exp2dataset dictionary, and then specify the exp_to_plot
    will plot each experiment setting in data_list to the same figure.
    the label will be basically 'Condition1' column by default
    causal_plot will not care about order and will be messy and each time the order can be different.
    for value list if it contains only one value then we will use that for all experiments

    NOTE(review): mutable default for value_list is shared across calls;
    also requires seaborn <= 0.8.1 (tsplot) — both worth confirming.
    """
    if data_list is not None:
        data_list = data_list
    else:
        data_list = select_data_list_for_plot(exp2dataset, exp_to_plot)
    n_curves = len(data_list)
    # A single value is broadcast to every experiment.
    if len(value_list) == 1:
        value_list = [value_list[0] for _ in range(n_curves)]
    default_colors = ['tab:blue','tab:orange','tab:green','tab:red',
                      'tab:purple','tab:brown','tab:pink','tab:grey','tab:olive','tab:cyan',]
    color_list = default_colors if color_list is None else color_list
    condition = 'Condition2' if count else 'Condition1'
    estimator = getattr(np, estimator)  # choose what to show on main curve: mean? max? min?
    plt.figure(figsize=figsize) if figsize else plt.figure()
    ##########################
    value_list_smooth_temp = []
    """
    smooth data with moving window average.
    that is, smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
    where the "smooth" param is width of that window (2k+1)
    IF WE MODIFY DATA DIRECTLY, THEN CAN LEAD TO PLOTTING BUG WHERE IT'S MODIFIED MULTIPLE TIMES
    """
    y = np.ones(smooth)
    for i, data_seeds in enumerate(data_list):
        # Smoothed values go into a temporary column so the raw data in the
        # dataframes is never mutated (see the warning above).
        temp_value_name = value_list[i] + '__smooth_temp'
        value_list_smooth_temp.append(temp_value_name)
        for data in data_seeds:
            x = np.asarray(data[value_list[i]].copy())
            z = np.ones(len(x))
            # Dividing by the convolved ones normalizes correctly at the edges.
            smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
            # data[value_list[i]] = smoothed_x # this can be problematic
            if temp_value_name not in data:
                data.insert(len(data.columns), temp_value_name, smoothed_x)
            else:
                data[temp_value_name] = smoothed_x
    sns.set(style="darkgrid", font_scale=font_scale)
    # sns.set_palette('bright')
    # have the same axis (figure), plot one by one onto it
    ax = None
    if version.parse(sns.__version__) <= version.parse('0.8.1'):
        for i, data_seeds in enumerate(data_list):
            data_combined = pd.concat(data_seeds, ignore_index=True)
            ax = sns.tsplot(data=data_combined, time=xaxis, value=value_list_smooth_temp[i],
                            unit="Unit", condition=condition, legend=(not no_legend),
                            ci='sd', n_boot=0, color=color_list[i], ax=ax)
    else:
        print("Error: Seaborn version > 0.8.1 is currently not supported.")
        quit()
    # Optional per-curve linestyle/label overrides.
    if linestyle_list is not None:
        for i in range(len(linestyle_list)):
            ax.lines[i].set_linestyle(linestyle_list[i])
    if label_list is not None:
        for i in range(len(label_list)):
            ax.lines[i].set_label(label_list[i])
    xlabel = 'environment interactions' if xlabel is None else xlabel
    if ylabel is None:
        ylabel = 'average test return'
    elif ylabel == 'auto':
        # 'auto' looks the value name up in the module-level y2ylabel_dict.
        if value_list[0] in y2ylabel_dict:
            ylabel = y2ylabel_dict[value_list[0]]
        else:
            ylabel = value_list[0]
    else:
        ylabel = ylabel
    plt.xlabel(xlabel, fontsize=label_font_size)
    plt.ylabel(ylabel, fontsize=label_font_size)
    if not no_legend:
        plt.legend(loc=legend_loc, fontsize=label_font_size)
    """
    For the version of the legend used in the Spinning Up benchmarking page,
    plt.legend(loc='upper center', ncol=6, handlelength=1,
               mode="expand", borderaxespad=0., prop={'size': 13})
    """
    xscale = np.max(np.asarray(data[xaxis])) > 5e3
    if xscale:
        # Just some formatting niceness: x-axis scale in scientific notation if max x is large
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    if y_log_scale:
        plt.yscale('log')
    if xlimit > 0:
        plt.xlim(0, xlimit)
    if y_limit:
        plt.ylim(y_limit[0], y_limit[1])
    if title:
        plt.title(title)
    plt.tight_layout()
    # Save to disk when a file name is given, otherwise show interactively.
    if save_name is not None:
        fig = plt.gcf()
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        fig.savefig(os.path.join(save_path, save_name))
        plt.close(fig)
    else:
        plt.show()
29,180
def circulation(**kwargs: Any) -> str:
    """Url to get :class:`~pymultimatic.model.component.Circulation` details.

    Keyword arguments are substituted into the module-level ``_CIRCULATION``
    URL template via ``str.format``.
    """
    return _CIRCULATION.format(**kwargs)
29,181
def split_parentheses(info):
    """
    make all strings inside parentheses a list

    :param info: a list of strings
    :return: info list without parentheses; tokens between "(" and ")" are
        grouped into a nested list
    """
    # While `inside` is set we accumulate tokens into `grouped`; a ")"
    # flushes the accumulated group into the result as a sublist.
    result = []
    grouped = []
    inside = False
    for token in info:
        if token == "(":
            inside = True
        elif token == ")":
            inside = False
            result.append(grouped)
            grouped = []
        elif inside:
            grouped.append(token)
        else:
            result.append(token)
    return result
29,182
def event(
    name: Optional[str] = None, *, handler: bool = False
) -> Callable[[EventCallable], EventCallable]:
    """Create a new event using the signature of a decorated function.

    Events must be defined before handlers can be registered using
    before_event, on_event, after_event, or event_handler.

    :param name: optional event name; defaults to the decorated function's
        ``__name__``.
    :param handler: When True, the decorated function implementation is
        registered as an on event handler.
    """

    def decorator(fn: EventCallable) -> EventCallable:
        event_name = name if name else fn.__name__
        # Walk one frame up to capture the defining module's name, so the
        # event is registered against the caller's module rather than this one.
        module = inspect.currentframe().f_back.f_locals.get("__module__", None)
        if handler:
            # If the method body is a handler, pass the signature directly into `create_event`
            # as we are going to pass the method body into `on_event`
            signature = inspect.Signature.from_callable(fn)
            create_event(event_name, signature, module=module)
        else:
            create_event(event_name, fn, module=module)

        if handler:
            # Register the same function as an on-event handler and return
            # whatever on_event's decorator produces.
            decorator = on_event(event_name)
            return decorator(fn)
        else:
            return fn

    return decorator
29,183
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    NOTE(review): Python 2 implementation (func.im_self, unicode, tuple
    parameter unpacking) — do not run under Python 3.
    """
    args, varargs, varkw, defaults = getargspec(func)
    f_name = func.__name__
    arg2value = {}

    # The following closures are basically because of tuple parameter unpacking.
    assigned_tuple_params = []
    def assign(arg, value):
        # Recursively bind a (possibly nested tuple) parameter to a value.
        if isinstance(arg, str):
            arg2value[arg] = value
        else:
            assigned_tuple_params.append(arg)
            value = iter(value)
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise ValueError('need more than %d %s to unpack' %
                                     (i, 'values' if i > 1 else 'value'))
                assign(subarg,subvalue)
            try:
                next(value)
            except StopIteration:
                pass
            else:
                raise ValueError('too many values to unpack')
    def is_assigned(arg):
        # A tuple parameter is tracked by identity, a plain name by key.
        if isinstance(arg,str):
            return arg in arg2value
        return arg in assigned_tuple_params

    if ismethod(func) and func.im_self is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.im_self,) + positional
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0

    # Bind positional arguments first, then overflow into *varargs.
    for arg, value in zip(args, positional):
        assign(arg, value)
    if varargs:
        if num_pos > num_args:
            assign(varargs, positional[-(num_pos-num_args):])
        else:
            assign(varargs, ())
    elif 0 < num_args < num_pos:
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        if varkw:
            if num_pos:
                # XXX: We should use num_pos, but Python also uses num_total:
                raise TypeError('%s() takes exactly 0 arguments '
                                '(%d given)' % (f_name, num_total))
        else:
            raise TypeError('%s() takes no arguments (%d given)' %
                            (f_name, num_total))

    # Bind keyword arguments, rejecting duplicates of already-bound names.
    for arg in args:
        if isinstance(arg, str) and arg in named:
            if is_assigned(arg):
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                assign(arg, named.pop(arg))
    if defaults:    # fill in any missing values with the defaults
        for arg, value in zip(args[-num_defaults:], defaults):
            if not is_assigned(arg):
                assign(arg, value)
    if varkw:
        assign(varkw, named)
    elif named:
        unexpected = next(iter(named))
        if isinstance(unexpected, unicode):
            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))

    # Anything still unbound at this point is a genuinely missing argument.
    unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    return arg2value
29,184
def add_target_to_anchors(string_to_fix, target="_blank"):
    """Given arbitrary string, find <a> tags and add target attributes"""
    anchor_pattern = re.compile("<a(?P<attributes>.*?)>")
    target_pattern = re.compile("target=['\"].+?['\"]")

    def fix_anchor(matchobj):
        # Rewrite an existing target attribute, or append one if absent.
        attributes = matchobj.group("attributes")
        if target_pattern.search(attributes):
            return "<a%s>" % target_pattern.sub("target='%s'" % target, attributes)
        return "<a%s target='%s'>" % (attributes, target)

    return anchor_pattern.sub(fix_anchor, string_to_fix)
29,185
def find_test(test_name):
    """
    Find test will walk the test repo for a specific key value.
    """
    # TODO: not implemented yet; currently a no-op that returns None.
    pass
29,186
def set_owner():
    """Handles 'mrlist setowner'.

    NOTE(review): Python 2 code (print statement). Reads the global
    ``client`` and parsed ``args`` defined elsewhere in this file.
    """
    mlist = List(client, args.list)
    # Resolve the new owner; False appears to disable strict matching — confirm.
    new_owner = resolve_member(args.owner, False)
    if not new_owner:
        raise UserError('Unable to determine owner type')
    mlist.setOwner(new_owner)
    print "Successfully changed owner of list %s to %s" % (common.emph_text(args.list), common.emph_text(str(new_owner)))
29,187
def startup():
    """Entry point when not imported but executed"""
    # Parse the CLI and forward every option to main() as keyword arguments.
    parsed_args = get_argparser().parse_args()
    main(**vars(parsed_args))
29,188
def run(dataset, runs, seasons, steering, relative=None, reference_span=30,
        reference_period=None, plottype='png', writecsv=False):
    """Compute and plot CMIP/run percentiles per epoch, variable and season.

    For every steering epoch, a reference period of ``reference_span`` years
    centred on the epoch is used to compute percentile distributions per
    variable and season (via ``calc``), optionally written to CSV, and then
    plotted together with per-scenario percentiles from the runs.

    :param dataset: grouped by its 'var' column for the CMIP percentiles
    :param runs: per-run data, filtered by 'var' for scenario percentiles
    :param seasons: iterable of season keys passed to ``calc``
    :param steering: dataframe with 'epoch', 'name' and 'period' columns
    :param relative: variables treated as relative changes (default: none)
    :param reference_span: width in years of the epoch-centred window
    :param reference_period: baseline period; defaults to the configured
        CMIP control period
    :param plottype: image extension for the saved figure
    :param writecsv: when True, also dump the percentile tables as CSV
    """
    if reference_period is None:
        reference_period = default_config['data']['cmip']['control_period']
    if relative is None:
        relative = []
    for epoch, steering_group in steering.groupby('epoch'):
        # Window of reference_span years centred on the epoch.
        period = epoch - reference_span // 2 + 1, epoch + reference_span // 2
        for var, vargroup in dataset.groupby('var'):
            rel = var in relative
            for season in seasons:
                logger.info("Calculating CMIP percentiles for %s, season %s, epoch %s",
                            var, season, epoch)
                perc_distr, perc = calc(vargroup.copy(), season, period, relative=rel,
                                        reference_period=reference_period)
                if writecsv:
                    filename = f"{var}_{epoch}_{season}_perc_distr.csv"
                    perc_distr.to_csv(filename, index=True)
                    filename = f"{var}_{epoch}_{season}_perc.csv"
                    perc.to_csv(filename, index=True)

                # Per-scenario percentiles from the runs for this epoch.
                scenarios = {}
                for _, row in steering_group.iterrows():
                    data = runs.loc[runs['var'] == var, :].copy()
                    name = row['name'].rstrip('0123456789')  # remove the year part
                    period = [int(year) for year in row['period'].strip('()').split(',')]
                    logger.info("Calculating runs percentiles for %s, season %s, epoch %s, "
                                "scenario %s", var, season, epoch, name)
                    _, scenarios[name] = calc(data, season, period, relative=rel,
                                              reference_period=reference_period)
                    if writecsv:
                        filename = f"{var}_{epoch}_{season}_{name}_perc.csv"
                        scenarios[name].to_csv(filename, index=False)

                labels = {
                    'title': '',
                    'text': f"{VARNAME[var]}, {season.upper()}",
                    'y': YTITLE[var],
                    'x': '',
                    'epoch': epoch,
                }
                columns = ['mean', '5', '10', '50', '90', '95']
                xlabels = ['ave', 'P05', 'P10', 'P50', 'P90', 'P95']
                logger.info("Creating plot for variable %s, season %s, epoch %s")
                plot(perc_distr, labels, limits=None, columns=columns,
                     xlabels=xlabels, scenarios=scenarios)
                plt.tight_layout()
                filename = f"{var}_{epoch}_{season}.{plottype.lower()}"
                plt.savefig(filename, bbox_inches='tight')
29,189
def calcInvariants(S, R, gradT, with_tensor_basis=False, reduced=True): """ This function calculates the invariant basis at one point. Arguments: S -- symmetric part of local velocity gradient (numpy array shape (3,3)) R -- anti-symmetric part of local velocity gradient (numpy array shape (3,3)) gradT -- array with local temperature gradient (numpy array shape (3,)) with_tensor_basis -- optional, a flag that determines whether to also calculate tensor basis. By default, it is false (so only invariants are returned) reduced -- optional argument, a boolean flag that determines whether the features that depend on a vector (lambda 7 thru lambda 13) should be calculated. If reduced==True, extra features are NOT calculated. Default value is True. Returns: invariants -- array of shape (n_features-2,) that contains the invariant basis from the gradient tensors that are used by the ML model to make a prediction at the current point. tensor_basis -- array of shape (n_basis,3,3) that contains the form invariant tensor basis that are used by the TBNN to construct the tensorial diffusivity at the current point. 
# Taken from the paper of Zheng, 1994, "Theory of representations for tensor functions - A unified invariant approach to constitutive equations" """ # For speed, pre-calculate these S2 = np.linalg.multi_dot([S, S]) R2 = np.linalg.multi_dot([R, R]) S_R2 = np.linalg.multi_dot([S, R2]) ### Fill basis 0-12 if reduced: num_features = constants.NUM_FEATURES_F2-2 else: num_features = constants.NUM_FEATURES_F1-2 invariants = np.empty(num_features) # Velocity gradient only (0-5) invariants[0] = np.trace(S2) invariants[1] = np.trace(np.linalg.multi_dot([S2, S])) invariants[2] = np.trace(R2) invariants[3] = np.trace(S_R2) invariants[4] = np.trace(np.linalg.multi_dot([S2, R2])) invariants[5] = np.trace(np.linalg.multi_dot([S2, R2, S, R])) # Velocity + temperature gradients (6-12) if not reduced: invariants[6] = np.linalg.multi_dot([gradT, gradT]) invariants[7] = np.linalg.multi_dot([gradT, S, gradT]) invariants[8] = np.linalg.multi_dot([gradT, S2, gradT]) invariants[9] = np.linalg.multi_dot([gradT, R2, gradT]) invariants[10] = np.linalg.multi_dot([gradT, S, R, gradT]) invariants[11] = np.linalg.multi_dot([gradT, S2, R, gradT]) invariants[12] = np.linalg.multi_dot([gradT, R, S_R2, gradT]) # Also calculate the tensor basis if with_tensor_basis: tensor_basis = np.empty((constants.N_BASIS,3,3)) tensor_basis[0,:,:] = np.eye(3) tensor_basis[1,:,:] = S tensor_basis[2,:,:] = R tensor_basis[3,:,:] = S2 tensor_basis[4,:,:] = R2 tensor_basis[5,:,:] = np.linalg.multi_dot([S, R]) + np.linalg.multi_dot([R, S]) return invariants, tensor_basis return invariants
29,190
def with_uproot(histo_path: str) -> bh.Histogram:
    """Load a histogram from a ROOT file via uproot.

    Args:
        histo_path (str): path to histogram; a colon separates the path to
            the file from the path to the histogram within the file
            (example: ``file.root:h1``)

    Returns:
        bh.Histogram: histogram containing data
    """
    return uproot.open(histo_path).to_boost()
29,191
def slide5x5(xss):
    """Return sliding windows over *xss*, five artists per step."""
    window = 5
    return slidingwindow(window, window, xss)
29,192
def compute_consensus_rule(
    profile,
    committeesize,
    algorithm="fastest",
    resolute=True,
    max_num_of_committees=MAX_NUM_OF_COMMITTEES_DEFAULT,
):
    """
    Compute winning committees with the Consensus rule.

    Based on Perpetual Consensus from
    Martin Lackner, "Perpetual Voting: Fairness in Long-Term Decision Making",
    Proceedings of the 34th AAAI Conference on Artificial Intelligence (AAAI 2020).

    Parameters
    ----------
        profile : abcvoting.preferences.Profile
            A profile.

        committeesize : int
            The desired committee size.

        algorithm : str, optional
            The algorithm to be used. Available algorithms for the
            Consensus rule:

            .. doctest::

                >>> Rule("consensus-rule").algorithms
                ('float-fractions', 'gmpy2-fractions', 'standard-fractions')

        resolute : bool, optional
            Return only one winning committee. With `resolute=False`, all
            winning committees are computed (subject to
            `max_num_of_committees`).

        max_num_of_committees : int, optional
            At most `max_num_of_committees` winning committees are computed.
            `None` means unrestricted; the default can be changed via the
            constant `MAX_NUM_OF_COMMITTEES_DEFAULT`.

    Returns
    -------
        list of CandidateSet
            A list of winning committees.
    """
    rule = Rule("consensus-rule")
    if algorithm == "fastest":
        algorithm = rule.fastest_available_algorithm()

    # The same parameter set is validated first, then handed to the solver.
    params = dict(
        profile=profile,
        committeesize=committeesize,
        algorithm=algorithm,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
    )
    rule.verify_compute_parameters(**params)
    committees, detailed_info = _consensus_rule_algorithm(**params)

    # optional output
    output.info(header(rule.longname), wrap=False)
    if not resolute:
        output.info("Computing all possible winning committees for any tiebreaking order")
        output.info(" (aka parallel universes tiebreaking) (resolute=False)\n")
    output.details(f"Algorithm: {ALGORITHM_NAMES[algorithm]}\n")
    output.info(
        str_committees_with_header(committees, cand_names=profile.cand_names, winning=True)
    )
    # end of optional output

    return committees
29,193
def plotprops(labelfontsize=18, legend=True, option=1, loc='upper right'):
    """
    Define other properties of the SED plot: axis limits, labels, font
    sizes and a secondary X axis in common wavelength/energy units.

    Arguments:
    labelfontsize -- font size for the axis labels (default 18)
    legend -- if True, draw the legend (default True)
    option -- axis-range preset:
              1 = Nemmen et al. 2014 plots,
              2 = Hayden plots,
              3 = wide x-axis including Fermi data
    loc -- legend location passed to pylab.legend (default 'upper right')
    """
    # Increase the size of the fonts
    pylab.rcParams.update({'font.size': 15})

    # Defines general properties of the plot
    if option == 1:
        pylab.xlim(8, 20)  # Nemmen et al. 2014 plots
        pylab.ylim(35, 44)
    if option == 2:
        pylab.ylim(34, 40)  # Hayden plots
        pylab.xlim(8, 19)
    if option == 3:
        pylab.xlim(8, 27)  # with Fermi data (large x-axis!)

    # Raw strings avoid invalid-escape-sequence warnings for LaTeX markup
    # (e.g. '\m', '\A'); the rendered label text is unchanged.
    pylab.xlabel(r'log($\nu$ / Hz)', fontsize=labelfontsize)
    pylab.ylabel(r'log($\nu L_\nu$ / erg s$^{-1}$)', fontsize=labelfontsize)
    pylab.minorticks_on()

    if legend is True:
        pylab.legend(loc=loc, frameon=False)

    # Add second X axis with common units of wavelength
    ax1 = pylab.subplot(111)
    ax2 = pylab.twiny()
    pylab.minorticks_on()
    if option == 1 or option == 2:
        ax1.set_xlim(8, 20)
        ax2.set_xlim(8, 20)  # set this to match the lower X axis
        ax2.set_xticks([8.477, 9.477, 10.477, 11.477, 12.477, 13.477,
                        14.477, 15.4768, 16.383, 17.383, 18.383, 19.383])
        ax2.set_xticklabels([r'1m', r'10cm', r'1cm', r'1mm', r'100$\mu$m',
                             r'10$\mu$m', r'1$\mu$m', r'1000$\AA$', r'.1keV',
                             r'1keV', r'10keV', r'100keV'], size=10.5)
    if option == 3:
        ax1.set_xlim(8, 27)
        ax2.set_xlim(8, 27)  # set this to match the lower X axis
        ax2.set_xticks([8.477, 10.477, 12.477, 14.477, 16.383, 18.383,
                        20.383, 22.383, 24.383, 26.383])
        ax2.set_xticklabels([r'1m', r'1cm', r'100$\mu$m', r'1$\mu$m',
                             r'0.1keV', r'10keV', r'1MeV', r'100MeV',
                             r'10GeV', r'1TeV'], size=10.5)
29,194
def addcron():
    """
    Create a scheduled (cron) job from the JSON POST body.

    Expected payload (example; field comments translated from Chinese):
    {
        "uid": "Zhang San",                       # requester (overwritten below)
        "mission_name": "scheduled job name",
        "pid": "c3009c8e62544a23ba894fe5519a6b64",
        "EnvId": "9d289cf07b244c91b81ce6bb54f2d627",
        "SuiteIdList": ["75cc456d9c4d41f6980e02f46d611a5c"],
        "runDate": 1239863854,
        "interval": 60,
        "alwaysSendMail": true,
        "alarmMailGroupList": "['4dc0e648e61846a4aca01421aa1202e2', '2222222222222']",
        "triggerType": "interval"
    }

    Returns a JSON response: {'status': 'ok', 'object_id': ...} on success,
    or {'status': 'failed', 'data': ...} on error.  The user-facing error
    strings are Chinese: '名字已存在' = "name already exists",
    '新建失败' = "creation failed".
    """
    try:
        # Required fields raise if missing (throwable=True); optional ones
        # are merged in on top.
        require_items = get_post_items(request, CronJob.REQUIRE_ITEMS, throwable=True)
        option_items = get_post_items(request, CronJob.OPTIONAL_ITEMS)
        require_items.update(option_items)
        # Force ownership to the authenticated user, ignoring any posted uid.
        require_items.update({"uid": g.user_object_id})
        # Reject duplicate mission names.
        mission_name = get_models_filter(CronJob, CronJob.mission_name == require_items["mission_name"])
        if mission_name != []:
            return jsonify({'status': 'failed', 'data': '名字已存在'})
        # Persist the mail-group list as its string representation.
        temp = require_items.get("alarmMailGroupList")
        require_items["alarmMailGroupList"] = str(temp)
        # Run_Times validates the schedule; presumably returns True on
        # success or a JSON-serializable error payload otherwise — the
        # else-branch below forwards it verbatim (TODO confirm contract).
        times = Run_Times(**require_items)
        if times == True:
            # Persist the job, then register it with the scheduler.
            _model = create_model(CronJob, **require_items)
            cron_manager.add_cron(
                **{
                    "mission_name": require_items.get("mission_name"),
                    "mode": require_items.get("triggerType"),
                    "seconds": require_items.get("interval"),
                    "run_Date": require_items.get("runDate"),
                    "task_Job": require_items,
                    "object_id": _model.object_id,
                })
            return jsonify({'status': 'ok', 'object_id': _model.object_id})
        else:
            return jsonify(times)
    except BaseException as e:
        # Broad catch: any failure is reported back to the client as JSON.
        return jsonify({'status': 'failed', 'data': '新建失败 %s' % e})
29,195
def dict_expand(d, prefix=None):
    """
    Recursively flatten nested dictionaries into tuple keys.

    dict_expand({1:{2:3}, 4:5}) = {(1,2):3, 4:5}
    """
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse, tagging the sub-dictionary's keys with this key.
            flat.update(dict_expand(value, prefix=key))
        else:
            flat[key] = value
    if prefix is None:
        return flat
    # Prepend the prefix to every key, normalizing both sides to tuples.
    return {make_tuple(prefix) + make_tuple(k): v for k, v in flat.items()}
29,196
def parse_config_list(config_list):
    """
    Parse a list of configuration properties separated by the configured
    separator (constants.CONFIG_SEPARATOR, e.g. '=').

    Arguments:
    config_list -- iterable of "config<sep>value" strings, or None.

    Returns:
    dict mapping config name -> value ({} when config_list is None).

    Raises:
    ValueError -- if an entry does not contain exactly one separator.
    """
    if config_list is None:
        return {}
    mapping = {}
    for pair in config_list:
        # Exactly one separator is required; count()!=1 also covers the
        # "separator absent" case (count 0).
        if pair.count(constants.CONFIG_SEPARATOR) != 1:
            # Interpolate the separator into the message: the original code
            # passed it as a second ValueError argument, so '%s' was never
            # substituted.
            raise ValueError(
                "configs must be passed as two strings separated by a %s"
                % constants.CONFIG_SEPARATOR
            )
        (config, value) = pair.split(constants.CONFIG_SEPARATOR)
        mapping[config] = value
    return mapping
29,197
def _convert_word2id(insts, operator): """ :param insts: :param operator: :return: """ # print(len(insts)) # for index_inst, inst in enumerate(insts): for inst in insts: # copy with the word and pos for index in range(inst.words_size): word = inst.words[index] wordId = operator.word_alphabet.loadWord2idAndId2Word(word) # if wordID is None: if wordId == -1: wordId = operator.word_unkId inst.words_index.append(wordId) label = inst.labels[index] labelId = operator.label_alphabet.loadWord2idAndId2Word(label) inst.label_index.append(labelId) char_index = [] for char in inst.chars[index]: charId = operator.char_alphabet.loadWord2idAndId2Word(char) if charId == -1: charId = operator.char_unkId char_index.append(charId) inst.chars_index.append(char_index)
29,198
def test_heartrate_reject():
    """Test the correct rejection for invalid heartrate datapoints."""
    tester = AcceptanceTester('heartrate')
    stamp = datetime(2018, 1, 1, 12, 0, 0)
    # Zero, negative, and sensor-saturation values must all be rejected.
    for bad_value in (0, -1, 255, 256):
        assert tester(Datapoint(stamp, bad_value)) is False
29,199