Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def sort_characters(text, alphabet):
    """Counting Sort"""
    dim = len(text)
    order = [0] * dim
    count = {char: 0 for char in alphabet}
    # Count occurrences of each character.
    for char in text:
        count[char] += 1
    # Turn counts into cumulative end positions.
    for j in range(1, len(alphabet)):
        count[alphabet[j]] += count[alphabet[j - 1]]
    # Walk the text backwards to keep the sort stable.
    for i, char in reversed(tuple(enumerate(text))):
        count[char] -= 1
        order[count[char]] = i
    return order
9beb0f28a7f1ffb892e1393522b94f12e873ba66
3,633,600
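A quick sanity check of the counting sort above, using the '$ACGT' alphabet common in suffix-array work (the input string and expected output below are illustrative, not from the original row):

order = sort_characters("ACA$", "$ACGT")
# order[i] gives the text index of the i-th smallest character:
# '$' at 3, then the 'A's at 0 and 2 (stable), then 'C' at 1.
print(order)  # [3, 0, 2, 1]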
def get_rp_throughput_summary(isamAppliance, date, duration, aspect, summary=True,
                              check_mode=False, force=False):
    """
    Retrieving a summary of throughput for all Reverse Proxy instances
    """
    return isamAppliance.invoke_get(
        "Retrieving a summary of throughput for all Reverse Proxy instances",
        "/analysis/reverse_proxy_traffic/throughput/{0}".format(
            tools.create_query_string(summary=summary, date=date,
                                      duration=duration, aspect=aspect)))
a559ae507939d8fc29db8ac92fdacda7742ee522
3,633,601
from typing import Dict, List, Optional, Tuple, Union


def do_regress(
    test_features: np.ndarray,
    train_features: np.ndarray,
    train_targets: np.ndarray,
    nn_count: int = 30,
    batch_count: int = 200,
    loss_method: str = "mse",
    sigma_method: Optional[str] = "analytic",
    variance_mode: Optional[str] = None,
    kern: Optional[str] = None,
    k_kwargs: Union[Dict, Union[List[Dict], Tuple[Dict, ...]]] = dict(),
    nn_kwargs: Dict = dict(),
    apply_sigma_sq: bool = True,
    return_distances: bool = False,
    verbose: bool = False,
) -> Union[
    Tuple[Union[MuyGPS, MMuyGPS], NN_Wrapper, np.ndarray],
    Tuple[Union[MuyGPS, MMuyGPS], NN_Wrapper, np.ndarray, np.ndarray],
    Tuple[
        Union[MuyGPS, MMuyGPS], NN_Wrapper, np.ndarray, np.ndarray, np.ndarray
    ],
    Tuple[
        Union[MuyGPS, MMuyGPS],
        NN_Wrapper,
        np.ndarray,
        np.ndarray,
        np.ndarray,
        np.ndarray,
    ],
]:
    """
    Convenience function initializing a model and performing regression.

    Expected parameters include keyword argument dicts specifying kernel
    parameters and nearest neighbor parameters. See the docstrings of the
    appropriate functions for specifics.

    Also supports workflows relying upon multivariate models. In order to
    create a multivariate model, specify the `kern` argument and pass a list
    of hyperparameter dicts to `k_kwargs`.

    Example:
        >>> from MuyGPyS.testing.test_utils import _make_gaussian_data
        >>> from MuyGPyS.examples.regress import do_regress
        >>> from MuyGPyS.optimize.objective import mse_fn
        >>> train, test = _make_gaussian_data(10000, 1000, 100, 10)
        >>> nn_kwargs = {"nn_method": "exact", "algorithm": "ball_tree"}
        >>> k_kwargs = {
        ...     "kern": "rbf",
        ...     "metric": "F2",
        ...     "eps": {"val": 1e-5},
        ...     "length_scale": {"val": 1.0, "bounds": (1e-2, 1e2)}
        ... }
        >>> muygps, nbrs_lookup, predictions, variance = do_regress(
        ...     test['input'],
        ...     train['input'],
        ...     train['output'],
        ...     nn_count=30,
        ...     batch_count=200,
        ...     loss_method="mse",
        ...     variance_mode="diagonal",
        ...     k_kwargs=k_kwargs,
        ...     nn_kwargs=nn_kwargs,
        ...     verbose=False,
        ... )
        >>> # Can alternately return distance tensors for reuse
        >>> muygps, nbrs_lookup, predictions, variance, crosswise_dists, pairwise_dists = do_regress(
        ...     test['input'],
        ...     train['input'],
        ...     train['output'],
        ...     nn_count=30,
        ...     batch_count=200,
        ...     loss_method="mse",
        ...     variance_mode="diagonal",
        ...     k_kwargs=k_kwargs,
        ...     nn_kwargs=nn_kwargs,
        ...     return_distances=True,
        ...     verbose=False,
        ... )
        >>> mse = mse_fn(test['output'], predictions)
        >>> print(f"obtained mse: {mse}")
        obtained mse: 0.20842...

    Args:
        test_features:
            A matrix of shape `(test_count, feature_count)` whose rows consist
            of observation vectors of the test data.
        train_features:
            A matrix of shape `(train_count, feature_count)` whose rows
            consist of observation vectors of the train data.
        train_targets:
            A matrix of shape `(train_count, response_count)` whose rows
            consist of response vectors of the train data.
        nn_count:
            The number of nearest neighbors to employ.
        batch_count:
            The number of elements to sample for the batch used in
            hyperparameter optimization.
        loss_method:
            The loss method to use in hyperparameter optimization. Ignored if
            all of the parameters specified by argument `k_kwargs` are fixed.
            Currently supports only `"mse"` for regression.
        sigma_method:
            The optimization method to be employed to learn the `sigma_sq`
            hyperparameter. Currently supports only `"analytic"` and `None`.
            If the value is not `None`, the returned
            :class:`MuyGPyS.gp.muygps.MuyGPS` object will possess a `sigma_sq`
            member whose value, invoked via `muygps.sigma_sq()`, is a
            `(response_count,)` vector to be used for scaling posterior
            variances.
        variance_mode:
            Specifies the type of variance to return. Currently supports
            `diagonal` and None. If None, report no variance term.
        kern:
            The kernel function to be used. See :ref:`MuyGPyS-gp-kernels` for
            details. Only used in the multivariate case. If `None`, assume
            that we are not using a multivariate model.
        k_kwargs:
            If given a list or tuple of length `response_count`, assume that
            the elements are dicts containing kernel initialization keyword
            arguments for the creation of a multivariate model (see
            :func:`~MuyGPyS.examples.regress.make_multivariate_regressor`).
            If given a dict, assume that the elements are keyword arguments
            to a MuyGPs model (see
            :func:`~MuyGPyS.examples.regress.make_regressor`).
        nn_kwargs:
            Parameters for the nearest neighbors wrapper. See
            :class:`MuyGPyS.neighbors.NN_Wrapper` for the supported methods
            and their parameters.
        apply_sigma_sq:
            If `True` and `variance_mode is not None`, automatically scale
            the posterior variances by `sigma_sq`.
        return_distances:
            If `True`, returns a `(test_count, nn_count)` matrix containing
            the crosswise distances between the test elements and their
            nearest neighbor sets and a `(test_count, nn_count, nn_count)`
            tensor containing the pairwise distances between the test's
            nearest neighbor sets.
        verbose:
            If `True`, print summary statistics.

    Returns
    -------
    muygps:
        A (possibly trained) MuyGPs object.
    nbrs_lookup:
        A data structure supporting nearest neighbor queries into
        `train_features`.
    predictions:
        The predicted response associated with each test observation.
    variance:
        Estimated posterior variance of each test prediction. If
        `variance_mode == "diagonal"` return a
        `(test_count, response_count)` matrix where each row is the posterior
        variance. If `sigma_method is not None` and `apply_sigma_sq is True`,
        each column of the variance is automatically scaled by the
        corresponding `sigma_sq` parameter.
    crosswise_dists:
        A matrix of shape `(test_count, nn_count)` whose rows list the
        distance of the corresponding test element to each of its nearest
        neighbors. Only returned if `return_distances is True`.
    pairwise_dists:
        A tensor of shape `(test_count, nn_count, nn_count,)` whose latter
        two dimensions contain square matrices containing the pairwise
        distances between the nearest neighbors of the test elements. Only
        returned if `return_distances is True`.
    """
    if sigma_method is None:
        apply_sigma_sq = False
    regressor_args = _decide_and_make_regressor(
        train_features,
        train_targets,
        nn_count=nn_count,
        batch_count=batch_count,
        loss_method=loss_method,
        sigma_method=sigma_method,
        kern=kern,
        k_kwargs=k_kwargs,
        nn_kwargs=nn_kwargs,
        return_distances=False,
        verbose=verbose,
    )
    regressor, regressor_args_less1 = _unpack(*regressor_args)
    nbrs_lookup, regressor_args_less2 = _unpack(*regressor_args_less1)
    if len(regressor_args_less2) > 0:
        # Should not get here
        # crosswise_dists, pairwise_dists = regressor_args_less2
        pass
    prediction_args, pred_timing = regress_any(
        regressor,
        test_features,
        train_features,
        nbrs_lookup,
        train_targets,
        variance_mode=variance_mode,
        apply_sigma_sq=apply_sigma_sq,
        return_distances=return_distances,
    )
    # predictions, prediction_args_less1 = _unpack(*prediction_args)
    if verbose is True:
        print("prediction time breakdown:")
        for k in pred_timing:
            print(f"\t{k} time:{pred_timing[k]}s")
    if variance_mode is None and len(regressor_args_less2) == 0:
        return regressor, nbrs_lookup, prediction_args
    elif variance_mode is not None and return_distances is False:
        predictions, prediction_args_less1 = _unpack(*prediction_args)
        variance, prediction_args_less2 = _unpack(*prediction_args_less1)
        return regressor, nbrs_lookup, predictions, variance
    elif variance_mode is None and len(regressor_args_less2) > 0:
        predictions, prediction_args_less1 = _unpack(*prediction_args)
        crosswise_dists, prediction_args_less2 = _unpack(*prediction_args_less1)
        pairwise_dists, prediction_args_less3 = _unpack(*prediction_args_less2)
        return (
            regressor,
            nbrs_lookup,
            predictions,
            crosswise_dists,
            pairwise_dists,
        )
    else:
        predictions, prediction_args_less1 = _unpack(*prediction_args)
        variance, prediction_args_less2 = _unpack(*prediction_args_less1)
        crosswise_dists, prediction_args_less3 = _unpack(*prediction_args_less2)
        pairwise_dists, prediction_args_less4 = _unpack(*prediction_args_less3)
        return (
            regressor,
            nbrs_lookup,
            predictions,
            variance,
            crosswise_dists,
            pairwise_dists,
        )
49257c187462cfea362b35b3cb399d2851f4a7e5
3,633,602
def set_computer_policy(
    name, setting, cumulative_rights_assignments=True, adml_language="en-US"
):
    """
    Set a single computer policy

    Args:
        name (str): The name of the policy to configure
        setting (str): The setting to configure the named policy with
        cumulative_rights_assignments (bool): Determine how user rights
            assignment policies are configured. If True, user right
            assignment specifications are simply added to the existing
            policy. If False, only the users specified will get the right
            (any existing will have the right revoked)
        adml_language (str): The language files to use for looking up
            Administrative Template policy data (i.e. how the policy is
            displayed in the GUI). Defaults to 'en-US' (U.S. English).

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' lgpo.set_computer_policy LockoutDuration 1440
    """
    pol = {}
    pol[name] = setting
    ret = set_(
        computer_policy=pol,
        user_policy=None,
        cumulative_rights_assignments=cumulative_rights_assignments,
        adml_language=adml_language,
    )
    return ret
708b39564e7e97be8a986981dab3b2eedec5e01f
3,633,603
import requests


def get_api_result(url):
    """
    Retrieve JSON data from API via a supplied URL
    """
    s = requests.Session()
    r = s.get(url)
    return r.json()
933bd000b2e352f950ec86f8b6f1470ff2b0ecbd
3,633,604
from functools import reduce


def lens_compose(big_lens, *smaller_lenses):
    """
    Compose many lenses
    """
    return reduce(_lens_compose2, smaller_lenses, big_lens)
f1be58ba017235661b5cd08bf38dbdfb3fcdfdd6
3,633,605
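The helper `_lens_compose2` is referenced above but not included in this row. A minimal sketch of a plausible two-lens composition, assuming each lens is a (getter, setter) pair; the names and representation are illustrative, not the original implementation:

def _lens_compose2(outer, inner):
    # Hypothetical helper: compose two (getter, setter) lenses.
    outer_get, outer_set = outer
    inner_get, inner_set = inner

    def get(whole):
        return inner_get(outer_get(whole))

    def set_(whole, part):
        # Rebuild the outer focus with the inner focus replaced.
        return outer_set(whole, inner_set(outer_get(whole), part))

    return (get, set_)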
import collections
import logging

import tensorflow as tf  # used throughout below; missing from the original imports


def create_left_maskval_to_projmat_dict(seed, whimsy_server_weights,
                                        whimsy_client_weights, left_mask,
                                        right_mask, build_projection_matrix):
    """Creates a dictionary mapping the values of `left_mask` and `right_mask`
    to projection matrices.

    Args:
        seed: An integer used for seeding
        whimsy_server_weights: A list of weight matrices
        whimsy_client_weights: A list of weight matrices
        left_mask: A list of equal length as the weight matrices in
            `client_output`. The value `k` in the list at index `i` indicates
            that the weight at index `i` should be left multiplied by
            `left_maskval_to_projmat_dict[str(k)]`
        right_mask: A list of equal length as the weight matrices in
            `client_output`. The value `k` in the list at index `i` indicates
            that the weight at index `i` should be right multiplied by
            `left_maskval_to_projmat_dict[str(k)]`
        build_projection_matrix: A function which builds the projection matrix
            used in this function

    Returns:
        A dictionary mapping the values of `left_mask` and `right_mask` to
        projection matrices.
    """
    logging.info("starting create_left_maskval_projmat_dict")
    server_flat_mask = get_flat_mask(whimsy_server_weights)
    whimsy_server_trainable_variables = reshape_flattened_weights(
        whimsy_server_weights, server_flat_mask)
    client_flat_mask = get_flat_mask(whimsy_client_weights)
    whimsy_client_trainable_variables = reshape_flattened_weights(
        whimsy_client_weights, client_flat_mask)
    tf.debugging.assert_equal(
        server_flat_mask, client_flat_mask,
        "there should be a one-to-one correspondence between the flat tensors "
        "in server and client models.")
    left_maskval_to_projmat_dict = collections.OrderedDict()
    for idx, val in enumerate(left_mask):
        server_weight_mat_shape = tf.shape(whimsy_server_trainable_variables[idx])
        client_weight_mat_shape = tf.shape(whimsy_client_trainable_variables[idx])
        # Left multiply server_mat to generate client_mat.
        desired_shape = (client_weight_mat_shape[-2], server_weight_mat_shape[-2])
        if len(client_weight_mat_shape) == 2:
            old_desired_shape = (client_weight_mat_shape[0],
                                 server_weight_mat_shape[0])
            tf.debugging.assert_equal(desired_shape, old_desired_shape, "yoho1")
        if val < 0:
            left_maskval_to_projmat_dict[str(val)] = tf.ones([1], dtype=tf.float32)
            tf.debugging.assert_equal(server_weight_mat_shape[0],
                                      client_weight_mat_shape[0], "yoho2")
        elif val not in left_maskval_to_projmat_dict:
            projection_matrix = build_projection_matrix(
                seed=(seed, idx), desired_shape=desired_shape,
                is_left_multiply=True)
            left_maskval_to_projmat_dict[str(val)] = projection_matrix
            actual_shape = tf.shape(left_maskval_to_projmat_dict[str(val)])
            tf.debugging.assert_equal(actual_shape, desired_shape, "yoho3")
        else:
            actual_mat = left_maskval_to_projmat_dict[str(val)]
            actual_shape = tf.shape(actual_mat)
            tf.debugging.assert_equal(actual_shape, desired_shape, f"yoho4{val}")
    for idx, val in enumerate(right_mask):
        server_weight_mat_shape = tf.shape(whimsy_server_trainable_variables[idx])
        client_weight_mat_shape = tf.shape(whimsy_client_trainable_variables[idx])
        # Right multiply server_mat to generate client_mat.
        desired_shape = (server_weight_mat_shape[-1], client_weight_mat_shape[-1])
        if len(client_weight_mat_shape) == 2:
            old_desired_shape = (server_weight_mat_shape[1],
                                 client_weight_mat_shape[1])
            tf.debugging.assert_equal(desired_shape, old_desired_shape, "yoho5")
        if val < 0:
            left_maskval_to_projmat_dict[str(val)] = tf.ones([1], dtype=tf.float32)
            tf.debugging.assert_equal(server_weight_mat_shape[1],
                                      client_weight_mat_shape[1], "yoho6")
        elif val not in left_maskval_to_projmat_dict:
            projection_matrix = build_projection_matrix(
                seed=(seed, idx), desired_shape=desired_shape,
                is_left_multiply=False)
            left_maskval_to_projmat_dict[str(val)] = tf.transpose(projection_matrix)
            actual_shape = tf.shape(
                tf.transpose(left_maskval_to_projmat_dict[str(val)]))
            tf.debugging.assert_equal(actual_shape, desired_shape, "yoho7")
        else:
            # Transposed because of right multiply.
            actual_mat = tf.transpose(left_maskval_to_projmat_dict[str(val)])
            actual_shape = tf.shape(actual_mat)
            tf.debugging.assert_equal(actual_shape, desired_shape, "yoho8")
    logging.info("finished create_left_maskval_projmat_dict")
    return left_maskval_to_projmat_dict
8d3c034b97266123b583f1b890d1e019173f22ae
3,633,606
def renew_defs(func: PrimFunc):
    """Re-generate the definition nodes for a TIR, including VarDef, BufferDef.

    This pass works as a simple DeepCopy to duplicate a function with
    different Vars and Buffers but the same behavior.

    Parameters
    ----------
    func: PrimFunc
        The input function

    Returns
    -------
    result : PrimFunc
        The new generated func.
    """
    return _ffi_api.RenewDefs(func)
838ebc2e30e72d6b3a9405980be2843800091cea
3,633,607
def requires_common_raster(method):
    """
    A decorator for spectrum methods that take another spectrum as an input
    and require it to be sampled on the same wavelength raster as us.

    :param method: A method belonging to a sub-class of Spectrum.
    """

    def wrapper(spectrum, other, *args, **kwargs):
        assert isinstance(other, Spectrum), \
            "Can only copy mask from another Spectrum object."
        assert spectrum.raster_hash == other.raster_hash, \
            "Cannot do arithmetic on spectra sampled on different wavelength rasters"
        return method(spectrum, other=other, *args, **kwargs)

    return wrapper
5ba5fb3c4dca60f730e201fa0a25781f898f5c97
3,633,608
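A sketch of how the decorator above would guard a binary spectrum operation. The `Spectrum` class and its `raster_hash` attribute come from the surrounding codebase, so this minimal stand-in is illustrative only:

class Spectrum:
    # Hypothetical minimal Spectrum for illustration.
    def __init__(self, values, raster_hash):
        self.values = values
        self.raster_hash = raster_hash

    @requires_common_raster
    def subtract(self, other):
        return [a - b for a, b in zip(self.values, other.values)]

a = Spectrum([3.0, 2.0], raster_hash="abc")
b = Spectrum([1.0, 1.0], raster_hash="abc")
print(a.subtract(b))  # [2.0, 1.0]; mismatched raster_hash would trip the assert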
def k8s_conf_dict(boot_conf, hb_conf):
    """
    Generates and returns a dict of the k8s deployment configuration

    :param boot_conf: the snaps-boot config dict
    :param hb_conf: the adrenaline config dict
    :return: dict with one key 'kubernetes' containing the rest of the data
    """
    k8s_dict = __generate_base_k8s_config(boot_conf, hb_conf)
    k8s_dict['kubernetes']['node_configuration'] = __generate_node_config(
        boot_conf, hb_conf)
    return k8s_dict
49d6ee49a7c665f521dde4f9a2cf2d9e10442064
3,633,609
def autodetect_mode(a, b):
    """
    Return a code identifying the mode of operation (single, mixed, inverted
    mixed and batch), given a and b. See `ops.modes` for the meaning of codes.

    :param a: Tensor or SparseTensor.
    :param b: Tensor or SparseTensor.
    :return: mode of operation as an integer code.
    """
    a_dim = K.ndim(a)
    b_dim = K.ndim(b)
    if b_dim == 2:
        if a_dim == 2:
            return SINGLE
        elif a_dim == 3:
            return iMIXED
    elif b_dim == 3:
        if a_dim == 2:
            return MIXED
        elif a_dim == 3:
            return BATCH
    return UNKNOWN
48d7af7f075113863090380f1823349fc676f9ec
3,633,610
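For concreteness, the rank combinations map to the mode codes as listed below; a brief sketch assuming the integer mode constants from ops.modes are in scope and TensorFlow backs K:

import tensorflow as tf
# rank(a)=2, rank(b)=2 -> SINGLE  (one graph, one feature matrix)
# rank(a)=3, rank(b)=2 -> iMIXED  (inverted mixed)
# rank(a)=2, rank(b)=3 -> MIXED
# rank(a)=3, rank(b)=3 -> BATCH
mode = autodetect_mode(tf.zeros((5, 5)), tf.zeros((8, 5, 3)))  # expected: MIXED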
def f_unc(x, k, weight):
    """
    Similar to the raw function call, but uses unp instead of np for
    uncertainties calculations.

    :return:
    """
    term = 1
    # Calculate the term k^x / x!. Can't do this directly, x! is too large,
    # so build the product term by term.
    for n in range(0, int(x)):
        term *= k / (x - n) * unp.exp(-k / int(x))
    return term * weight
6f24688bd9c08d7632846b145ef540235da6cd4f
3,633,611
import csv2_help
from os import getenv


def check_keys(gvar, mp, rp, op, not_optional=[], key_map=None, requires_server=True):
    """
    Modify user settings.
    """
    # Summarize the mandatory, required, and optional parameters for the
    # current command.
    mandatory = []
    required = []
    options = []
    valid_keys = ['server-address', 'server-password', 'server-user']
    # valid_keys = ['server-address', 'server-grid-cert', 'server-grid-key', 'server-password', 'server-user']
    for key in gvar['command_keys']:
        # 0.short_name, 1.long_name, 2.key_value(bool)
        if key[0] in mp:
            mandatory.append([key[0], '%-4s | %s' % (key[0], key[1]), key[1][2:]])
        if key[0] in rp:
            required.append([key[0], '%-4s | %s' % (key[0], key[1]), key[1][2:]])
        if key[0] in op or (op == ['*'] and key[0] not in mp + rp + not_optional):
            options.append([key[0], '%-4s | %s' % (key[0], key[1]), key[1][2:]])
        if key[0] in mp + rp + op or (op == ['*'] and key[0] not in mp + rp + not_optional):
            valid_keys.append(key[1][2:])

    # Check for invalid parameters
    for key in gvar['command_args']:
        if gvar['command_args'][key] and (key not in valid_keys):
            print('Error: The following command line arguments were invalid: {}'.format(key))
            exit(1)

    # Check if help requested.
    csv2_help.help(gvar, mandatory=mandatory, required=required, options=options,
                   requires_server=requires_server)

    # If the current command has mandatory parameters and they have not been
    # specified, issue error messages and exit.
    form_data = {}
    missing = []
    for key in mandatory:
        if key[2] in gvar['command_args']:
            if key_map and key[0] in key_map:
                # form_data[key_map[key[0]]] = gvar['command_args'][key[2]]
                form_data[key_map[key[0]]] = _check_keys_for_password(gvar, key)
        else:
            missing.append(key[1])
    if missing:
        print('Error: "%s %s %s" - the following mandatory parameters must be specified on the command line:' % (gvar['command_name'], gvar['object'], gvar['action']))
        for key in missing:
            print(' %s' % key)
        print('For more information, use -H.')
        exit(1)

    missing = []
    for key in required:
        if key[2] in gvar['user_settings']:
            if key_map and key[0] in key_map:
                # form_data[key_map[key[0]]] = gvar['user_settings'][key[2]]
                form_data[key_map[key[0]]] = _check_keys_for_password(gvar, key)
        elif not (key[0] == '-te' and getenv('EDITOR') is not None):
            missing.append(key[1])
    if missing:
        print('Error: "%s %s %s" - no value, neither default nor command line, for the following required parameters:' % (gvar['command_name'], gvar['object'], gvar['action']))
        for key in missing:
            print(' %s' % key)
        print('For more information, use -h or -H.')
        exit(1)

    if key_map:
        for key in options:
            if key[0] in key_map and key[2] in gvar['user_settings']:
                form_data[key_map[key[0]]] = _check_keys_for_password(gvar, key)

    return form_data
4ba5ccbb5fc10a06ab7ecbcee7dc68e823a5c457
3,633,612
def get_data(n_clients):
    """
    Import the dataset via sklearn, shuffle and split train/test.
    Return training, target lists for `n_clients` and a holdout test set
    """
    print("Loading data")
    diabetes = load_diabetes()
    y = diabetes.target
    X = diabetes.data
    # Add constant to emulate intercept
    X = np.c_[X, np.ones(X.shape[0])]

    # The features are already preprocessed
    # Shuffle
    perm = np.random.permutation(X.shape[0])
    X, y = X[perm, :], y[perm]

    # Select test at random
    test_size = 50
    test_idx = np.random.choice(X.shape[0], size=test_size, replace=False)
    train_idx = np.ones(X.shape[0], dtype=bool)
    train_idx[test_idx] = False
    X_test, y_test = X[test_idx, :], y[test_idx]
    X_train, y_train = X[train_idx, :], y[train_idx]

    # Split train among multiple clients.
    # The selection is not at random. We simulate the fact that each client
    # sees a potentially very different sample of patients.
    X, y = [], []
    step = int(X_train.shape[0] / n_clients)
    for c in range(n_clients):
        X.append(X_train[step * c: step * (c + 1), :])
        y.append(y_train[step * c: step * (c + 1)])

    return X, y, X_test, y_test
0459f2ffbeaf1e21780efba9785c96d75a641d93
3,633,613
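A quick usage sketch. sklearn's diabetes set has 442 samples and 10 features, so after the 50-sample holdout and the intercept column, each of 3 clients gets a (130, 11) matrix:

X, y, X_test, y_test = get_data(n_clients=3)
print(len(X), X[0].shape, X_test.shape)  # 3 (130, 11) (50, 11)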
def fn(r):
    """
    Returns the number of fields based on their radial distance

    :param r: radial distance
    :return: number of fields at radial distance
    """
    return 4 * r + 4
5fa4a5e8f2304f907b9dd806281dc77a2152f431
3,633,614
def do_icon(name, *args, **kwargs):
    """
    Render an icon.

    This template tag is an interface to the `icon` function from
    `django_icons`.

    **Tag name**::

        icon

    **Parameters**:

        name
            The name of the icon to be rendered

        title
            The title attribute for the icon

            :default: None (no title attribute rendered)

        renderer
            The renderer to use for the icon

            :default: The default renderer as per ``settings.py``, or
            ultimately `FontAwesomeRenderer`.

    **Usage**::

        {% icon name %}

    **Example**::

        {% icon 'pencil' %}
        {% icon 'pencil' 'fa-big' %}
        {% icon 'trash' title='Delete' %}
    """
    return icon(name, *args, **kwargs)
7b8addf38d056c070af20f447435a67e29a09a8a
3,633,615
def ED_BldGag(ED):
    """Returns the radial position of ElastoDyn blade gages.

    INPUTS:
       - ED: either:
           - a filename of an ElastoDyn input file
           - an instance of FileCl, as returned by reading the file,
             ED = weio.read(ED_filename)
    OUTPUTS:
       - r_gag: The radial positions of the gages, given from the rotor apex
    """
    if not isinstance(ED, weio.FASTInFile):
        ED = weio.FASTInFile(ED)
    _, r_nodes = ED_BldStations(ED)
    nOuts = ED['NBlGages']
    if nOuts <= 0:
        return np.array([])
    if type(ED['BldGagNd']) is list:
        Inodes = np.asarray(ED['BldGagNd'])
    else:
        Inodes = np.array([ED['BldGagNd']])
    r_gag = r_nodes[Inodes[:nOuts] - 1]
    return r_gag
fa95475218bf35a90790296ce7149a286440a39e
3,633,616
from typing import Dict, List

import numpy as np  # needed for the matrix arithmetic below; missing from the original imports


def multiclass_confusion_matrix_metrics(
    cm: np.ndarray, labels: List[str]
) -> Dict[str, int]:
    """
    Create a dictionary of multiple class labels and their TP, TN, FP, FN values

    :param cm: Confusion matrix
    :param labels: string labels corresponding to the Confusion Matrix rows
        and columns
    :return: a dictionary with "label.[TP | TN | FP | FN]" and count

    >>> from sklearn.metrics import confusion_matrix
    >>> ytest = ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']
    >>> ypred = ['a', 'a', 'a', 'a', 'b', 'c', 'b', 'a', 'b']
    >>> cm = confusion_matrix(ytest, ypred, labels=list("abc"))
    >>> metrics = multiclass_confusion_matrix_metrics(cm, labels=list("abc"))
    >>> assert(metrics['b.TP'] == 1)
    >>> assert(metrics['b.FP'] == 2)
    >>> assert(metrics['b.TN'] == 4)
    >>> assert(metrics['b.FN'] == 2)
    """
    metrics = {}  # type: Dict[str, int]
    # TP: the actual and predicted values agree; found on the diagonal.
    TP = {labels[i]: np.diag(cm)[i] for i in range(len(labels))}
    # FN: the sum of values of the corresponding row except the TP value.
    FN = {labels[i]: np.sum(cm[i]) - np.diag(cm)[i] for i in range(len(labels))}
    # FP: the sum of values of the corresponding column except the TP value.
    FP = {labels[i]: np.sum(cm.T[i]) - np.diag(cm)[i] for i in range(len(labels))}
    # TN: the sum of all values except the row and column of the class that
    # we are calculating the values for.
    TN = {
        labels[i]: np.sum(cm) - (TP[labels[i]] + FP[labels[i]]) - FN[labels[i]]
        for i in range(len(labels))
    }
    for key, val in TP.items():
        metrics[f"{key}.TP"] = int(val)  # cast to int, for painless json serialization
    for key, val in FP.items():
        metrics[f"{key}.FP"] = int(val)
    for key, val in TN.items():
        metrics[f"{key}.TN"] = int(val)
    for key, val in FN.items():
        metrics[f"{key}.FN"] = int(val)
    return metrics
4098dfd1e8618b1d9d87f93b924400925e047680
3,633,617
import argparse


def parse_arguments():
    """
    Parse the command line arguments
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-ann", "--annotations_path", required=True,
                    help="Path to the directory containing the annotation files or path to the "
                         "single annotation file.")
    ap.add_argument("-ann_type", "--annotations_type", required=False, default="voc",
                    help="Annotations type ('voc', 'coco').")
    ap.add_argument("-det", "--detections_dir_path", required=True,
                    help="Path to the '.pkl' file containing detections generated from class "
                         "agnostic OD method. The detections should be a dictionary with keys as "
                         "'image names' and values as the tuple of predicted boxes & scores "
                         "(i.e. ([boxes], [scores]))")
    ap.add_argument("-N", "--top_N_dets", required=False, type=int, default=50,
                    help="Maximum number of top N detections sorted by confidence to be used for "
                         "metrics calculations. Note that the script also reports average number "
                         "of ")
    ap.add_argument("-iou", "--iou_thresh", required=False, type=float, default=0.5,
                    help="IOU threshold to be used for computing AP and Recall. Default is 0.5.")
    ap.add_argument("--extra_metrics", action='store_true',
                    help="Flag to decide if to evaluate AP-small, AP-medium and AP-large.")
    args = vars(ap.parse_args())
    return args
e564699cbc74fba69b2bba90372a0513a81ae84c
3,633,618
import textwrap


def is_perf_benchmarks_scheduling_valid(perf_waterfall_file, outstream):
    """Validates that all existing benchmarks are properly scheduled.

    Return: True if all benchmarks are properly scheduled, False otherwise.
    """
    scheduled_non_telemetry_tests = get_scheduled_non_telemetry_benchmarks(
        perf_waterfall_file)
    all_perf_gtests = set(GTEST_BENCHMARKS)
    all_perf_other_tests = set(OTHER_BENCHMARKS)
    error_messages = []

    for test_name in all_perf_gtests - scheduled_non_telemetry_tests:
        error_messages.append(
            'Benchmark %s is tracked but not scheduled on any perf waterfall '
            'builders. Either schedule or remove it from GTEST_BENCHMARKS.'
            % test_name)

    for test_name in all_perf_other_tests - scheduled_non_telemetry_tests:
        error_messages.append(
            'Benchmark %s is tracked but not scheduled on any perf waterfall '
            'builders. Either schedule or remove it from OTHER_BENCHMARKS.'
            % test_name)

    for test_name in scheduled_non_telemetry_tests.difference(
            all_perf_gtests, all_perf_other_tests):
        error_messages.append(
            'Benchmark %s is scheduled on perf waterfall but not tracked. Please '
            'add an entry for it in GTEST_BENCHMARKS or OTHER_BENCHMARKS in '
            '//tools/perf/core/perf_data_generator.py.' % test_name)

    for message in error_messages:
        print('*', textwrap.fill(message, 70), '\n', file=outstream)

    return not error_messages
732664db5c17b8c7e4474048da6822c0e5ea207a
3,633,619
import base64

import pandas as pd  # needed for the DataFrame check below; missing from the original imports


def download_link(object_to_download, download_filename, download_link_text):
    """Generates a link from which the user can download object_to_download

    Method from
    https://discuss.streamlit.io/t/heres-a-download-function-that-works-for-dataframes-and-txt/4052

    Args:
        object_to_download (str, pd.DataFrame): The object to be downloaded
        download_filename (str): Filename and extension of file
            (e.g. mydata.csv or some_txt_output.txt)
        download_link_text (str): Text to display for download link

    Example:
        download_link(YOUR_DF, 'YOUR_DF.csv', 'CSV built! Click here to download your data!')
    """
    if isinstance(object_to_download, pd.DataFrame):
        object_to_download = object_to_download.to_csv(index=True)

    # some strings <-> bytes conversions necessary here
    b64 = base64.b64encode(object_to_download.encode()).decode()

    return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
81299651997d0bf41cf0c2e000741e6e5f7ba3d2
3,633,620
def run_epoch(sess, cost_op, ops, reset, num_unrolls):
    """Runs one optimization epoch."""
    sess.run(reset)
    for _ in range(num_unrolls):
        results = sess.run([cost_op] + ops)
    return results[0], results[1:]
975634b3498d6385b53222a88b8f79b7d3ee4d3d
3,633,621
def transformToUTM(gdf, utm_crs, estimate=True, calculate_sindex=True):
    """Transform GeoDataFrame to UTM coordinate reference system.

    Arguments
    ---------
    gdf : :py:class:`geopandas.GeoDataFrame`
        :py:class:`geopandas.GeoDataFrame` to transform.
    utm_crs : str
        :py:class:`rasterio.crs.CRS` string for destination UTM CRS.
    estimate : bool, optional
        .. deprecated:: 0.2.0
            This argument is no longer used.
    calculate_sindex : bool, optional
        .. deprecated:: 0.2.0
            This argument is no longer used.

    Returns
    -------
    gdf : :py:class:`geopandas.GeoDataFrame`
        The input :py:class:`geopandas.GeoDataFrame` converted to `utm_crs`
        coordinate reference system.
    """
    gdf = gdf.to_crs(utm_crs)
    return gdf
02405ca581054b5d804c6e4eb49be96d0915e3de
3,633,622
import re


def remove_prohibited_characters(prompt_str: str) -> str:
    """
    Remove prohibited characters.
    """
    prohibited_chars = ["[", "]", "<", ">", "#", "%", "$", ":", ";", "~",
                        "\r", " ", "\n"]
    result_str = prompt_str
    for ch in prohibited_chars:
        result_str = result_str.replace(ch, "")
    if "\x1b" in result_str:
        # for powerline.
        result_str = re.sub("\x1b.*h", "", result_str)
        result_str = re.sub("\x1b.*m", "", result_str)
    return result_str
8eabb923b5ee59656fb41164d14be0ba6e4535f4
3,633,623
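A quick illustration of the stripping behavior above (the input string is made up):

cleaned = remove_prohibited_characters("my prompt [dev] #1:\n")
print(cleaned)  # "mypromptdev1" - spaces, brackets, '#', ':' and the newline are removed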
import numpy as np  # needed below; missing from the original imports
import scipy.optimize  # `import scipy` alone does not expose scipy.optimize


def correction_factors(kappa, eta, gamma, b0, use_eta=True):
    """Computes correction factors for MLE of high dimensional logistic reg."""
    system_ = get_system(kappa, eta, gamma, b0, use_eta)
    if use_eta:
        init = np.array([2, 2, np.sqrt(eta / 2), b0 / 2])
    else:
        init = np.array([2, 2, np.sqrt(gamma**2 + 1), b0])
    soln = scipy.optimize.root(
        lambda x: system_(*x),
        init,
        method='lm',
        options={'xtol': 1e-4, 'eps': 1e-8})
    x0 = soln.x
    if kappa >= 0.03 and (x0[0] < 1 or x0[2] < 0.1):
        print('Rerunning due to convergence issue')
        init += 0.1 * np.random.randn(4)
        init = np.maximum(init, np.array([1, 0.5, 0.1, b0 / 2.0]))
        soln = scipy.optimize.root(
            lambda x: system_(*x),
            init,
            method='lm',
            options={'xtol': 1e-4, 'eps': 1e-8})
        x0 = soln.x
    return x0
633183d63fc4c974b4f95d587f5bd625ab14e8b9
3,633,624
import subprocess


def bzr_find_files(dirname):
    """Find versioned files using bzr, for use in 'setuptools.file_finders'
    entry point in setup.py.
    """
    cmd = 'bzr ls --versioned ' + dirname
    proc = subprocess.Popen(
        cmd.split(),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    return stdout.splitlines()
8bfc6975b3aaaabc3a955dbef92d96dcea15f518
3,633,625
# Panel/Prompt/Text/console below match the `rich` library's API; these
# imports are an assumption, since the original row omitted them.
from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt
from rich.text import Text

console = Console()


def make_style_prompt(choices: list, default: str = None,
                      prompt_msg: str = "Would you like to:",
                      main_style: str = "none", frame_style: str = "none",
                      frame_border_style: str = "none") -> str:
    """
    Prompts user in a cool way and retrieves what the user picked.

    :param choices: A list of choices.
    :param default: The value that gets returned if user doesn't type anything in.
    :param prompt_msg: The message being printed before the choices.
    :param main_style: The main theme/color of the prompt.
    :param frame_style: The theme/color for the text in the panels.
    :param frame_border_style: The theme/color for the frame in the panels.
    :return: str value of final style choice the user selected.
    """
    choices = [str(i) for i in choices]
    if default is not None:
        # Get index of default so it can be set to the default choice.
        default_index = str(choices.index(default) + 1)
    else:
        default_index = None
    choices_styled = []
    c = 0
    for i in choices:
        c += 1
        choices_styled.append(Panel(f"{c}. {i}", style=frame_style,
                                    border_style=frame_border_style))
    console.print(Panel(prompt_msg.replace("\n", ""), style=main_style,
                        border_style=main_style))
    for i in choices_styled:
        console.print(i)
    choice_index = Prompt.ask(Text.assemble(("╰>", main_style)),
                              choices=[str(x) for x in range(1, c + 1)],
                              default=default_index)
    choice_index = int(choice_index)
    return choices[choice_index - 1]
06f20893f6e8616998142fee43ae5cb8ccb16bad
3,633,626
from collections import OrderedDict  # instantiate the concrete class, not the typing alias


def jsonfile_1():
    """ A JSON File object """
    return thresh.TabularFile(
        content=OrderedDict({"bar": 4, "foo": 3}),
        alias="JSON_",
        length_check=False,
        namespace_only=True
    )
7c9d856f7619fad54a7614107d018ec4be645498
3,633,627
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.

    digits: str -- string representation of number (in base1)
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- string representation of number (in base2)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
    assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
    # Every branch is the same two-step conversion: decode the digits to a
    # base-10 integer, then encode that integer in the target base.
    # Convert digits from base 2 to base 16 (and vice versa)
    if base1 == 2 and base2 == 16:
        baseTen = decode(digits, base1)
        result = encode(baseTen, base2)
        return result
    # Convert digits from base 2 to base 10 (and vice versa)
    elif base1 == 2 and base2 == 10:
        baseTen = decode(digits, base1)
        result = encode(baseTen, base2)
        return result
    # Convert digits from base 10 to base 16 (and vice versa)
    elif base1 == 10 and base2 == 16:
        baseTen = decode(digits, base1)
        result = encode(baseTen, base2)
        return result
    # Convert digits from any base to any base (2 up to 36)
    else:
        baseTen = decode(digits, base1)
        result = encode(baseTen, base2)
        return result
7a51d56d0c8d04e4c2c1a178da214d900a52d908
3,633,628
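The `decode` and `encode` helpers are referenced above but not part of this row. A minimal sketch of what they plausibly do (digit string to int, and back) using the same [0-9a-z] digit set; these are illustrative, not the original helpers:

import string

DIGITS = string.digits + string.ascii_lowercase  # '0'..'9' then 'a'..'z'

def decode(digits, base):
    # Interpret a digit string in the given base as a non-negative integer.
    number = 0
    for ch in digits.lower():
        number = number * base + DIGITS.index(ch)
    return number

def encode(number, base):
    # Render a non-negative integer as a digit string in the given base.
    if number == 0:
        return '0'
    out = []
    while number > 0:
        number, rem = divmod(number, base)
        out.append(DIGITS[rem])
    return ''.join(reversed(out))

print(convert('ff', 16, 2))  # 11111111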
def _get_cmfs_xy():
    """
    Compute the xy values of the horseshoe-shaped outline used to plot the
    xy chromaticity diagram.

    Returns
    -------
    array_like
        xy coordinates for the chromaticity diagram
    """
    # Basic parameter setup
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    # Compute the xy values of the horseshoe
    cmf_xy = XYZ_to_xy(cmf.values, d65_white)
    return cmf_xy
67517dedbb53a30270b6bf2022dec64f796e1e31
3,633,629
def square_matrix_multiply(A, B):
    """
    Computation straight from the definition: Θ(n^3)
    """
    n = len(A)
    C = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            for k in range(n):
                C[i][j] += A[i][k] * B[k][j]
    return C
e0c2766bb9f5f77df1f95f9158fd027db6b7eadb
3,633,630
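A quick check against the definition:

A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
print(square_matrix_multiply(A, B))  # [[19, 22], [43, 50]]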
def _filter_irregular_boxes(boxes, min_ratio=0.2, max_ratio=5):
    """Keep only boxes whose width/height aspect ratio lies within
    [min_ratio, max_ratio]; returns the indices of the surviving boxes."""
    ws = boxes[:, 2] - boxes[:, 0] + 1
    hs = boxes[:, 3] - boxes[:, 1] + 1
    rs = ws / hs
    keep = np.where((rs <= max_ratio) & (rs >= min_ratio))[0]
    return keep
31c6113c45195a31c0a9325a113363cc018a0a24
3,633,631
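Note the function returns indices of boxes to keep, not the filtered boxes themselves; a quick illustration:

import numpy as np
# Boxes are (x1, y1, x2, y2); aspect ratios here are 3/3 = 1.0 and 31/3 ~ 10.3.
boxes = np.array([[0, 0, 2, 2],
                  [0, 0, 30, 2]], dtype=float)
keep = _filter_irregular_boxes(boxes)
print(keep)  # [0] - only the first box has an aspect ratio within [0.2, 5]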
import http  # a local wrapper module exposing `jar` and `get`, not the stdlib package
from urllib.parse import urlencode  # used below; missing from the original imports (assumes Python 3)


def login(user, password):
    """
    Authenticate against SomethingAwful, both storing that authentication in
    the global cookiejar and returning the relevant cookies

    :param user: your awful username for somethingawful dot com
    :param password: your awful password for somethingawful dot com
    :return: the authentication cookies for somethingawful dot com
    """
    get_sa_cookies = lambda jar: [
        c for c in jar
        if c.domain.endswith("forums.somethingawful.com")
        and (c.name == "bbuserid" or c.name == "bbpassword")
    ]

    http.jar.clear_expired_cookies()
    sa_cookies = get_sa_cookies(http.jar)
    if len(sa_cookies) == 2:
        return sa_cookies

    http.get(
        LOGIN_URL,
        cookies=True,
        post_data=urlencode({
            "action": "login",
            "username": user,
            "password": password
        })
    )
    sa_cookies = get_sa_cookies(http.jar)
    if len(sa_cookies) < 2:
        return None
    return sa_cookies
ad76dc9e1af0e33cfb9d64b6a64dc7d1b3d4e57e
3,633,632
def streams(url: str, **params):
    """
    Initializes an empty Streamlink session, attempts to find a plugin and
    extracts streams from the URL if a plugin was found.

    :param url: a URL to match against loaded plugins
    :param params: Additional keyword arguments passed to
        :meth:`streamlink.Streamlink.streams`
    :raises NoPluginError: on plugin resolve failure
    :returns: A :class:`dict` of stream names and
        :class:`streamlink.stream.Stream` instances
    """
    session = Streamlink()
    return session.streams(url, **params)
58a3cefa0c2957a168282f41457d1844fafdb728
3,633,633
def get_questionnaire_example() -> pd.DataFrame:
    """Return questionnaire example data.

    Returns
    -------
    data : :class:`~pandas.DataFrame`
        dataframe with questionnaire example data
    """
    return load_questionnaire_data(_get_data("questionnaire_sample.csv"))
ed067c3a051e91d95326002fa52245304d1d7085
3,633,634
import os


def auto_dsk(dsk_row, synth, bounds, conv_limit=0, conv_bounds=[None, None],
             phase_args=(0., 360., 1.), highcut=0., order=3):
    """
    Returns the maximum likelihood phase shift to deskew the data to match a
    provided synthetic, given bounds on a window to match.

    Parameters
    ----------
    dsk_row : Pandas.Series
        Single row of a deskew file with valid path to data file
    synth : list
        This should be the output of the make synthetic function. It needs to
        contain three elements:
            0) an array of the synthetic magnetic anomalies
            1) an array of the distance coordinates of the points in 0, should
               be of equal length to 0, MUST be in the same coordinate system
               as the profile provided in dsk_row!!! Which it may not be by
               default.
            2) the distance resolution of the synthetic in 0 and 1
    bounds : list of floats
        Has two elements which correspond to the left and right bounds of the
        window
    conv_limit : float, optional
        Whether or not to realign the anomaly at each phase shift using a
        time-lagged convolution method, which increases runtime significantly
        but can also increase accuracy. This argument should be a positive
        float which corresponds to the amount of +- shift the anomaly is
        allowed to move; otherwise it should be 0 to not use the shift method
        (Default: 0, which implies not to use the method).
    conv_bounds : list of 2 floats, optional
        The left and right boundary in the distance domain to use to time-lag
        convolve the synthetic and the filtered data signal. Thus 300 km of
        signal can be convolved but only the 10 km of motion allowed to pin
        down the crossing location. (Default: [None, None], which implies
        conv_bounds=bounds)
    phase_args : tuple or other unpackable sequence, optional
        Arguments to np.arange which define the phases searched in the
        minimization. (Default: (0., 360., 1.), which implies a search of the
        entire parameter space of phases at 1 degree resolution)
    highcut : float, optional
        The upper cutoff frequency to filter the data by in order to remove
        any topographic anomalies in the data. This value should be between 0
        and the Nyquist of the synthetic, which MUST be regularly sampled
        like those returned by make_synthetic. The data is up- or
        down-sampled to the synthetic before filtering. (Default: 0, which
        implies not to filter the data)
    order : int, optional
        The order of the lowpass butterworth filter to apply to the data.

    Returns
    -------
    best_phase : float
        The maximum likelihood phase shift to match the data to the synthetic
    best_shift : float
        The maximum likelihood shift for the best_phase which aligned the two
        anomalies
    phase_func : Numpy.NdArray
        The summed phase asynchrony between the data and the synthetic as a
        function of phase shift (best_phase is the global minimum of this
        function)
    best_shifts : Numpy.NdArray
        The maximum likelihood shift as a function of the phase shift
    """
    # Unpack arguments
    dage = dsk_row["age_max"] - dsk_row["age_min"]
    phases = np.arange(*phase_args)
    left_bound, right_bound = bounds
    synth_mag = np.array(synth[0])
    synth_dis = np.array(synth[1])
    ddis = synth[2]
    data_path = os.path.join(dsk_row["data_dir"], dsk_row["comp_name"])
    data_df = utl.open_mag_file(data_path)
    projected_distances = utl.calc_projected_distance(
        dsk_row['inter_lon'], dsk_row['inter_lat'], data_df['lon'].tolist(),
        data_df['lat'].tolist(), (180 + dsk_row['strike']) % 360)

    if conv_limit:
        # Create the fully interpolated profile for convolution and the
        # shortened synthetic for the time-lagged convolution
        if isinstance(conv_bounds[0], type(None)):
            conv_bounds[0] = bounds[0]
        if isinstance(conv_bounds[1], type(None)):
            conv_bounds[1] = bounds[1]
        left_idx = np.argmin(np.abs(synth_dis - conv_bounds[0]))
        right_idx = np.argmin(np.abs(synth_dis - conv_bounds[1]))
        right_idx, left_idx = max([right_idx, left_idx]), min([right_idx, left_idx])
        conv_synth, conv_synth_dis = synth_mag[left_idx:right_idx], synth_dis[left_idx:right_idx]
        # NOTE: the flattened original only defined mag/mag_dis inside the
        # reversal branch; they are defined unconditionally here so that the
        # np.interp call is always well-formed (interp needs increasing x).
        mag = data_df["mag"].to_numpy()
        mag_dis = projected_distances["dist"].to_numpy()
        if np.any(np.diff(mag_dis) < 0):
            # redefine to the right because interp requires an increasing x-axis
            mag = mag[::-1]
            mag_dis = mag_dis[::-1]
        full_imag = np.interp(conv_synth_dis, mag_dis, mag)
        if highcut:
            full_fimag = butter_lowpass_filter(full_imag, highcut=highcut, fs=1 / ddis, order=order)
        else:
            full_fimag = full_imag

    # Trim to only the window of relevance
    left_idx = np.argmin(np.abs(synth_dis - left_bound))
    right_idx = np.argmin(np.abs(synth_dis - right_bound))
    right_idx, left_idx = max([right_idx, left_idx]), min([right_idx, left_idx])
    tsynth_mag = synth_mag[left_idx:right_idx]
    tsynth_dis = synth_dis[left_idx:right_idx]
    # because this is easier and regularly sampled, plus the user can set it simply
    N = len(tsynth_mag)
    al2 = np.angle(hilbert(np.real(tsynth_mag), N), deg=False)

    best_shifts = []  # record best shifts as a function of phase shift
    phase_async_func = []  # record summed phase asynchrony as a function of phase shift
    for i, phase in enumerate(phases):
        shifted_mag = phase_shift_data(data_df["mag"], phase)

        if conv_limit:
            # DON'T YOU KNOW WE'RE GONNAAAA DOOOOOOO THE COOONVOLUTIOOOON!!!
            shifted_full_fimag = phase_shift_data(full_fimag, phase)
            correlation_func = np.abs(np.convolve(shifted_full_fimag, conv_synth, "full"))
            correlation_func = correlation_func[
                int(len(conv_synth) - conv_limit / ddis + .5):
                int(len(conv_synth) + conv_limit / ddis + .5)]
            best_shift = ddis * (len(correlation_func) / 2 - np.argmax(correlation_func)) / 2
        else:
            best_shift = 0.

        # Trim the data to the right segments
        left_idx = np.argmin(np.abs(projected_distances["dist"] - left_bound + best_shift))
        right_idx = np.argmin(np.abs(projected_distances["dist"] - right_bound + best_shift))
        right_idx, left_idx = max([right_idx, left_idx]), min([right_idx, left_idx])
        tproj_dist = projected_distances["dist"][left_idx:right_idx] + best_shift
        tshifted_mag = shifted_mag[left_idx:right_idx]

        # numpy.interp only works for monotonically increasing independent variable data
        if np.any(np.diff(tproj_dist) < 0):
            itshifted_mag = np.interp(-tsynth_dis, -tproj_dist, tshifted_mag)
        else:
            itshifted_mag = np.interp(tsynth_dis, tproj_dist, tshifted_mag)
        if highcut:
            fitshifted_mag = butter_lowpass_filter(itshifted_mag, highcut=highcut, fs=1 / ddis, order=order)
        else:
            fitshifted_mag = itshifted_mag

        al1 = np.angle(hilbert(fitshifted_mag, N), deg=False)
        # shouldn't go negative but...just in case
        phase_asynchrony = np.sin((al1 - al2) / 2)

        best_shifts.append(best_shift)
        phase_async_func.append(phase_asynchrony.sum())

    best_idx = np.argmin(phase_async_func)
    return phases[best_idx], best_shifts[best_idx], phase_async_func, best_shifts
9c26daac03e7e54ca80db0b04268cbaf12784c9e
3,633,635
def tabulate_e2e_vectors(*, tau_n=dna_params['tau_n'], unwrap=None):
    """Return a lookup table of entry->exit vectors with the right magnitude
    in nm. Multiply on the left with the entry orientation matrix to obtain
    the entry to exit displacement vector. One vector for each possible level
    of unwrapping.

    Returns
    -------
    entry_to_exit_vectors: pd.DataFrame
        columns are x, y, z components of vector
    """
    if unwrap is None:
        unwrap = np.arange(0, 147)
    Lws = np.array([(bp_in_nuc - u - 1) for u in unwrap])
    # list of vectors, one for each unwrapping level
    e2evecs = np.zeros((Lws.size, 3))
    for i, lw in enumerate(Lws):
        e2evecs[i, :] = entry_exit_vector(lw).T[0]
    df = pd.DataFrame(e2evecs, columns=['x', 'y', 'z'])
    return df
469e5e4389ea7215cad44862f8377a517714137f
3,633,636
def load_fooof_task_pe(data_path, side='Contra', param_ind=1, folder='FOOOF'):
    """Loads task data for all subjects, selects and returns periodic FOOOF outputs.

    data_path : path to where data
    side : 'Ipsi' or 'Contra'
    """
    # Collect measures together from FOOOF results into matrices
    all_alphas = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
    for li, load in enumerate(['Load1', 'Load2', 'Load3']):
        pre, early, late = _load_fgs(data_path, folder, side, load)
        for ind, fg in enumerate([pre, early, late]):
            temp_alphas = get_band_peak_fg(fg, BANDS.alpha)
            all_alphas[li, :, ind] = temp_alphas[:, param_ind]
    return all_alphas
7b7b6c26343b58c7c579c890d384f3d8a31611fa
3,633,637
def warning(message):
    """Generic warning message formatter.

    Args:
        message (string): A message that describes the warning.

    Returns:
        (str): Formatted warning message.
    """
    return bcolors.WARNING + "WARNING: " + bcolors.ENDC + message
d7a06aaf90f24cbb18028a8b921086d3c83efe76
3,633,638
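`bcolors` is assumed to come from the surrounding module. A minimal sketch of the conventional ANSI-escape container it likely refers to (illustrative, not the original):

class bcolors:
    # Common ANSI escape codes; WARNING renders yellow on most terminals.
    WARNING = '\033[93m'
    ENDC = '\033[0m'

print(warning("disk usage above 90%"))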
def GetResult(cl, opts, result):
    """Waits for jobs and returns whether they have succeeded

    Some OpCodes return a list of jobs. This function can be used after
    issuing a given OpCode to look at the OpCode's result and, if it is of
    type L{ht.TJobIdListOnly}, then it will wait for the jobs to complete,
    otherwise just return L{constants.EXIT_SUCCESS}.

    @type cl: L{ganeti.luxi.Client}
    @param cl: client that was used to submit the OpCode, which will also be
        used to poll the jobs
    @param opts: CLI options
    @param result: result of the opcode which might contain job information,
        in which case the jobs will be polled, or simply the result of the
        opcode
    @rtype: int
    @return: L{constants.EXIT_SUCCESS} if all jobs completed successfully,
        L{constants.EXIT_FAILURE} otherwise
    """
    if not ht.TJobIdListOnly(result):
        return constants.EXIT_SUCCESS

    jex = cli.JobExecutor(cl=cl, opts=opts)
    for (status, job_id) in result[constants.JOB_IDS_KEY]:
        jex.AddJobId(None, status, job_id)

    bad_jobs = [job_result for success, job_result in jex.GetResults()
                if not success]
    if len(bad_jobs) > 0:
        for job in bad_jobs:
            cli.ToStdout("Job failed, result is '%s'.", job)
        cli.ToStdout("%s job(s) failed.", len(bad_jobs))
        return constants.EXIT_FAILURE
    else:
        return constants.EXIT_SUCCESS
8bdee53bc6a693436084362f8c6c643e3e565a0d
3,633,639
from .SpectralDecomposer import Decomposer
from .model_housing import indivmodel

import numpy as np  # needed for np.size below; missing from the original imports


def decomposition_method(input):
    """
    Decomposition of an individual spectrum using input guesses from the
    parent SAA

    Parameters
    ----------
    input : list
        A list which contains the following:

        spectral_axis : an array of the spectral axis
        specids : a mask over which the spectrum will be fitted
        fittype : the type of fit scouse will attempt to perform
        tol : the tolerance values for comparison with the parent saa spectrum
        res : the channel spacing of the data
        indivspec : an instance of the individual_spectrum class

    Returns
    -------
    A list containing the model and the updated guesses
    """
    # unpack the inputs
    spectral_axis, specids, fittype, tol, res, indivspec = input
    spectrum = indivspec.spectrum[specids]
    rms = indivspec.rms

    # set up the decomposer
    decomposer = Decomposer(spectral_axis, spectrum, rms)
    setattr(decomposer, 'psktemplate', indivspec.template)

    # inputs to initiate the fitter
    if np.size(indivspec.guesses_updated) <= 1:
        guesses = indivspec.guesses_from_parent
    else:
        guesses = indivspec.guesses_updated

    # always pass the parent SAA parameters for comparison
    guesses_parent = indivspec.guesses_from_parent

    # fit the spectrum
    Decomposer.fit_spectrum_from_parent(decomposer, guesses, guesses_parent,
                                        tol, res, fittype=fittype)

    # generate a model
    if decomposer.validfit:
        model = indivmodel(decomposer.modeldict)
    else:
        model = None

    return [model, decomposer.guesses_updated]
071ce1bde7d5302d09d41fc30216d578263cfbbf
3,633,640
from typing import Tuple

import os


def create_feature_columns() -> Tuple[list, list, list]:
    """
    Generate the input features and labels for the MMOE model.

    Returns:
        dense_feature_columns (list): feature_columns for continuous features
        category_feature_columns (list): feature_columns for categorical
            features (including sequence features)
        label_feature_columns (list): feature_columns for the labels
    """
    dense_feature_columns, category_feature_columns, label_feature_columns = [], [], []

    # Continuous features
    videoplayseconds = fc.numeric_column('videoplayseconds', default_value=0.0)
    u_read_comment_7d_sum = fc.numeric_column('u_read_comment_7d_sum', default_value=0.0)
    u_like_7d_sum = fc.numeric_column('u_like_7d_sum', default_value=0.0)
    u_click_avatar_7d_sum = fc.numeric_column('u_click_avatar_7d_sum', default_value=0.0)
    u_forward_7d_sum = fc.numeric_column('u_forward_7d_sum', default_value=0.0)
    u_comment_7d_sum = fc.numeric_column('u_comment_7d_sum', default_value=0.0)
    u_follow_7d_sum = fc.numeric_column('u_follow_7d_sum', default_value=0.0)
    u_favorite_7d_sum = fc.numeric_column('u_favorite_7d_sum', default_value=0.0)
    i_read_comment_7d_sum = fc.numeric_column('i_read_comment_7d_sum', default_value=0.0)
    i_like_7d_sum = fc.numeric_column('i_like_7d_sum', default_value=0.0)
    i_click_avatar_7d_sum = fc.numeric_column('i_click_avatar_7d_sum', default_value=0.0)
    i_forward_7d_sum = fc.numeric_column('i_forward_7d_sum', default_value=0.0)
    i_comment_7d_sum = fc.numeric_column('i_comment_7d_sum', default_value=0.0)
    i_follow_7d_sum = fc.numeric_column('i_follow_7d_sum', default_value=0.0)
    i_favorite_7d_sum = fc.numeric_column('i_favorite_7d_sum', default_value=0.0)
    c_user_author_read_comment_7d_sum = fc.numeric_column('c_user_author_read_comment_7d_sum', default_value=0.0)
    dense_feature_columns += [videoplayseconds, u_read_comment_7d_sum, u_like_7d_sum,
                              u_click_avatar_7d_sum, u_forward_7d_sum, u_comment_7d_sum,
                              u_follow_7d_sum, u_favorite_7d_sum, i_read_comment_7d_sum,
                              i_like_7d_sum, i_click_avatar_7d_sum, i_forward_7d_sum,
                              i_comment_7d_sum, i_follow_7d_sum, i_favorite_7d_sum,
                              c_user_author_read_comment_7d_sum]

    # Categorical features
    userid = fc.categorical_column_with_vocabulary_file('userid', os.path.join(FLAGS.vocabulary_dir, 'userid.txt'))
    feedid = fc.categorical_column_with_vocabulary_file('feedid', os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
    device = fc.categorical_column_with_vocabulary_file('device', os.path.join(FLAGS.vocabulary_dir, 'device.txt'))
    authorid = fc.categorical_column_with_vocabulary_file('authorid', os.path.join(FLAGS.vocabulary_dir, 'authorid.txt'))
    bgm_song_id = fc.categorical_column_with_vocabulary_file('bgm_song_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_song_id.txt'))
    bgm_singer_id = fc.categorical_column_with_vocabulary_file('bgm_singer_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_singer_id.txt'))
    manual_tag_list = fc.categorical_column_with_vocabulary_file('manual_tag_list', os.path.join(FLAGS.vocabulary_dir, 'manual_tag_id.txt'))
    his_read_comment_7d_seq = fc.categorical_column_with_vocabulary_file('his_read_comment_7d_seq', os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
    userid_emb = fc.embedding_column(userid, 16)
    feedid_emb = fc.shared_embedding_columns([feedid, his_read_comment_7d_seq], 16, combiner='mean')
    device_emb = fc.embedding_column(device, 2)
    authorid_emb = fc.embedding_column(authorid, 4)
    bgm_song_id_emb = fc.embedding_column(bgm_song_id, 4)
    bgm_singer_id_emb = fc.embedding_column(bgm_singer_id, 4)
    manual_tag_id_emb = fc.embedding_column(manual_tag_list, 4, combiner='mean')
    category_feature_columns += [userid_emb, device_emb, authorid_emb, bgm_song_id_emb,
                                 bgm_singer_id_emb, manual_tag_id_emb]
    category_feature_columns += feedid_emb  # feedid_emb is a list

    # Labels
    label_feature_columns += [fc.numeric_column(task_name, default_value=0.0)
                              for task_name in FLAGS.task_names.split(",")]

    return dense_feature_columns, category_feature_columns, label_feature_columns
aa09ff0e690d389b08754a7db7d7ba068827ec05
3,633,641
def cal_features(pm):
    """
    Only one track in pm; all bars are calculated.

    Returns:
        used_pitch
        used_note
        pitch_histogram
        pitch_interval_hist  # not for track 2
        pitch_range
        onset_interval_hist
        duration_hist
    """
    result_features = {}
    chromagram = np.zeros(12)
    duration_hist = np.zeros(32)
    pitch_intervals_hist = np.zeros(12)
    onset_interval_hist = np.zeros(32)
    track_total_notes = pm.instruments[0].notes
    used_pitch = set()
    total_notes = []
    duration_name_to_time, duration_time_to_name, duration_times, bar_duration = get_note_duration_dict(
        pm.get_beats()[1] - pm.get_beats()[0],
        (pm.time_signature_changes[0].numerator, pm.time_signature_changes[0].denominator))
    sixteenth_time = round(duration_name_to_time['sixteenth'], 2)
    bar_time = list(pm.get_downbeats())
    bar_time.append(bar_time[-1] + bar_duration)
    for note in track_total_notes:
        used_pitch.add(note.pitch)
        chromagram[note.pitch % 12] += 1
        if len(total_notes) > 0:
            if 0 <= round(note.start - total_notes[-1].end, 2) <= 2:
                intervals = abs(note.pitch - total_notes[-1].pitch)
                if intervals > 11:
                    continue
                pitch_intervals_hist[intervals] += 1
            if sixteenth_time <= round(note.start - total_notes[-1].start, 2) <= sixteenth_time * 32:
                onset_interval_hist[int(round(note.start - total_notes[-1].start, 2) / sixteenth_time) - 1] += 1
        total_notes.append(note)
        duration = int(round(note.end - note.start, 2) / sixteenth_time)
        if duration > 32:
            duration = 32
        duration_hist[duration - 1] += 1
    used_pitch_number = len(used_pitch)
    if used_pitch_number == 0:
        return None
    chromagram = chromagram / sum(chromagram)
    pitch_range = max(used_pitch) - min(used_pitch)
    used_notes_number = len(total_notes)
    pitch_intervals_hist = pitch_intervals_hist / sum(pitch_intervals_hist)
    duration_hist = duration_hist / sum(duration_hist)
    onset_interval_hist = onset_interval_hist / sum(onset_interval_hist)
    result_features['pitch_number'] = used_pitch_number
    result_features['note_number'] = used_notes_number
    result_features['pitch_range'] = pitch_range
    result_features['chromagram'] = chromagram
    result_features['pitch_intervals_hist'] = pitch_intervals_hist
    result_features['duration_hist'] = duration_hist
    result_features['onset_interval_hist'] = onset_interval_hist
    return result_features
aad1d644b97c4b294c6bf25b6a5447475bcaaf4a
3,633,642
def grayscale(img):
    """Applies the Grayscale transform"""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
7e6408b4decb2b3a6a66b92afc1359fba2036735
3,633,643
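A short usage sketch; which color-conversion flag applies depends on how the image was loaded, as the comment above notes. The file name is a placeholder:

import matplotlib.image as mpimg

img = mpimg.imread('test.jpg')  # matplotlib loads channels as RGB
gray = grayscale(img)           # use COLOR_BGR2GRAY instead for cv2.imread()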
def sample_user(email='test@joeshak.com', password='testpass'):
    """ Create a sample user """
    return get_user_model().objects.create_user(email, password)
a999bba7581edfd65491eee68ea6b9f0b786dcf0
3,633,644
# Concatenate, Conv1D and Lambda are Keras layers; the original row imported
# Concatenate from typing, which is incorrect. The tf.keras source below is
# an assumption.
from tensorflow.keras.layers import Concatenate, Conv1D, Lambda


def GroupConv1D(x, in_channels, out_channels, groups=1, kernel=1, strides=1, name=''):
    """
    Group Convolution 1D. groups=1 means pointwise convolution.

    input:
        - x: input tensor
        - in_channels: input channels
        - out_channels: output channels
        - groups: divide channels by group
        - kernel: kernel size
        - strides: strides
        - name
    """
    if groups == 1:
        # Point-wise Conv
        return Conv1D(filters=out_channels, kernel_size=kernel, padding='same',
                      use_bias=False, strides=strides, name=name)(x)

    channel_by_groups = in_channels // groups
    groups_list = []
    assert out_channels % groups == 0
    for i in range(groups):
        # channel divide
        group = Lambda(lambda z: z[:, :, int(channel_by_groups * i): int(channel_by_groups * (i + 1))],
                       name='%s/%d_slice' % (name, i))(x)
        # each group's output channels: int(0.5 + out_channels / groups)
        groups_list.append(Conv1D(int(0.5 + out_channels / groups), kernel_size=kernel,
                                  padding='same', use_bias=False, strides=strides,
                                  name='%s_/%d' % (name, i))(group))
    return Concatenate(name='%s/concat' % name)(groups_list)
7921e245a4988c0b5cbe4e45061d8aa1a97ece7a
3,633,645
import logging

import py  # provides py.builtin._basestring; missing from the original imports


def get_logger_obj(logger=None):
    """Get a logger object that can be specified by its name, or passed as is.

    Defaults to the root logger.
    """
    if logger is None or isinstance(logger, py.builtin._basestring):
        logger = logging.getLogger(logger)
    return logger
263b2f70211fa82891a41dce0cdee23cc7ca3e93
3,633,646
import logging
import os
import sys


def opt_validate_predictd(options):
    """Validate options from a OptParser object.

    Ret: Validated options object.
    """
    # gsize
    try:
        options.gsize = efgsize[options.gsize]
    except:
        try:
            options.gsize = float(options.gsize)
        except:
            logging.error("Error when interpreting --gsize option: %s" % options.gsize)
            logging.error("Available shortcuts of effective genome sizes are %s" % ",".join(efgsize.keys()))
            sys.exit(1)

    # format
    options.gzip_flag = False  # if the input is gzip file
    options.format = options.format.upper()
    if options.format == "ELAND":
        options.parser = ELANDResultParser
    elif options.format == "BED":
        options.parser = BEDParser
    elif options.format == "ELANDMULTI":
        options.parser = ELANDMultiParser
    elif options.format == "ELANDEXPORT":
        options.parser = ELANDExportParser
    elif options.format == "SAM":
        options.parser = SAMParser
    elif options.format == "BAM":
        options.parser = BAMParser
        options.gzip_flag = True
    elif options.format == "BAMPE":
        options.parser = BAMPEParser
        options.gzip_flag = True
        options.nomodel = True
    elif options.format == "BEDPE":
        options.parser = BEDPEParser
        options.nomodel = True
    elif options.format == "BOWTIE":
        options.parser = BowtieParser
    elif options.format == "AUTO":
        options.parser = guess_parser
    else:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)

    # uppercase the format string
    options.format = options.format.upper()

    # upper and lower mfold
    options.lmfold = options.mfold[0]
    options.umfold = options.mfold[1]
    if options.lmfold > options.umfold:
        logging.error("Upper limit of mfold should be greater than lower limit! %s" % options.mfold)
        sys.exit(1)

    options.modelR = os.path.join(options.outdir, options.rfile)

    # logging object
    logging.basicConfig(level=(4 - options.verbose) * 10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w")
    options.error = logging.critical  # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
6418e5792ee043d3b854bb0371d511fe55d24cf3
3,633,647
import copy


def associate_trajectories(traj_1, traj_2, max_diff=0.01, offset_2=0.0,
                           first_name="first trajectory",
                           snd_name="second trajectory"):
    """
    Synchronizes two trajectories by matching their timestamps.

    :param traj_1: trajectory.PoseTrajectory3D object of first trajectory
    :param traj_2: trajectory.PoseTrajectory3D object of second trajectory
    :param max_diff: max. allowed absolute time difference for associating
    :param offset_2: optional time offset of second trajectory
    :param first_name: name of first trajectory for verbose logging
    :param snd_name: name of second trajectory for verbose/debug logging
    :return: traj_1, traj_2 (synchronized)
    """
    if not isinstance(traj_1, PoseTrajectory3D) \
            or not isinstance(traj_2, PoseTrajectory3D):
        raise SyncException("trajectories must be PoseTrajectory3D objects")
    snd_longer = len(traj_2.timestamps) > len(traj_1.timestamps)
    traj_long = copy.deepcopy(traj_2) if snd_longer else copy.deepcopy(traj_1)
    traj_short = copy.deepcopy(traj_1) if snd_longer else copy.deepcopy(traj_2)
    max_pairs = len(traj_long.timestamps)

    # First, match the timestamps of the shorter trajectory to the longer one.
    matching_indices = matching_time_indices(
        traj_short.timestamps, traj_long.timestamps, max_diff,
        offset_2 if snd_longer else -offset_2)
    traj_long.reduce_to_ids(matching_indices)
    # Next, reversely match the reduced long trajectory to the shorter one.
    matching_indices = matching_time_indices(
        traj_long.timestamps, traj_short.timestamps, max_diff,
        -offset_2 if snd_longer else offset_2)
    traj_short.reduce_to_ids(matching_indices)

    traj_1 = traj_short if snd_longer else traj_long
    traj_2 = traj_long if snd_longer else traj_short
    if len(matching_indices) == 0:
        raise SyncException(
            "found no matching timestamps between {} and {} with max. time "
            "diff {} (s) and time offset {} (s)".format(
                first_name, snd_name, max_diff, offset_2))
    logger.debug(
        "Found {} of max. {} possible matching timestamps between...\n"
        "\t{}\nand:\t{}\n..with max. time diff.: {} (s) "
        "and time offset: {} (s).".format(
            len(matching_indices), max_pairs, first_name, snd_name,
            max_diff, offset_2))
    return traj_1, traj_2
f6c38f11da445ef17ea095abe01bed4a1826b531
3,633,648
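A usage sketch for associate_trajectories, assuming the evo package's file_interface loader is available; the file names are hypothetical:

# Load two TUM-format trajectories and synchronize them by timestamp
# (0.05 s tolerance chosen for illustration).
from evo.tools import file_interface

traj_ref = file_interface.read_tum_trajectory_file('reference.txt')  # hypothetical path
traj_est = file_interface.read_tum_trajectory_file('estimate.txt')   # hypothetical path
traj_ref, traj_est = associate_trajectories(traj_ref, traj_est, max_diff=0.05)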
import numpy as np


def calculate_wake_wing_influence_matrix(cpoints, wake, normals):
    """
    Calculate influence matrix (steady wake contribution).

    Parameters
    ----------
    cpoints : np.ndarray, shape (m, n, 3)
        Array containing the (x,y,z) coordinates of all collocation points.
    wake : np.ndarray, shape (n, 4, 3)
        Array containing the (x,y,z) coordinates of the panel vertices
        that form the steady wake.
    normals : np.ndarray, shape (m, n, 3)
        Array containing the normal vectors to all wing panels.

    Returns
    -------
    aic : np.ndarray, shape (m * n, m * n)
        Wake contribution to the influence matrix.
    """
    m, n = cpoints.shape[:2]
    aic_w = np.zeros((m * n, m * n))
    r = wake[:, np.newaxis, :, :] - cpoints.reshape((1, m * n, 1, 3))
    vel = biot_savart_vectorized(r)
    nv = normals.reshape((m * n, 3))
    aic_w[:, -n:] = np.einsum('ijk,jk->ji', vel, nv)
    return aic_w
d99ca1c1291688462a8044fcfd558fbbba5f09b4
3,633,649
def get_recent_games(summoner_id):
    """
    https://developer.riotgames.com/api/methods#!/1016/3445

    Args:
        summoner_id (int): the ID of the summoner to find recent games for

    Returns:
        RecentGames: the summoner's recent games
    """
    request = "{version}/game/by-summoner/{summoner_id}/recent".format(
        version=cassiopeia.dto.requests.api_versions["game"],
        summoner_id=summoner_id)
    return cassiopeia.type.dto.game.RecentGames(cassiopeia.dto.requests.get(request))
12e5ec6816b987af74b79b0839c76b582793c145
3,633,650
import itertools

from bokeh import plotting
from bokeh.palettes import Spectral11


def create_daily_rate_line_plot(sources, services, y_axis_type='log', y_range=(1, 10**7)):
    """
    Returns
    -------
    plotting.figure
        A Bokeh plot that can be shown.
    """
    # create plot with a datetime axis type
    p = plotting.figure(plot_width=700, plot_height=1200, x_axis_type="datetime",
                        y_axis_type=y_axis_type, y_range=y_range)

    line_dashes = ['solid'] * 11
    line_dashes.extend(['dashed'] * 11)
    line_dashes.extend(['dotted'] * 11)

    for source, service, color, line_dash in zip(sources, services,
                                                 itertools.cycle(Spectral11),
                                                 line_dashes):
        legend_label = f'{service[0]} {service[1]}'
        p.line(x='date', y='rate_mean', line_width=2, source=source, color=color,
               legend_label=legend_label, line_dash=line_dash)

    p.legend.click_policy = "hide"
    p.title.text = "Mean Daily Bytes per Second v. Start Time"
    p.xaxis.axis_label = 'Start Time'
    p.yaxis.axis_label = 'Mean Bytes per Second (s)'
    p.legend.location = (0, 0)
    p.add_layout(p.legend[0], 'below')
    return p
ef71f51aef3ebb039661c64a6ec9e7191ce04d21
3,633,651
import tensorflow as tf


def masked_loss(y_gt, y_pred, loss_fn, **kwargs):
    """Calculate 2d loss by removing mask; normally it's durations/f0s/energies loss."""
    real_len = tf.reduce_sum(tf.cast(tf.math.not_equal(y_gt, 0), tf.float32),
                             axis=1)  # shape [B,]
    max_len = tf.shape(y_gt)[1]
    max_len = tf.cast(max_len, real_len.dtype)
    if len(y_gt.shape) == 3:
        # Mel shape is [B, MelLength, num_mels]; every element should be the
        # same in the last dimension.
        real_len = tf.reduce_mean(real_len, axis=-1)
    ratio = max_len / tf.reduce_max(real_len)
    # loss = tf.losses.mean_squared_error(y_gt, y_pred)
    loss = loss_fn(y_gt, y_pred)
    if len(loss.shape) == 2:
        loss = tf.reduce_mean(loss, axis=-1)
    loss = tf.math.multiply(loss, ratio)
    return loss
eb9491d0dde283942a2983bce582d61c34aaa95c
3,633,652
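A sketch of calling masked_loss on zero-padded duration targets; the tensors are illustrative:

import tensorflow as tf

y_gt = tf.constant([[2., 3., 1., 0., 0.],
                    [1., 4., 0., 0., 0.]])            # zero padding marks the mask
y_pred = tf.constant([[2.1, 2.8, 1.2, 0.0, 0.0],
                      [0.9, 4.2, 0.0, 0.0, 0.0]])
# tf.keras.losses.mse reduces the last axis, giving one loss per sequence,
# which masked_loss then rescales by the max-length/real-length ratio.
loss = masked_loss(y_gt, y_pred, tf.keras.losses.mse)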
def call_ft(function, *args):
    """Call an FTDI function and check the status. Raise exception on error."""
    status = function(*args)
    if len(bRaiseExceptionOnError) > 0:
        if status != FT_OK:
            raise DeviceError(status)
    return status
b78b7f39b06d990199f73c6d705408e291b85a91
3,633,653
import random
from copy import deepcopy


def reshuffle_words_to_fit(word_tuples_to_fit):
    """Within each length-class, reshuffle the words."""
    new_word_tuples_to_fit = deepcopy(word_tuples_to_fit)
    distinct_lens = set(len(wt.board) for wt in new_word_tuples_to_fit)
    for word_len in distinct_lens:
        word_inds_with_len = [i for i, wt in enumerate(new_word_tuples_to_fit)
                              if len(wt.board) == word_len]
        random.shuffle(word_inds_with_len)
        # Relies on the fact that all words of a given length are consecutive
        # in the list.
        new_word_tuples_to_fit = (
            new_word_tuples_to_fit[:min(word_inds_with_len)]
            + [new_word_tuples_to_fit[i] for i in word_inds_with_len]
            + new_word_tuples_to_fit[max(word_inds_with_len) + 1:])
    return new_word_tuples_to_fit
17ef2434dbf540757daf194ae96ae5b8f6c098e6
3,633,654
import numpy as np


def make_tree_all_params(species, dbh, height, stem_x, stem_y, stem_z,
                         lean_direction, lean_severity, crown_ratio,
                         crown_radius_E, crown_radius_N, crown_radius_W,
                         crown_radius_S, crown_edge_height_E, crown_edge_height_N,
                         crown_edge_height_W, crown_edge_height_S,
                         shape_top_E, shape_top_N, shape_top_W, shape_top_S,
                         shape_bot_E, shape_bot_N, shape_bot_W, shape_bot_S):
    """Creates a tree and returns its crown as a hull, exposing all parameters
    used as individual arguments.

    This is used primarily for the plotting functions in the visualization.py
    script in this package. The parameters are the same as involved in
    instantiating a Tree object.

    Returns
    --------
    x, y, z : numpy arrays
        the x, y, and z coordinates of points that occur along the edge of the
        tree crown.
    """
    crown_radii = np.array((crown_radius_E, crown_radius_N,
                            crown_radius_W, crown_radius_S))
    crown_edge_heights = np.array((crown_edge_height_E, crown_edge_height_N,
                                   crown_edge_height_W, crown_edge_height_S))
    crown_shapes = np.array(((shape_top_E, shape_top_N, shape_top_W, shape_top_S),
                             (shape_bot_E, shape_bot_N, shape_bot_W, shape_bot_S)))
    new_tree = Tree(species, dbh, height, stem_x, stem_y, stem_z,
                    lean_direction, lean_severity, crown_ratio, crown_radii,
                    crown_edge_heights, crown_shapes)
    x, y, z = new_tree.get_hull()
    return x, y, z
c7be83ebd67d6de21a2c7f82248903c23d63a83d
3,633,655
def get_cfg_defaults():
    """Get a yacs CfgNode object with default values."""
    # Return a clone so that the defaults will not be altered.
    # This is for the "local variable" use pattern.
    return _C.clone()
7cbf9b8f325ba417cf6c959d900b61c727cec816
3,633,656
import os
import logging


def outage_check(data, filename='outage.txt'):
    """Quality assurance check on the weather service :-)"""
    outage_checker = Outage(data)
    outage_checker.check_outage()
    outage_result = outage_checker.parse_outage()
    outfilepath = os.path.join(data['output_dir'], filename)
    if outage_result is None:
        logging.info('No radar outage(s) detected. Proceeding.')
        try:
            logging.debug('Removing file at %s', outfilepath)
            os.unlink(outfilepath)
        except OSError:
            logging.error('File does not exist: %s', outfilepath)
    else:
        logging.warning('There is radar outage text: %s', outage_result)
        try:
            with open(outfilepath, 'w') as cur:
                cur.write(outage_result)
        except OSError as exc:
            logging.error('OSError-- %s: %s', outfilepath, exc)
    return outage_result
071a8215763a15a00049d0d74942a535b5ce18e3
3,633,657
import fnmatch
from typing import List


def should_ignore(file: str, exclusions: List[str]) -> bool:
    """Check if a file matches a line in the exclusion list."""
    for excl in exclusions:
        if fnmatch.fnmatch(file, excl):
            return True
    return False

# for file in Path(".").glob("**/*.py*"):
#     if should_ignore(str(file), exclusions):
#         print(file)
ea8c4e4a6546d4f73009296208718696a443ac9f
3,633,658
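A quick check of should_ignore with gitignore-style patterns (illustrative values):

exclusions = ['*.pyc', 'build/*', '.venv/*']
assert should_ignore('module.pyc', exclusions)
assert not should_ignore('src/app.py', exclusions)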
def fpn_classifier_graph(rois, feature_maps, image_shape, pool_size, num_classes, config):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    selector: 0 for training and 1 for inference
    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes/feature, which determines the depth of the
                 results

    Returns:
        logits: [N, NUM_FEATURES, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_FEATURES, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to proposal boxes
        poses: [N, (tetax,tetay,tetaz,x,y,z)]
    """
    # ROI Pooling
    # Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_classifier")([rois] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="robotvqa_class_conv1")(x)
    # x = KL.TimeDistributed(BatchNorm(axis=3), name='robotvqa_class_bn1')(x)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)), name="robotvqa_class_conv2")(x)
    # x = KL.TimeDistributed(BatchNorm(axis=3), name='robotvqa_class_bn2')(x)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)
    # add rois' location (global features) to rois' features (local features)
    shared = KL.Concatenate(axis=2)([shared, rois])

    # Classifier heads: one logits/probs pair per output feature
    robotvqa_class_logits = []
    robotvqa_probs = []
    for idx in range(5):
        logits = KL.TimeDistributed(KL.Dense(num_classes[idx], activation='relu'),
                                    name='robotvqa_class_logits%d' % idx)(shared)
        robotvqa_class_logits.append(logits)
        robotvqa_probs.append(KL.TimeDistributed(KL.Activation("softmax"),
                                                 name="robotvqa_class%d" % idx)(logits))

    # BBox head
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes[0] * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes[0], 4), name="mrcnn_bbox")(x)

    # Poses head with residual connections
    # [batch, boxes, num_classes * (tx,ty,tz,x,y,z)]
    x1 = KL.TimeDistributed(KL.Dense(1028, activation='relu'),
                            name='robotvqa_poses_fc2')(shared)
    x2 = KL.TimeDistributed(KL.Dense(1028, activation='linear'),
                            name='robotvqa_poses_fc3')(KL.Average()([shared, x1]))
    x3 = KL.TimeDistributed(KL.Dense(1028, activation='relu'),
                            name='robotvqa_poses_fc0')(KL.Average()([x1, x2]))
    x4 = KL.TimeDistributed(KL.Dense(1028, activation='linear'),
                            name='robotvqa_poses_fc1')(KL.Average()([x2, x3]))
    x = KL.TimeDistributed(KL.Dense(num_classes[0] * 6, activation='relu'),
                           name='robotvqa_poses_fc4')(KL.Average()([x3, x4]))
    # Reshape to [batch, boxes, num_classes, (tx,ty,tz,x,y,z)]
    s = K.int_shape(x)
    robotvqa_poses = KL.Reshape((s[1], num_classes[0], 6), name="robotvqa_poses")(x)

    return robotvqa_class_logits, robotvqa_probs, mrcnn_bbox, robotvqa_poses, shared
3d6b649b2d4eab53aa856b169d87d83ba4c2eaff
3,633,659
from lmfit import Model


def fit_unitarity(depths, shifted_purities, weights=None):
    """Construct and fit an RB curve with appropriate guesses.

    :param depths: The clifford circuit depths (independent variable)
    :param shifted_purities: The shifted purities (dependent variable)
    :param weights: Optional weightings of each point to use when fitting.
    :return: an lmfit Model
    """
    _check_data(depths, shifted_purities, weights)
    unitarity_model = Model(unitarity_fn)
    params = unitarity_guess(model=unitarity_model, y=shifted_purities)
    return unitarity_model.fit(shifted_purities, x=depths, params=params,
                               weights=weights)
a8fb739b4c64cf63ceff51ef9eabd276c2b6c48d
3,633,660
import re


def filter_paragraph(p):
    """Simple filter to remove obviously bad paragraphs (bad text extraction).

    Note this needs to run very quickly as it is applied to every paragraph in
    the corpus, so nothing fancy! This whole method should be linear expected
    time in len(p).

    Args:
        p: string, paragraph

    Returns:
        True if we should remove the paragraph.
    """
    # Create a space between a word and the punctuation following it,
    # e.g. "he is a boy." => "he is a boy ."
    p = re.sub(r"([?.!,¿])", r" \1 ", p)
    p = re.sub(r'[" "]+', " ", p)
    # Expect a minimum number of words.
    tokens = p.split()
    if len(tokens) < 6:
        return True
    # Require some letters.
    if not re.search(_SOME_ALPHA_RE, p):
        return True
    # Keep this one at the end, probably the most complicated logic.
    # We try to detect sentences, which should have a minimum of 3 tokens
    # with only alphabetic characters.
    last = 0
    found_sentence = False
    num_alpha = 0
    for i, x in enumerate(tokens):
        if x == '.':
            if i - last > 3 and num_alpha >= 3:
                found_sentence = True
                break
            last = i
            num_alpha = 0
        if re.match(_ONLY_ALPHA_RE, x):
            num_alpha += 1
    if not found_sentence:
        return True
    return False
4458a480c176149d1375dfafb13211b4fd7ee9d0
3,633,661
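An illustrative check of filter_paragraph. The module-level regexes are not shown in the entry above, so the demo assumes definitions matching their names (some/only alphabetic characters):

import re

_SOME_ALPHA_RE = re.compile(r'[A-Za-z]')      # assumed definitions for the demo
_ONLY_ALPHA_RE = re.compile(r'^[A-Za-z]+$')
good = 'This is a clean paragraph. It has several proper sentences in it.'
assert filter_paragraph(good) is False                        # kept
assert filter_paragraph('!!! ??? ### $$$ %%% &&&') is True    # dropped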
def get_matching_tables(tables, path):
    """Get list of matching tables for provided path.

    Return list is sorted by longest matching path part.

    :param tables: Mapping of `Table` objects
    :param path: Path-like string
    :return: List of tables matched by path
    """
    candidates = []
    for table in tables.values():
        for candidate in table.path:
            if common_prefix(candidate, path) == candidate:
                candidates.append(table)
    return sorted(candidates,
                  key=lambda c: max(len(p) for p in c.path),
                  reverse=True)
e91e93cef56d3eb6e5b6ae85f522fbb042e472a9
3,633,662
import requests


def email_video_link(talk):
    """Send the presenter a link to their video, asking to confirm."""
    meeting_recordings = common.zoom_request(
        requests.get,
        common.ZOOM_API + f"/meetings/{talk['zoom_meeting_id']}/recordings"
    )
    if not len(meeting_recordings["recording_files"]):
        raise RuntimeError("No recordings found")

    message = RECORDING_AVAILABLE_TEMPLATE.render(
        share_url=meeting_recordings["share_url"],
        **talk,
    )

    return common.api_query(
        requests.post,
        common.MAILGUN_DOMAIN + "messages",
        data={
            "from": "VSF team <no-reply@mail.virtualscienceforum.org>",
            "to": f"{talk['speaker_name']} <{talk['email']}>",
            "subject": "Approve your Speakers' Corner recording",
            "text": common.markdown_to_plain(message),
            "html": common.markdown_to_email(message),
        }
    )
f47139dd42606a57406295f2079fdcc18e8fcfc0
3,633,663
import traceback


def make_import_user_csv_files(uw_accounts, filepath):
    """
    :param uw_accounts: a list of UwAccount objects
    Writes all csv files. Returns the number of records written out.
    """
    if not uw_accounts or len(uw_accounts) == 0:
        return 0
    file_size = get_csv_file_size()
    total_users = len(uw_accounts)
    f_index = 1
    user_number = 0
    csv_headers = get_aline_csv(get_headers())
    f = open_file(get_user_file_name(filepath, f_index))
    f.write(csv_headers)
    for uw_account in uw_accounts:
        if uw_account.disabled or uw_account.has_terminate_date():
            continue
        person = get_person(uw_account.netid)
        if (person is None or person.is_test_entity or
                not person.is_emp_state_current()):
            continue
        if person.uwnetid != uw_account.netid:
            logger.error("OLD netid, Skip {0}".format(uw_account))
            continue
        aline = get_aline_csv(get_attr_list(person, get_worker(person)))
        try:
            f.write(aline)
        except Exception:
            log_exception(
                logger,
                "{0:d}th file: ({1}), Skip!".format(f_index, aline),
                traceback.format_exc())
            continue
        user_number += 1
        if user_number < total_users and (user_number % file_size) == 0:
            f.close()
            logger.info("Finish writing {0:d} entries.".format(user_number))
            f_index += 1
            f = open_file(get_user_file_name(filepath, f_index))
            f.write(csv_headers)
    f.close()
    logger.info("Finish writing {0:d} entries.".format(user_number))
    return user_number
f41077fa18103edc63b727574b2e2c8ea8d039c8
3,633,664
import numpy as np


def distanceInOval(x, y, a=3, b=2, k=0.2):
    """
    :param x: high-dimension embedding of cell A
    :param y: high-dimension embedding of cell B
    :param a: major axis length
    :param b: minor axis length
    :param k: deformation parameter
    :return: distance between cell A and B in an oval whose function is
        x^2/a^2 + y^2/(t(x)*b^2) = 1 where t(x) = exp(kx)
    """
    result = 0.0
    x = x % (2 * np.pi)
    y = y % (2 * np.pi)
    for i in range(x.shape[0]):
        result += ((a * np.cos(x[i]) - a * np.cos(y[i])) ** 2
                   + (b * np.sqrt(adjustment(x[i], k, a)) * np.sin(x[i])
                      - b * np.sqrt(adjustment(y[i], k, a)) * np.sin(y[i])) ** 2)
    d = np.sqrt(result)
    grad1 = -(a ** 2) * (np.cos(x) - np.cos(y)) * np.sin(x)
    grad2 = (b ** 2) * (np.sqrt(adjustment(x, k, a)) * np.sin(x)
                        - np.sqrt(adjustment(y, k, a)) * np.sin(y))
    grad3 = (adjustmentGrad(x, k, a) * np.sin(x) / (2 * np.sqrt(adjustment(x, k, a)))
             + np.cos(x) * np.sqrt(adjustment(x, k, a)))
    grad = grad1 + grad2 * grad3
    grad = grad / (d + 1e-6)
    return d, grad
acea2495d146a858ddfe5770025d95899f00c797
3,633,665
from functools import partial
from uuid import uuid4

import wrapt
from plumbum import local   # assumed source of `local`, matching local.path(...) below


def file_generator(
    wrapped=None,
    ids=["file"],
    names=[uuid4().hex],
    suffixes=[""],
    dirs=[SANDBOX],
    properties=None,
):
    """Decorator which automates setup and return for file generation functions.

    The decorator fulfills 3 tasks:

    1. Generating required temporary file names.
    2. Creating an output dictionary (`D`) which lists file locations.
    3. Modifies the return of the wrapped function to instead return `D`.

    Wrapped functions must contain a parameter named `meta`; this parameter
    will be replaced by `D` at runtime, allowing the function to access the
    generated filenames.

    Parameters
    ----------
    wrapped: Callable
        Should not be defined manually, will be passed automatically by python.
    ids: Iterable[str]
        A list of ids for output files. These will be the keys for the output
        dictionary.
    names: Iterable[str]
        A list of output file basenames.
    suffixes: Iterable[str]
        A list of file suffixes for output files.
    dirs: Iterable[PathLike]
        A list of output directories.
    properties: Dict
        Dictionary of extra metadata to be included in output dictionary.
    """
    # warning: this function gets pretty hairy
    if wrapped is None:
        # Happens when some optional params are defined.
        # See wrapt docs for reasoning.
        return partial(
            file_generator,
            ids=ids,
            names=names,
            suffixes=suffixes,
            dirs=dirs,
            properties=properties,
        )

    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Wrapper function with decorator arguments included implicitly as
        # variables in outer scope.
        # First generate the metadata which will be passed into the decorated
        # function.
        filenames = [
            local.path(directory) / (name + suffix)
            for directory, name, suffix in zip(dirs, names, suffixes)
        ]
        output_dict = dict(zip(ids, filenames))
        if properties:
            output_dict.update(properties)

        # Generate a partial function with the meta keyword arg predefined.
        partial_wrapped_func = partial(wrapped, meta=output_dict)

        def replacement_fixture_func(**kwargs):
            # Function which will replace the decorated function. Run the
            # wrapped function with the injected `meta` variable, then return
            # the metadata.
            partial_wrapped_func(**kwargs)
            return output_dict

        wrapped_argnames = get_param_names(wrapped)
        # pytest checks that all fixture arguments are valid fixtures, so we
        # need to purge all references to the `meta` parameter.
        wrapped_argnames.remove("meta")
        del kwargs["meta"]
        replacement_fixture_func = replace_param_sig(
            replacement_fixture_func, wrapped_argnames
        )
        return replacement_fixture_func(**kwargs)

    return wrapper(wrapped)
ea36dbb67f722513bb149250f7536f331f95918d
3,633,666
import numpy as np


def optimal_t_from_selection(x, y, ts, J, min_n):
    """
    Time complexity: O(T*N)
    Space complexity: O(N+T) - to store the input
    """
    N = len(x)
    best_loss = np.inf
    best_t = -np.inf
    idx = None
    for t in ts:                                       # O(T)
        # evaluate loss for splitting [:s], [s:]
        y_l = y[x <= t]                                # O(N)
        y_r = y[x > t]                                 # O(N)
        s = len(y_l)
        loss = J(y_l) * s / N + J(y_r) * (N - s) / N   # O(N)
        if loss < best_loss and s >= min_n and s <= N - min_n:
            best_loss = loss
            best_t = t
            idx = s
    return best_t, best_loss, idx
90e882f97cbf83c66dd395c8bf45993ae445cd9a
3,633,667
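A small worked split using variance as the impurity measure J; values are illustrative:

import numpy as np

x = np.array([1., 2., 3., 10., 11., 12.])
y = np.array([1., 1., 1., 5., 5., 5.])
t, loss, idx = optimal_t_from_selection(x, y, ts=np.unique(x), J=np.var, min_n=1)
# The best threshold separates the two clusters: t == 3.0, loss == 0.0, idx == 3.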
def _deserialize_qnode(qnode_id, qnode):
    """Returns a QNode from a single deserialized QueryGraph node in a TRAPI
    request."""
    constraints = []
    try:
        ids = qnode.get('ids')
        categories = qnode.get('categories')
        is_set = qnode.get('is_set')
        req_constraints = qnode.get('constraints')
        if req_constraints:
            for constraint in req_constraints:
                try:
                    query_constraint = QueryConstraint(**constraint)
                    constraints.append(query_constraint)
                except TypeError:
                    raise BadRequest(f'Could not deserialize constraint={constraint}')
        qnode = QNode(ids, categories, is_set, constraints)
        setattr(qnode, 'qnode_id', qnode_id)
    except TypeError:
        raise BadRequest(f'Could not deserialize qnode={qnode}')
    return qnode
81278d775ecc68b9202fc3941549414f98e8b7f9
3,633,668
def combined_f1_rmse(y_true, y_pred):
    """Difference between F1 score and root mean square error (RMSE).

    The optimal values for F1 score and RMSE are 1 and 0 respectively,
    so the combined optimal value is 1.
    """
    return f1_score(y_true, y_pred) - rmse(y_true, y_pred)
e43511cbea8a6a7fe5eaa5e142ece568d59ec0f8
3,633,669
from collections import defaultdict as dd
from pathlib import Path

from cytomine import Cytomine
from cytomine.models import Annotation, AnnotationCollection


def get_images_with_annotations(host, public_key, private_key, project_id=None,
                                download=True, annotation_ids=None):
    """
    Find and download (if not present) annotation information and images.

    :param annotation_ids: List of annotations to fetch
    :param download: Whether or not to download missing files. Otherwise just
        fetches information.
    :param private_key: Private key as string
    :param public_key: Public key as string
    :param host: Hostname
    :param project_id: Restrict to this project
    :return: List of dictionaries with 'image' and 'annotations'
    """
    with Cytomine(host=host, public_key=public_key, private_key=private_key) as cytomine:
        annotations = AnnotationCollection()
        if project_id is not None:
            annotations.project = project_id
        annotations.fetch()
        print(f'{annotations}')
        image_regions = dd(list)
        for annotation in annotations:
            print(annotation)
            if annotation_ids is not None and annotation.id not in annotation_ids:
                continue
            print(f'Found annotation {annotation.id}')
            annotation: Annotation
            path = Path('/tmp') / 'cytomine' / 'p{project}' / 'i{image}' / 'masked{id}.png'
            formatted = str(path).format(id=annotation.id, image=annotation.image,
                                         project=annotation.project)
            print(f'Checking whether or not to download to {formatted}')
            if download and not Path(formatted).is_file():
                print(f'Dumping annotation to {formatted}')
                annotation.dump(str(path), override=True, mask=True, alpha=True)
                assert Path(formatted).is_file(), "Annotation image not found after download!"
            image_regions[annotation.image].append(formatted)
    print(image_regions)
    return image_regions
75ac5e025ead946e9d2c626799eb54d02e0cde71
3,633,670
import requests
import json
import os

import pandas as pd


def read_inap(room_name='K1N0624', start='2018-02-01', end='2018-03-01'):
    """
    Function to download Indoor Environmental Quality (IEQ) data monitored by
    INAP sensors in the SL demo-case using the IRI-UL web api. You may need to
    update the token if the current one is expired (contact IRI-UL).

    Args:
        room_name (str): Room identification.
        start (str): Start date as a date string object.
        end (str): End date as a date string object.

    Returns:
        df (pandas DataFrame object): IEQ data (CO2, temperature, relative
        humidity, VOC).

    Example:
        df_R3N0808 = read_inap('R3N0808', '2018-02-01', '2018-03-01')
    """
    # Room INAP sensor id dictionary
    sensor_id = {'R3N0808': '00681B5B', 'R3N0644': '00682753',
                 'K1N0623': '0068224F', 'K3N0605': '0068272C',
                 'R2N0805': '0029DC18', 'R2N0634': '00681B21',
                 'K1N0624': '000CC736', 'K3N0618': '00681A09'}
    # Selected room sensor id
    ids = sensor_id[room_name]

    # Create URL with correct parameters (contact IRI-UL to get token)
    start_date = f'{start}T00:00:00Z'
    end_date = f'{end}T23:59:59Z'
    token = ''
    url = (f'http://52.211.97.129:8080/api/sensors/data?token={token}'
           f'&id={ids}&startDate={start_date}&endDate={end_date}')

    # Connect to IRI-UL INAP sensor api and check connection status
    r = requests.get(url, stream=True)
    if r:
        print(f'\nRoom {room_name}: Connection is good!')
    else:
        print('\nAn error has occurred.')

    with open('INAP_chunks.txt', 'wb') as txt:
        # writing one chunk at a time to the txt file
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                txt.write(chunk)
    with open('INAP_chunks.txt', 'r') as f:
        f_contents = f.readlines()

    # Convert api response to DataFrame object
    lst = json.loads(f_contents[0])
    data = lst[0]['measurements']
    if lst[0]['id'] != ids:
        print('Sensor id in response does not match')
    df = pd.DataFrame(data)
    df.drop(labels=['interupt', 'rgbw', 'score', 'sound', 'type'], axis=1,
            inplace=True)
    df['timeStamp'] = pd.to_datetime(df['timeStamp'], format='%Y-%m-%d %H:%M:%S')
    df.set_index('timeStamp', inplace=True)
    df.columns = [f'{room_name}_INAP_{col}' for col in df.columns]
    df.index.rename('Timestamp', inplace=True)

    if 'df' in locals():
        os.remove('INAP_chunks.txt')
    else:
        print('DataFrame is not created')
    print(f'Room {room_name}: Data from {start} to {end} is downloaded.')
    return df
2fe5af49654a425b5be8bcf4cb5dfc2403a9ae32
3,633,671
async def infer_type_make_record(engine, _cls: dtype.TypeType, *elems):
    """Infer the return type of make_record."""
    cls = _cls.values[VALUE]
    if cls is ANYTHING:
        raise MyiaTypeError('Expected a class to inst')
    expected = list(cls.attributes.items())
    if len(expected) != len(elems):
        raise MyiaTypeError('Wrong class inst')
    for (name, t), elem in zip(expected, elems):
        if not (await issubtype(elem, t)):
            raise MyiaTypeError('Wrong class inst')

    return AbstractClass(
        cls.tag,
        {name: elem for (name, _), elem in zip(expected, elems)},
        cls.methods
    )
8389217a1fee854b73d6189cf811a26a7613680a
3,633,672
from functools import reduce
import operator

import numpy as np

try:
    # mdtraj appears to be an optional dependency here, given the
    # `if mdtraj and ...` guard below; this import pattern is an assumption.
    import mdtraj
except ImportError:
    mdtraj = None


def concatenate(trajectories):
    """Return the concatenation of a sequence of trajectories.

    Parameters
    ----------
    trajectories : sequence of sequences
        A sequence of trajectories.

    Returns
    -------
    sequence
        The concatenation of `trajectories`.
    """
    if isinstance(trajectories[0], np.ndarray):
        return np.concatenate(trajectories)
    if mdtraj and isinstance(trajectories[0], mdtraj.Trajectory):
        return trajectories[0].join(trajectories[1:], check_topology=False)
    return reduce(operator.concat, trajectories)
dee2e64b579d07adea0842eb375c543453e9eede
3,633,673
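concatenate dispatches on the element type; a quick check with arrays and plain lists:

import numpy as np

assert np.array_equal(concatenate([np.arange(3), np.arange(3)]),
                      np.array([0, 1, 2, 0, 1, 2]))
assert concatenate([[1, 2], [3]]) == [1, 2, 3]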
def dlonlat_at_grid_center(ctr_lat, ctr_lon, dx=4.0e3, dy=4.0e3,
                           x_bnd=(-100e3, 100e3), y_bnd=(-100e3, 100e3),
                           proj_datum='WGS84', proj_ellipse='WGS84'):
    """
    Utility function useful for producing a regular grid of lat/lon data,
    where an approximate spacing (dx, dy) and total span of the grid
    (x_bnd, y_bnd) is desired. Units are in meters.

    There is guaranteed to be distortion away from the grid center, i.e.,
    only the grid cells adjacent to the center location will have area
    dx * dy. Likewise, the lat, lon range is calculated naively using
    dlat, dlon multiplied by the number of grid cells implied by
    x_bnd/dx, y_bnd/dy. This is the naive approach, but probably what's
    expected when specifying distances in kilometers for an inherently
    distorted lat/lon grid.

    Returns:
        (dlon, dlat, lon_bnd, lat_bnd) corresponding to
        (dx, dy, x_range, y_range)
    """
    # Use the azimuthal equidistant projection as the method for converting
    # to kilometers.
    proj_name = 'aeqd'

    mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon,
                            lat_ts=ctr_lat, lon_0=ctr_lon, lat_0=ctr_lat,
                            lat_1=ctr_lat, ellipse=proj_ellipse, datum=proj_datum)
    geoProj = GeographicSystem()

    # Get dlat
    lon_n, lat_n, z_n = geoProj.fromECEF(*mapProj.toECEF(0, dy, 0))
    dlat = lat_n - ctr_lat

    # Get dlon
    lon_e, lat_e, z_e = geoProj.fromECEF(*mapProj.toECEF(dx, 0, 0))
    dlon = lon_e - ctr_lon

    lon_min = ctr_lon + dlon * (x_bnd[0] / dx)
    lon_max = ctr_lon + dlon * (x_bnd[1] / dx)
    lat_min = ctr_lat + dlat * (y_bnd[0] / dy)
    lat_max = ctr_lat + dlat * (y_bnd[1] / dy)

    # Alternate method: lat, lon for the actual distance to the NSEW in the
    # projection:
    # lon_range_n, lat_range_n, z_range_n = geoProj.fromECEF(*mapProj.toECEF(0, y_bnd, 0))
    # lon_range_e, lat_range_e, z_range_e = geoProj.fromECEF(*mapProj.toECEF(x_bnd, 0, 0))

    return dlon, dlat, (lon_min, lon_max), (lat_min, lat_max)
a64fefbad5593e33af4dde3fb362aec54c0d6225
3,633,674
def attribute_test_service(request):
    """
    Displays a list of all :model:`rr.Attribute` and values found from
    environment variables.

    **Context**

    ``object_list``
        List of dictionaries containing attribute values and metadata.

    ``logout_url``
        Logout URL.

    **Template:**

    :template:`attribute_test_service.html`
    """
    if not hasattr(settings, 'ATTRIBUTE_TEST_SERVICE') or not settings.ATTRIBUTE_TEST_SERVICE:
        raise Http404(_("Attribute test service has been disabled"))
    attributes = Attribute.objects.filter(test_service=True).order_by('friendlyname')
    object_list = []
    for attribute in attributes:
        value = request.META.get(attribute.shib_env, '').encode('latin1').decode('utf-8', 'ignore')
        regex = attribute.regex_test
        icon = _check_status(attribute, value, regex)
        if attribute.public_saml or value:
            object_list.append({'friendlyname': attribute.friendlyname,
                                'name': attribute.name,
                                'value': value.replace(";", "<br>"),
                                'regex': regex,
                                'icon': icon})
    if hasattr(settings, 'ATTRIBUTE_TEST_SERVICE_LOGOUT_URL'):
        logout_url = settings.ATTRIBUTE_TEST_SERVICE_LOGOUT_URL
    else:
        logout_url = None
    return render(request, "attribute_test_service.html", {
        'object_list': object_list,
        'logout_url': logout_url,
        'shib_auth_context': request.META.get('Shib-AuthnContext-Class', ''),
        'shib_auth_method': request.META.get('Shib-Authentication-Method', '')
    })
79e7e4b861e74ab6739ccd3c2ea226d01d4bbe02
3,633,675
import re
import threading
import uuid
import datetime
import json


def create_foundation_entity_instance(entity):
    """Create an instance of a Foundation SDK Entity."""
    # Get an SDK class and use the configuration generation behaviour to pass
    # in parameters.
    sdk_instance = SDK(**(request.get_json() or {}))
    try:
        # Entity names are PascalCase, SDK entity methods are snake case.
        method_name = re.sub('(?<!^)(?=[A-Z])', '_', entity).lower()
        entity_class = getattr(sdk_instance.foundation, method_name)
    except AttributeError:
        raise NotFound("Entity '%s' which was reformatted to '%s' cannot be found."
                       % (entity, method_name))

    instance = LockedInstance(
        lock=threading.Lock(),
        instance=entity_class(),
        module=None,
        entity=entity,
        uuid=str(uuid.uuid4().hex),
        created_at=datetime.datetime.utcnow(),
    )

    STORE[instance.uuid] = instance

    response = app.response_class(
        response=json.dumps(serialise_instance(instance)),
        status=201,
        mimetype='application/json'
    )
    return response
78d3bfcf4baa922213feedde1420112a9db7bf04
3,633,676
import tensorflow as tf


def guided_alignment_cost(
    attention_probs, gold_alignment, sequence_length=None, cost_type="ce", weight=1
):
    """Computes the guided alignment cost.

    Args:
        attention_probs: The attention probabilities, a float ``tf.Tensor`` of
            shape :math:`[B, T_t, T_s]`.
        gold_alignment: The true alignment matrix, a float ``tf.Tensor`` of
            shape :math:`[B, T_t, T_s]`.
        sequence_length: The length of each sequence.
        cost_type: The type of the cost function to compute (can be: ce, mse).
        weight: The weight applied to the cost.

    Returns:
        The guided alignment cost.

    Raises:
        ValueError: if :obj:`cost_type` is invalid.
    """
    if cost_type == "ce":
        loss = tf.keras.losses.CategoricalCrossentropy(
            reduction=tf.keras.losses.Reduction.SUM
        )
    elif cost_type == "mse":
        loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.SUM)
    else:
        raise ValueError("invalid guided alignment cost: %s" % cost_type)

    if sequence_length is not None:
        sample_weight = tf.sequence_mask(
            sequence_length,
            maxlen=tf.shape(attention_probs)[1],
            dtype=attention_probs.dtype,
        )
        sample_weight = tf.expand_dims(sample_weight, -1)
        normalizer = tf.reduce_sum(sequence_length)
    else:
        sample_weight = None
        normalizer = tf.size(attention_probs)

    cost = loss(gold_alignment, attention_probs, sample_weight=sample_weight)
    cost /= tf.cast(normalizer, cost.dtype)
    return weight * cost
b1acea456d0f17ff3e0917a071cc84d69b7893ad
3,633,677
def _log_object_event(
    ctx,
    dbOperationsEvent=None,
    event_status_id=None,
    dbAcmeAccount=None,
    dbAcmeAccountKey=None,
    dbAcmeDnsServer=None,
    dbAcmeOrder=None,
    dbCertificateCA=None,
    dbCertificateCAChain=None,
    dbCertificateRequest=None,
    dbCoverageAssuranceEvent=None,
    dbDomain=None,
    dbPrivateKey=None,
    dbQueueCertificate=None,
    dbQueueDomain=None,
    dbCertificateSigned=None,
    dbUniqueFQDNSet=None,
):
    """additional logging for objects"""
    dbOperationsObjectEvent = model_objects.OperationsObjectEvent()
    dbOperationsObjectEvent.operations_event_id = dbOperationsEvent.id
    dbOperationsObjectEvent.operations_object_event_status_id = event_status_id

    if dbAcmeAccount:
        dbOperationsObjectEvent.acme_account_id = dbAcmeAccount.id
    elif dbAcmeAccountKey:
        dbOperationsObjectEvent.acme_account_key_id = dbAcmeAccountKey.id
    elif dbAcmeOrder:
        dbOperationsObjectEvent.acme_order_id = dbAcmeOrder.id
    elif dbAcmeDnsServer:
        dbOperationsObjectEvent.acme_dns_server_id = dbAcmeDnsServer.id
    elif dbCertificateCA:
        dbOperationsObjectEvent.certificate_ca_id = dbCertificateCA.id
    elif dbCertificateCAChain:
        dbOperationsObjectEvent.certificate_ca_chain_id = dbCertificateCAChain.id
    elif dbCertificateRequest:
        dbOperationsObjectEvent.certificate_request_id = dbCertificateRequest.id
    elif dbCoverageAssuranceEvent:
        dbOperationsObjectEvent.coverage_assurance_event_id = (
            dbCoverageAssuranceEvent.id
        )
    elif dbDomain:
        dbOperationsObjectEvent.domain_id = dbDomain.id
    elif dbPrivateKey:
        dbOperationsObjectEvent.private_key_id = dbPrivateKey.id
    elif dbQueueCertificate:
        dbOperationsObjectEvent.queue_certificate_id = dbQueueCertificate.id
    elif dbQueueDomain:
        dbOperationsObjectEvent.queue_domain_id = dbQueueDomain.id
    elif dbCertificateSigned:
        dbOperationsObjectEvent.certificate_signed_id = dbCertificateSigned.id
    elif dbUniqueFQDNSet:
        dbOperationsObjectEvent.unique_fqdn_set_id = dbUniqueFQDNSet.id

    ctx.dbSession.add(dbOperationsObjectEvent)
    ctx.dbSession.flush(objects=[dbOperationsObjectEvent])
    return dbOperationsObjectEvent
91e395e6dd9c512b93531c292056c2e324084622
3,633,678
import os

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FuncFormatter


def catlogmatch_plot(catalog_mt, dd=0.2, dir_fig='.', figformat='png', fnametag=None):
    """
    To plot the pie figure after comparing two catalogs.

    Parameters
    ----------
    catalog_mt : dict
        a comparison catalog, for detail see 'utils_dataprocess.catalog_match'.
        catalog_mt['status'] contains the comparison results.
    dd : float, optional
        distance in km for generating distance bins for the bar plot.
        The default is 0.2 km.
    dir_fig : str, optional
        directory for saving figures. The default is '.'.
    figformat : str, optional
        output figure format. The default is 'png'.
    fnametag : str, optional
        figure name tag.

    Returns
    -------
    None.
    """
    # plot pie chart --------------------------------------------------------
    N_matched = sum(catalog_mt['status'] == 'matched')        # total matched events
    N_new = sum(catalog_mt['status'] == 'new')                # total new events
    N_undetected = sum(catalog_mt['status'] == 'undetected')  # total undetected/missed events

    def func(pct, allvals):
        absolute = int(round(pct / 100. * np.sum(allvals)))
        return "{:.1f}%\n{:d}".format(pct, absolute)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), subplot_kw={'aspect': 1})

    # plot the first pie
    labels = ['Matched', 'New']
    sizes = [N_matched, N_new]
    explode = (0.04, 0.04)  # whether to "explode" any slice
    colors = ['#66b3ff', '#99ff99']
    ax1.pie(sizes, explode=explode, labels=labels, colors=colors,
            autopct=lambda pct: func(pct, sizes), pctdistance=0.8,
            shadow=False, startangle=90)
    # draw circle
    centre_circle = plt.Circle((0, 0), 0.6, fc='white')
    ax1.set_title('New catalog', fontsize=13, fontweight='bold')
    ax1.axis('equal')
    ax1.text(-0.5, -1.3, 'Total events: {}'.format(N_matched + N_new))
    ax1.add_artist(centre_circle)

    # plot the second pie
    labels = ['Matched', 'Undetected']
    sizes = [N_matched, N_undetected]
    explode = (0.01, 0.03)
    colors = ['#66b3ff', '#ff9999']
    ax2.pie(sizes, explode=explode, labels=labels, colors=colors,
            autopct=lambda pct: func(pct, sizes), pctdistance=0.82,
            shadow=False, startangle=60)
    # draw circle
    centre_circle = plt.Circle((0, 0), 0.6, fc='white')
    ax2.set_title('Reference catalog', fontsize=13, fontweight='bold')
    ax2.axis('equal')
    ax2.text(-0.5, -1.3, 'Total events: {}'.format(N_matched + N_undetected))
    ax2.add_artist(centre_circle)

    # output figure
    if fnametag is None:
        fname = os.path.join(dir_fig, 'catalog_compare_statistical_pie.' + figformat)
    else:
        fname = os.path.join(dir_fig, 'catalog_compare_statistical_pie_' + fnametag + '.' + figformat)
    fig.savefig(fname, dpi=600, bbox_inches='tight')
    plt.cla()
    fig.clear()
    plt.close(fig)
    # ========================================================================

    # plot horizontal distance bar plot --------------------------------------
    evhdistkm = catalog_mt['hdist_km'][catalog_mt['hdist_km'] != None]  # distance in km
    bins = np.arange(0, evhdistkm.max() + dd, dd)  # bin edges for the cumulative plot
    fig = plt.figure(figsize=(8, 6), dpi=600)
    ax1 = fig.add_subplot(111)
    ax1.hist(evhdistkm, bins, rwidth=1.0, color='black', histtype='step',
             linewidth=1.6, cumulative=True, density=True)
    ax1.set_xlabel('Distance (km)', color='k', fontsize=14)
    ax1.set_ylabel('Event percentage', color='k', fontsize=14)

    def to_percent(y, position):
        # Ignore the passed-in position. This has the effect of scaling the
        # default tick locations.
        s = str(100 * y)
        # The percent symbol needs escaping in latex
        if matplotlib.rcParams['text.usetex'] is True:
            return s + r'$\%$'
        else:
            return s + '%'

    # Create the formatter using the function to_percent. This multiplies all
    # the default labels by 100, making them all percentages.
    formatter = FuncFormatter(to_percent)
    # Set the formatter
    ax1.yaxis.set_major_formatter(formatter)

    # remove the vertical line at the end
    def fix_hist_step_vertical_line_at_end(ax):
        axpolygons = [poly for poly in ax.get_children()
                      if isinstance(poly, matplotlib.patches.Polygon)]
        for poly in axpolygons:
            poly.set_xy(poly.get_xy()[:-1])

    fix_hist_step_vertical_line_at_end(ax1)
    # force the horizontal major ticks to appear in steps of '10dd' units
    ax1.xaxis.set_major_locator(ticker.MultipleLocator(10 * dd))
    # force the horizontal minor ticks to appear in steps of 'dd' units
    ax1.xaxis.set_minor_locator(ticker.MultipleLocator(dd))

    if fnametag is None:
        fname = os.path.join(dir_fig, 'catalog_compare_hdist_bar.' + figformat)
    else:
        fname = os.path.join(dir_fig, 'catalog_compare_hdist_bar_' + fnametag + '.' + figformat)
    fig.savefig(fname, dpi=600, bbox_inches='tight')
    plt.cla()
    fig.clear()
    plt.close(fig)
    # ========================================================================
    return
a7c6b168d3f42657c8a2bb8fbc53c940472dbfd6
3,633,679
def dismiss_message_url(course):
    """Returns the URL for the dismiss message endpoint."""
    return reverse(
        'openedx.course_experience.dismiss_welcome_message',
        kwargs={
            'course_id': str(course.id),
        }
    )
aeabf651b2576f280634d7562d31e52fb7e0748f
3,633,680
import numpy as np


def cllr(lrs, y, weights=(1, 1)):
    """
    Calculates a log likelihood ratio cost (C_llr) for a series of likelihood
    ratios.

    Nico Brümmer and Johan du Preez, Application-independent evaluation of
    speaker detection, In: Computer Speech and Language 20(2-3), 2006.

    Parameters
    ----------
    lrs : a numpy array of LRs
    y : a numpy array of labels (0 or 1)

    Returns
    -------
    cllr
        the log likelihood ratio cost
    """
    # ignore errors:
    #   divide -> ignore divide by zero
    #   over   -> ignore scalar overflow
    with np.errstate(divide='ignore', over='ignore'):
        lrs0, lrs1 = Xy_to_Xn(lrs, y)
        cllr0 = weights[0] * np.mean(np.log2(1 + lrs0)) if weights[0] > 0 else 0
        cllr1 = weights[1] * np.mean(np.log2(1 + 1 / lrs1)) if weights[1] > 0 else 0
        return (cllr0 + cllr1) / sum(weights)
31b7e022de94aec36efd570318e930d665349169
3,633,681
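An illustrative call, assuming Xy_to_Xn splits the LR array into its label-0 and label-1 subsets. Note that uninformative LRs (all 1.0) give a cost of exactly 1, while well-separated LRs push the cost toward 0:

import numpy as np

lrs = np.array([0.01, 0.02, 50.0, 100.0])
y = np.array([0, 0, 1, 1])
cost = cllr(lrs, y)   # well below 1 for these well-separated LRs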
import torch
from torch.nn import functional as F


def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
    """
    Extract predicted keypoint locations from heatmaps.

    Args:
        maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted
            heatmap of logits for each ROI and each keypoint.
        rois (Tensor): (#ROIs, 4). The box of each ROI.

    Returns:
        Tensor of shape (#ROIs, #keypoints, 4) with the last dimension
        corresponding to (x, y, logit, score) for each keypoint.

    When converting discrete pixel indices in an NxN image to a continuous
    keypoint coordinate, we maintain consistency with
    :meth:`Keypoints.to_heatmap` by using the conversion from Heckbert 1990:
    c = d + 0.5, where d is a discrete coordinate and c is a continuous
    coordinate.
    """
    # The decorator use of torch.no_grad() was not supported by torchscript.
    # https://github.com/pytorch/pytorch/pull/41371
    maps = maps.detach()
    rois = rois.detach()

    offset_x = rois[:, 0]
    offset_y = rois[:, 1]

    widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
    heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
    widths_ceil = widths.ceil()
    heights_ceil = heights.ceil()

    num_rois, num_keypoints = maps.shape[:2]
    xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)

    width_corrections = widths / widths_ceil
    height_corrections = heights / heights_ceil

    keypoints_idx = torch.arange(num_keypoints, device=maps.device)

    for i in range(num_rois):
        outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
        roi_map = F.interpolate(
            maps[[i]], size=outsize, mode="bicubic", align_corners=False
        ).squeeze(0)  # #keypoints x H x W

        # softmax over the spatial region
        max_score, _ = roi_map.view(num_keypoints, -1).max(1)
        max_score = max_score.view(num_keypoints, 1, 1)
        tmp_full_resolution = (roi_map - max_score).exp_()
        tmp_pool_resolution = (maps[i] - max_score).exp_()
        # Produce scores over the region H x W, but normalize with
        # POOL_H x POOL_W, so that the scores of objects of different absolute
        # sizes will be more comparable
        roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)

        w = roi_map.shape[2]
        pos = roi_map.view(num_keypoints, -1).argmax(1)

        x_int = pos % w
        y_int = (pos - x_int) // w

        assert (
            roi_map_scores[keypoints_idx, y_int, x_int]
            == roi_map_scores.view(num_keypoints, -1).max(1)[0]
        ).all()

        x = (x_int.float() + 0.5) * width_corrections[i]
        y = (y_int.float() + 0.5) * height_corrections[i]

        xy_preds[i, :, 0] = x + offset_x[i]
        xy_preds[i, :, 1] = y + offset_y[i]
        xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
        xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]

    return xy_preds
1755ebd45ab741ef267b17f1d24a658143e5f6c5
3,633,682
import os
from sys import flags   # note: sys.flags is always truthy; oauth2client
                        # quickstarts usually parse argparse flags instead


def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'todoist_gcal_sync.json')

    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        log.info('Storing credentials to ' + credential_path)
    return credentials
c83dcef79c93f651e00c6e3d649dbfc20d1f02a0
3,633,683
import sys
import re
import json
import time


def logs(args):
    """View service logs."""
    stacks = StackCache.load()
    try:
        config = stacks[args.name]
    except KeyError:
        sys.stderr.write("Unknown stack '{}'. Available stacks are: {}\n".format(
            args.name, ', '.join(stacks.keys())))
        return 1

    # If neither stream is selected, show both.
    if not (args.stderr or args.stdout):
        args.stderr = True
        args.stdout = True

    if args.service is None:
        names = sorted(config['services'].keys())
        raise ValueError("I need a single service at the moment")
    else:
        if args.service not in config['services']:
            sys.stderr.write("Stack '{}' has no service '{}'. Available services are: {}\n".format(
                args.name, args.service, ', '.join(config['services'].keys())))
            return 1
        names = [args.service]

    args.exclude = [re.compile(p) for p in args.exclude]
    args.include = [re.compile(p) for p in args.include]

    template = "{{:23s}} {{:{}s}}:{{}} {{:7s}} \u23b8 ".format(max(map(len, names)))
    for line in _get_log_lines(args):
        try:
            payload = json.loads(line.decode('utf-8'))
        except json.decoder.JSONDecodeError:
            continue
        if args.since is not None and payload['timestamp'] < args.since:
            continue
        if args.until is not None and payload['timestamp'] > args.until:
            continue
        if (not args.stderr and payload['source'] == 'stderr') or \
                (not args.stdout and payload['source'] == 'stdout'):
            continue
        if args.replica is not None and payload.get('replica', None) != args.replica:
            continue
        if any(p.match(payload['msg']) for p in args.exclude) or \
                not all(p.match(payload['msg']) for p in args.include):
            continue
        ts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(payload['timestamp'])) \
            + "{:.3f}".format(payload['timestamp'] % 1)[1:]
        try:
            sys.stdout.write(template.format(ts, args.service,
                                             payload.get('replica', 0),
                                             payload['source']))
            print(payload['msg'])
        except BrokenPipeError:
            break
d9397f836831cad45ab8d618aada731270ea7d03
3,633,684
def add_user_input_to_scene(scene, user_input, keep_space_around_bodies=True):
    """Converts user input to objects in the scene.

    Args:
        scene: scene_if.Scene.
        user_input: scene_if.UserInput or a triple (points, rectangulars, balls).
        keep_space_around_bodies: bool, if True extra empty space will be
            enforced around scene bodies.

    Returns:
        task_simulation: task_if.TaskSimulation.
    """
    if not isinstance(user_input, scene_if.UserInput):
        user_input = build_user_input(*user_input)
    return deserialize(
        scene_if.Scene(),
        simulator_bindings.add_user_input_to_scene(
            serialize(scene), serialize(user_input), keep_space_around_bodies))
4020253049b3615d81d2cbca829433c211c3f9c1
3,633,685
def language_add():
    """Return the page to add a language."""
    is_user_logged_in = 'username' in login_session
    context = {'is_user_logged_in': is_user_logged_in}
    if request.method == 'POST':
        if not is_user_logged_in:
            flash('You have to log in to add a language.')
            return render_template('language_add.html', **context)
        new_language = Language(name=request.form['name'].lower(),
                                user_id=login_session['user_id'])
        session.add(new_language)
        session.commit()
        flash("New Language Created!")
        return redirect(url_for('vocabulary'))
    else:
        if not is_user_logged_in:
            flash('You have to log in to add a language.')
        return render_template('language_add.html', **context)
8098ce3c3e6e8f8930790a636405ae69ce95cdd8
3,633,686
def pyramid_sum(lower, upper, margin=0):
    """Returns the sum of the numbers from lower to upper, and outputs a trace
    of the arguments and return values on each call."""
    blanks = " " * margin
    print(blanks, lower, upper)      # Print the arguments
    if lower > upper:
        print(blanks, 0)             # Print the returned value
        return 0
    else:
        result = lower + pyramid_sum(lower + 1, upper, margin + 4)
        print(blanks, result)        # Print the returned value
        return result
751facb309f362c35257aab2b239a37b39a98a04
3,633,687
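A worked call showing the trace pyramid_sum prints; indentation grows by four spaces per recursion level:

total = pyramid_sum(1, 3)
#  1 3
#      2 3
#          3 3
#              4 3
#              0
#          3
#      5
#  6
assert total == 6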
import numpy as np


def grad_qform_1_ZV(a, f_vals, X_grad, ind, n, alpha):
    """Gradient for quadratic form in the ZV-1 method."""
    Y = f_vals[:, ind] + X_grad @ a
    return (2. / (n - 1) * (X_grad * (Y - np.mean(Y)).reshape((n, 1))).sum(axis=0)
            + 2 * alpha * a)
aab2ac9cc79a4cf43e4391872ebf905e849ba7d9
3,633,688
def get_plot_extent(df, grid_stepsize=None, grid=False) -> tuple:
    """
    Gets the plot extent from the values. Uses the range of values and adds a
    padding fraction as specified in globals.map_pad.

    Parameters
    ----------
    grid : bool
        whether the values in df are on an equally spaced grid (for use in
        mapplot)
    df : pandas.DataFrame
        Plot values.

    Returns
    -------
    extent : tuple | list
        (x_min, x_max, y_min, y_max) in data coordinates.
    """
    lat, lon, gpi = globals.index_names
    if grid and grid_stepsize in ['nan', None]:
        # todo: problem if only a single lon/lat point is present?
        x_min, x_max, dx, len_x = _get_grid(df.index.get_level_values(lon))
        y_min, y_max, dy, len_y = _get_grid(df.index.get_level_values(lat))
        extent = [x_min - dx / 2., x_max + dx / 2., y_min - dx / 2., y_max + dx / 2.]
    elif grid and grid_stepsize:
        x_min, x_max, dx, len_x = _get_grid_for_irregulars(
            df.index.get_level_values(lon), grid_stepsize)
        y_min, y_max, dy, len_y = _get_grid_for_irregulars(
            df.index.get_level_values(lat), grid_stepsize)
        extent = [x_min - dx / 2., x_max + dx / 2., y_min - dx / 2., y_max + dx / 2.]
    else:
        extent = [df.index.get_level_values(lon).min(),
                  df.index.get_level_values(lon).max(),
                  df.index.get_level_values(lat).min(),
                  df.index.get_level_values(lat).max()]
    dx = extent[1] - extent[0]
    dy = extent[3] - extent[2]
    # set map padding around values to be globals.map_pad percent of the
    # smaller dimension
    padding = min(dx, dy) * globals.map_pad / (1 + globals.map_pad)
    extent[0] -= padding
    extent[1] += padding
    extent[2] -= padding
    extent[3] += padding
    # clamp to valid lon/lat bounds
    extent[0] = max(extent[0], -180)
    extent[1] = min(extent[1], 180)
    extent[2] = max(extent[2], -90)
    extent[3] = min(extent[3], 90)
    return extent
9acc348ed56f09d78589e7b9fa1a72c558ea51b0
3,633,689
def is_learner(user, program):
    """
    Returns true if user is a learner.

    Args:
        user (django.contrib.auth.models.User): A user
        program (courses.models.Program): Program object
    """
    return not Role.objects.filter(
        user=user, role__in=Role.NON_LEARNERS, program=program
    ).exists()
4773a6ebcc2b892a5306614a1bde699d0e17f191
3,633,690
import warnings
import math

import numpy as np
import pandas as pd


def calculate_rupture_rates(
    nhm_df: pd.DataFrame,
    rup_name: str = "rupture_name",
    annual_rec_prob_name: str = "annual_rec_prob",
    mag_name: str = "mag_name",
) -> pd.DataFrame:
    """Takes in a list of background ruptures and calculates the rupture rates
    for the given magnitudes.

    The rupture rate calculation is based on the Gutenberg-Richter equation
    from OpenSHA. It discretises the recurrence rate per magnitude instead of
    storing the probability of rupture exceeding a certain magnitude.
    https://en.wikipedia.org/wiki/Gutenberg%E2%80%93Richter_law
    https://github.com/opensha/opensha-core/blob/master/src/org/opensha/sha/magdist/GutenbergRichterMagFreqDist.java

    Also includes the rupture magnitudes.
    """
    data = np.ndarray(
        sum(nhm_df.n_mags),
        dtype=[
            (rup_name, str, 64),
            (annual_rec_prob_name, np.float64),
            (mag_name, np.float64),
        ],
    )

    # Make an array of fault bounds so the ith fault has
    # the rupture indexes indexes[i] to indexes[i+1]-1 (inclusive)
    indexes = np.cumsum(nhm_df.n_mags.values)
    indexes = np.insert(indexes, 0, 0)
    index_mask = np.zeros(len(data), dtype=bool)

    warnings.filterwarnings(
        "ignore", message="invalid value encountered in true_divide"
    )
    for i, line in nhm_df.iterrows():
        index_mask[indexes[i]:indexes[i + 1]] = True

        # Generate the magnitudes for each rupture
        sample_mags = np.linspace(line.M_min, line.M_cutoff, line.n_mags)

        for ii, iii in enumerate(range(indexes[i], indexes[i + 1])):
            data[rup_name][iii] = create_ds_rupture_name(
                line.source_lat,
                line.source_lon,
                line.source_depth,
                sample_mags[ii],
                line.tect_type,
            )

        # Calculate the cumulative rupture rate for each rupture
        # (math.log(10, 2.72) approximates ln(10))
        baseline = (
            line.b
            * math.log(10, 2.72)
            / (1 - 10 ** (-1 * line.b * (line.M_cutoff - line.M_min)))
        )
        f_m_mag = np.power(10, (-1 * line.b * (sample_mags - line.M_min))) * baseline
        f_m_mag = np.append(f_m_mag, 0)
        rup_prob = (f_m_mag[:-1] + f_m_mag[1:]) / 2 * 0.1
        total_cumulative_rate = rup_prob * line.totCumRate

        # normalise
        total_cumulative_rate = (
            line.totCumRate * total_cumulative_rate / np.sum(total_cumulative_rate)
        )

        data[mag_name][index_mask] = sample_mags
        data[annual_rec_prob_name][index_mask] = total_cumulative_rate

        index_mask[indexes[i]:indexes[i + 1]] = False

    background_values = pd.DataFrame(data=data)
    background_values.fillna(0, inplace=True)

    return background_values
a58e656980454de2f53fb1db2f5b0ec37fec9334
3,633,691
def checkScriptParses(scriptVersion, script):
    """
    checkScriptParses returns None when the script parses without error.

    Args:
        scriptVersion (int): The script version.
        script (ByteArray): The script.

    Returns:
        None or Exception: None on success. Exception is returned, not raised.
    """
    tokenizer = ScriptTokenizer(scriptVersion, script)
    while tokenizer.next():
        pass
    return tokenizer.err
da49e2ca94fe38ef93e92eac27acc4eafd02f3e9
3,633,692
import unicodedata


def normalize_caseless(text):
    """Normalize a string as lowercase unicode KD form.

    The normal form KD (NFKD) will apply the compatibility decomposition,
    i.e. replace all compatibility characters with their equivalents.
    """
    return unicodedata.normalize("NFKD", text.casefold())
c26f8470ea6312cce7a97930999d489ee30eb692
3,633,693
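Two caseless comparisons that a plain .lower() would get wrong; casefold maps 'ß' to 'ss', and the 'ﬁ' ligature (U+FB01) is expanded by the casefold/NFKD combination:

assert normalize_caseless('Straße') == normalize_caseless('STRASSE')
assert normalize_caseless('ﬁle') == normalize_caseless('FILE')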
def select_device_mirrored(device, structured):
    """Specialize a nest of regular & mirrored values for one device."""
    def _get_mirrored(x):
        if isinstance(x, DistributedValues):
            if not isinstance(x, Mirrored):
                raise TypeError(
                    "Expected value to be mirrored across replicas: %s in %s."
                    % (x, structured))
            return x.get(device)
        else:
            return x

    return nest.map_structure(_get_mirrored, structured)
5b69c9464e1d8a4597f5661d85c056e90deb70bf
3,633,694
from collections import Counter
from typing import Any, Dict


def default_base_builder(individual: "Individual", frame: Frame, **kwargs) -> Dict[str, Any]:
    """Get base stats of the frame."""
    v = dict()
    # nodes
    nodes = individual.nodes(frame_selector=frame, data=True)
    v['nodes'] = list(nodes.keys())
    v['macro_counter'] = Counter(d['macro'] for d in nodes.values())
    # sub sections
    v['sub_sections'] = [s.section.name
                         for s in individual.frame_tree.get_sub_frames(frame)]
    # frame info
    v['current_frame'] = frame
    v['distance_from_root'] = individual.frame_tree.get_distance_from_root(frame)
    return v
de0c3ea0120bef893d29e6917ec6cc0e19e451b9
3,633,695
def calculate_maximum_potential_edge_counts(channel_composition, N, max_ble_span):
    """Computes the maximum number of possible occurrences per potential edge
    type.

    Parameters
    ----------
    channel_composition : Dict[str, int]
        Channel composition description.
    N : int
        Number of BLEs in the cluster.
    max_ble_span : int
        Maximum BLE span in the pattern.

    Returns
    -------
    Dict[str, int]
        Maximum number of occurrences of each edge type.
    """
    back_dir = {'L': 'R', 'R': 'L', 'U': 'D', 'D': 'U'}
    counts = {}
    for src_ble in range(0, N):
        for sink_ble in range(max(0, src_ble - max_ble_span),
                              min(N - 1, src_ble + max_ble_span) + 1):
            for w_src in channel_composition:
                src_dirs = ('L', 'R')
                if w_src[0] == 'V':
                    src_dirs = ('U', 'D')
                for src_dir in src_dirs:
                    for w_sink in channel_composition:
                        sink_dirs = ('L', 'R')
                        if w_sink[0] == 'V':
                            sink_dirs = ('U', 'D')
                        for sink_dir in sink_dirs:
                            if sink_dir == back_dir[src_dir]:
                                continue
                            inc = channel_composition[w_src] * channel_composition[w_sink]
                            try:
                                counts[(w_src, w_sink)] += inc
                            except KeyError:
                                counts.update({(w_src, w_sink): inc})

    e_str = lambda e: "potential_edge__%s%s__%s%s" \
                      % (e[0], "_tap_0" if e[0][0] == 'V' else '',
                         e[1], "_tap_0" if e[1][0] == 'V' else '')

    return {e_str(e): counts[e] for e in counts}
55f891631bd109066735e9997cbb3dc35de8d21a
3,633,696
from markupsafe import Markup


def generic_document_type_formatter(view, context, model, name):
    """Return AdminLog.document field wrapped in URL to its list view."""
    _document_model = model.get('document').document_type
    url = _document_model.get_admin_list_url()
    return Markup('<a href="%s">%s</a>' % (url, _document_model.__name__))
c00934e8778c5232092427e2b507785ef429570d
3,633,697
def string_to_bool(value):
    """Boolean string to boolean converter."""
    return value == "true"
0796b21c98d09592d8d3a6ae1dfc5b98564aec7f
3,633,698
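Note the strict comparison above: only the exact lowercase string "true" maps to True:

assert string_to_bool('true') is True
assert string_to_bool('True') is False
assert string_to_bool('yes') is False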
def g_logv(s):
    """Read a logical variable.

    :param str s:
    :return bool:
    """
    return s == '1' or s.lower() == 'yes' or s.lower() == 'true'
e9984eced79cccc09a465b07bfac5185db72a604
3,633,699
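g_logv is the more permissive counterpart to string_to_bool above: case-insensitive and accepting several spellings:

assert g_logv('1') and g_logv('YES') and g_logv('True')
assert not g_logv('0') and not g_logv('no')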