content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def wkt_to_proj4(wkt):
    """Convert a well-known-text (WKT) spatial reference into a pyproj.Proj.

    Parameters
    ----------
    wkt : str
        WKT description of a spatial reference system.

    Returns
    -------
    pyproj.Proj
        Projection built from the PROJ.4 form of the reference.
    """
    spatial_ref = osgeo.osr.SpatialReference()
    spatial_ref.ImportFromWkt(wkt)
    proj4_string = str(spatial_ref.ExportToProj4())
    return pyproj.Proj(proj4_string)
17796040f4bac614d520591a5b41396cfca5a514
3,627,100
def runtime_expand1(bindings, filename, tree):
    """Macro-expand the AST value `tree` at run time, exactly once.

    Run-time part of `expand1r`. `bindings` and `filename` are as in
    `mcpyrate.core.BaseMacroExpander`. Convenient for experimenting with
    quoted code in the REPL.
    """
    return MacroExpander(bindings, filename).visit_once(tree).body
f1d22f6e4dd494d6febdd2088fa9a70b05e534b4
3,627,101
def elasticnet(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
    """Compute ElasticNet correlation coefficients for each miRNA-mRNA pair.

    Args:
        exprDF: DataFrame whose rows are samples and whose columns are
            gene/miRNA expression values (miRNA and gene columns mixed;
            split by `header_list`).
        lMirUser: Optional list of miRNA names to restrict the analysis to.
        lGeneUser: Optional list of gene names to restrict the analysis to.
        n_core: Number of pandarallel workers (also forwarded to
            CoefElasticNetCV).

    Returns:
        DataFrame of ElasticNet coefficients; columns are genes, rows are
        miRNAs (NaN rows dropped per column).
    """
    # Spin up pandarallel workers so each gene column is fitted in parallel.
    pandarallel.initialize(verbose=1, nb_workers=n_core)
    # Split the concatenated expression frame's columns into miRNA vs gene names.
    lMir, lGene = header_list(exprDF=exprDF)
    # Optionally restrict to the user-supplied subsets.
    if lGeneUser is not None:
        lGene = intersection(lGene,lGeneUser)
    if lMirUser is not None:
        lMir = intersection(lMir,lMirUser)
    # One ElasticNetCV fit per gene column, regressing on all miRNA columns.
    Cordf = exprDF[lGene].parallel_apply(lambda gene: \
        CoefElasticNetCV(exprDF[lMir],gene, n_core=n_core))
    # Drop NaN coefficient entries within each gene column.
    Cordf = Cordf.apply(lambda col: col.dropna())
    return Cordf
8255b549743c65777574e6847b449f0badb121bf
3,627,102
def get_batch_unpack(args):
    """Pass-through wrapper for get_batch.

    Unpacks the expected keys from an arguments dictionary and forwards
    them to get_batch as keyword arguments.

    Args:
        args: Arguments dictionary.

    Returns:
        Return value of get_batch.
    """
    expected_keys = ('tensors', 'batch_size', 'i', 'indices',
                     'return_malicious', 'return_counts', 'return_tags')
    return get_batch(**{key: args[key] for key in expected_keys})
2a47792afcf95575d1b70138faffbb55eca1a622
3,627,103
def simple_unlimited_args(one, two='hi', *args):
    """Expected simple_unlimited_args __doc__"""
    # `six.text_type` is just `str` on Python 3 — the six dependency is
    # unnecessary, so format with the builtin directly.
    return "simple_unlimited_args - Expected result: %s" % (
        ', '.join(str(arg) for arg in [one, two] + list(args)))
63cbd2d532cb638af8ff3964e1148e1bcabf632d
3,627,104
def _random_subset(seq, m, rng): """ Return m unique elements from seq. This differs from random.sample which can return repeated elements if seq holds repeated elements. Taken from networkx.generators.random_graphs """ targets = set() while len(targets) < m: x = rng.choice(seq) targets.add(x) return list(targets)
64a174d55a64b73eb55f3f795156733911a54802
3,627,105
def rnn_forward(x, nb_units, nb_layers, rnn_type, name, drop_rate=0., i=0, activation='tanh', return_sequences=False):
    """Stack multiple recurrent layers on top of input tensor `x`.

    Parameters
    ----------
    x: input tensor fed through the recurrent stack.
    nb_units: int, the dimensionality of the output space for each
        recurrent layer.
    nb_layers: int, the number of recurrent layers.
    rnn_type: str, type of recurrent cell, one of
        {'simple_rnn', 'gru', 'lstm'}.
    name: str, base name; layers are named '{name}_{index}'.
    drop_rate: float, regularization between layers: (0, 0.5) applies
        Dropout at that rate, >= 0.5 applies BatchNormalization, 0 adds
        nothing. Default 0.
    i: int, fallback layer index used to name the final layer when
        nb_layers == 1 (the loop below never runs then). Default 0.
    activation: str, activation for every recurrent layer. Default 'tanh'.
    return_sequences: bool, whether the LAST layer returns the full
        sequence (True) or only the final output (False). Default False.

    Returns
    -------
    Output tensor of the final recurrent layer.
    """
    # Pick the cell class; raises KeyError for an unknown rnn_type.
    RnnCell = {'lstm': layers.LSTM, 'gru': layers.GRU, 'simple_rnn': layers.SimpleRNN}[rnn_type]
    # All but the last layer must return sequences so the next layer
    # receives a timestep dimension.
    for i in range(nb_layers - 1):
        x = RnnCell(units=nb_units, activation=activation, return_sequences=True, name=f'{name}_{i}')(x)
        if drop_rate > 0. and drop_rate < 0.5:
            x = layers.Dropout(rate=drop_rate, name=f'{name}_{i}_dropout')(x)
        elif drop_rate >= 0.5:
            # drop_rate >= 0.5 is (re)interpreted as "use batch norm instead".
            x = layers.BatchNormalization(name=f'{name}_{i}_norm')(x)
    # Final layer honors the caller's return_sequences choice; uses the
    # last loop index (or the `i` parameter default when nb_layers == 1).
    x = RnnCell(units=nb_units, activation=activation, return_sequences=return_sequences, name=f'{name}_{i+1}')(x)
    return x
dd1e3b8f6c76bc67687219748bc98668c1edc046
3,627,106
def lambda_handler(*args):
    """Lambda handler for usercount.

    Positional arguments are the standard Lambda pair:
    args[0] is the event dict, args[1] is the context object.

    Returns an API-Gateway-style response dict when no 'account' is given
    in the event, otherwise the raw result of get_user_count.
    """
    # Fix: the original used a Python-2 `print` statement, which is a
    # syntax error under Python 3 (Lambda runtimes are Python 3).
    event, context = args[0], args[1]
    print(event.get('account'))
    function_name = context.function_name
    account = event.get('account')
    results = get_user_count(function_name, account)
    body = results
    if not account:
        response = {
            "statusCode": 200,
            "body": json.dumps(body)
        }
    else:
        response = body
    return response
1b1d2a00a98a00a0197cf59154d35baa9111b193
3,627,107
import sys

def notebook_is_active() -> bool:
    """Return True when running inside an IPython kernel (e.g. a Jupyter
    notebook), detected by the presence of an 'ipykernel' module."""
    return any(module_name.lower() == 'ipykernel' for module_name in sys.modules)
200962d831c75d636b310aafa0c8cc4e664e0b4a
3,627,108
def lstm_cond_layer(tparams, state_below, options, prefix='lstm', mask=None, init_memory=None, init_state=None, trng=None, use_noise=None, **kwargs):
    """Computation graph for the conditional (soft-attention) LSTM.

    Builds a theano.scan over time steps. At every step an attention
    distribution `alpha` over the annotation vectors is computed from the
    previous hidden state, a context vector `ctx_` is formed as the
    alpha-weighted sum of annotations, and a standard LSTM update is applied
    conditioned on that context.

    Parameters (as used below; shapes hedged — confirm against caller):
        tparams: dict of shared parameters, keyed via `_p(prefix, name)`.
        state_below: annotation tensor; assumes shape
            (nsteps, n_samples, n_annotations, ...) — TODO confirm.
        options: model config dict; reads 'n_layers_att', 'selector',
            'temperature_inverse', 'ctx_dim'.
        mask: (nsteps, n_samples) float mask, or None for all-ones.
        init_state / init_memory: initial h / c, or None for zeros.
        trng, use_noise, **kwargs: accepted but unused here.

    Returns the full `rval` list from theano.scan (h, c, alpha, ctx_,
    optional selector, plus intermediate tensors kept for debugging).
    """
    nsteps = state_below.shape[0]
    n_samples = state_below.shape[1]
    n_annotations = state_below.shape[2]
    # mask: default to "keep everything". NOTE: `== None` kept verbatim —
    # with symbolic variables `is None` and `== None` are not interchangeable.
    if mask == None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # Hidden dimension inferred from the recurrent weight matrix U.
    dim = tparams[_p(prefix, 'U')].shape[0]
    # initial/previous state
    if init_state == None:
        init_state = tensor.alloc(0., n_samples, dim)
    # initial/previous memory
    if init_memory == None:
        init_memory = tensor.alloc(0., n_samples, dim)
    def _slice(_x, n, dim):
        # Slice the n-th gate block out of a concatenated pre-activation.
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    def _step(m_, x_, h_, c_, a_, ct_, dp_=None, dp_att_=None):
        # One scan step: mask, annotations, h_{t-1}, c_{t-1}, alpha, ctx.
        # --- attention ---
        pstate_ = tensor.dot(h_, tparams[_p(prefix,'Wd_att')])
        pctx_ = tensor.dot(x_, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix, 'b_att')]
        # Optional deep attention MLP (tanh between hidden attention layers).
        if options['n_layers_att'] > 1:
            for lidx in xrange(1, options['n_layers_att']):
                pctx_ = tensor.dot(pctx_, tparams[_p(prefix,'W_att_%d'%lidx)])+tparams[_p(prefix, 'b_att_%d'%lidx)]
                if lidx < options['n_layers_att'] - 1:
                    pctx_ = tanh(pctx_)
        # Broadcast the state projection over the annotation axis.
        pctx_ = pctx_ + pstate_[:,None,:]
        pctx_list = []
        pctx_list.append(pctx_)
        pctx_ = tanh(pctx_)
        alpha = tensor.dot(pctx_, tparams[_p(prefix,'U_att')])+tparams[_p(prefix, 'c_tt')]
        alpha_pre = alpha
        alpha_shp = alpha.shape
        # Temperature-scaled softmax over annotations.
        alpha = tensor.nnet.softmax(options['temperature_inverse']*alpha.reshape([alpha_shp[0],alpha_shp[1]]))
        # Current context: alpha-weighted sum of annotation vectors.
        ctx_ = (x_ * alpha[:,:,None]).sum(1)
        # Optional scalar "selector" gate scaling the whole context.
        if options['selector']:
            sel_ = tensor.nnet.sigmoid(tensor.dot(h_, tparams[_p(prefix, 'W_sel')])+tparams[_p(prefix,'b_sel')])
            sel_ = sel_.reshape([sel_.shape[0]])
            ctx_ = sel_[:,None] * ctx_
        # --- LSTM pre-activations: recurrent + context contributions ---
        preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
        preact += tensor.dot(ctx_, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
        i = _slice(preact, 0, dim)  # z_it
        f = _slice(preact, 1, dim)  # z_ft
        o = _slice(preact, 2, dim)  # z_ot
        i = tensor.nnet.sigmoid(i)  # it = sigmoid(z_it)
        f = tensor.nnet.sigmoid(f)  # ft = sigmoid(z_ft)
        o = tensor.nnet.sigmoid(o)  # ot = sigmoid(z_ot)
        c = tensor.tanh(_slice(preact, 3, dim))  # at = tanh(z_at)
        # Compute the new memory/hidden state; where the mask is 0 the
        # previous state is copied through unchanged.
        c = f * c_ + i * c  # ct = ft * ct-1 + it * at
        c = m_[:,None] * c + (1. - m_)[:,None] * c_
        h = o * tensor.tanh(c)  # ht = ot * tanh(ct)
        h = m_[:,None] * h + (1. - m_)[:,None] * h_
        rval = [h, c, alpha, ctx_]
        if options['selector']:
            rval += [sel_]
        # Intermediates appended for inspection/debugging downstream.
        rval += [pstate_, pctx_, i, f, o, preact, alpha_pre]+pctx_list
        return rval
    # Selector adds an extra recurrent output, so the scan callback needs a
    # matching extra argument (ignored as an input).
    if options['selector']:
        _step0 = lambda m_, x_, h_, c_, a_, ct_, sel_: _step(m_, x_, h_, c_, a_, ct_)
    else:
        _step0 = lambda m_, x_, h_, c_, a_, ct_: _step(m_, x_, h_, c_, a_, ct_)
    seqs = [mask, state_below]
    outputs_info = [init_state, init_memory, tensor.alloc(0., n_samples, n_annotations), tensor.alloc(0., n_samples, options['ctx_dim'])]
    if options['selector']:
        outputs_info += [tensor.alloc(0., n_samples)]
    # Non-recurrent outputs (the debugging intermediates) get None slots.
    outputs_info += [None, None, None, None, None, None, None] + [None]#*options['n_layers_att']
    rval, updates = theano.scan(_step0, sequences=seqs, outputs_info=outputs_info, name=_p(prefix, '_layers'), n_steps=nsteps, profile=False)
    return rval
ddb443a6bdbe2f25a231f5df660036b682bc337f
3,627,109
from typing import Generator

def get_frame_tree() -> Generator[dict, dict, FrameTree]:
    """Fetch the present frame tree structure.

    Yields the CDP request dict for ``Page.getFrameTree``, receives the
    raw response dict, and returns it parsed as a FrameTree.

    Returns
    -------
    frameTree: FrameTree
        Present frame tree structure.
    """
    request = {"method": "Page.getFrameTree", "params": {}}
    response = yield request
    return FrameTree.from_json(response["frameTree"])
9a79281fbd6a9b469c8f7ef7746fe6a3e7b5156c
3,627,110
def lddmm_transform_points(
    points,
    deform_to="template",
    # lddmm_register output (lddmm_dict).
    affine_phi=None,
    phi_inv_affine_inv=None,
    template_resolution=1,
    target_resolution=1,
    **unused_kwargs,
):
    """Transform an array of points between the template and target spaces.

    Applies the appropriate position field from lddmm_register output to
    `points`, mapping them into the space named by `deform_to`. Points are
    given in physical units centered on the image; the last axis of
    `points` must match the template/target dimensionality.

    Parameters
    ----------
    points: np.ndarray
        Points in the template or target space to be mapped to the other.
    deform_to: str, optional
        "template" or "target": the destination space. Default "template".
    affine_phi: np.ndarray, optional
        Position field (template-shaped) for deforming to target space.
    phi_inv_affine_inv: np.ndarray, optional
        Position field (target-shaped) for deforming to template space.
    template_resolution, target_resolution: float or seq, optional
        Per-dimension resolutions (scalar means isotropic). Default 1.

    Returns
    -------
    np.ndarray
        A copy of `points` transformed into the `deform_to` space.

    Raises
    ------
    TypeError
        If deform_to is not a str.
    ValueError
        If deform_to is neither "template" nor "target".
    """
    # Validate deform_to before touching any arrays.
    if not isinstance(deform_to, str):
        raise TypeError(f"deform_to must be of type str.\n"
                        f"type(deform_to): {type(deform_to)}.")
    if deform_to not in ("template", "target"):
        raise ValueError(f"deform_to must be either 'template' or 'target'.")

    # Pick the position field pointing into the requested space.
    # Note: this pairing is the reverse of lddmm_transform_image.
    going_to_template = deform_to == "template"
    position_field = phi_inv_affine_inv if going_to_template else affine_phi
    position_field_resolution = np.copy(
        target_resolution if going_to_template else template_resolution)

    return _transform_points(points, position_field, position_field_resolution)
c63b032f25fa9b0bd50b7072fdb59d76ed68c170
3,627,111
def check_sanitization(mol):
    """Sanitize an RDKit molecule, returning None on any failure.

    Wraps Chem.SanitizeMol in try/except layers so a failure returns None
    instead of raising into the caller. A nitrogen-fixing step corrects a
    common RDKit valence error where 4-bonded nitrogens carry the wrong
    formal charge. Additional fixes for common sanitization failures can
    be added here.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule to be sanitized

    Returns:
    :returns: rdkit.Chem.rdchem.Mol mol: a sanitized rdkit molecule, or
        None if sanitization failed.
    """
    if mol is None:
        return None
    # First pass: with catchErrors=True, SanitizeMol reports the first
    # failing operation instead of raising; the bare except is a
    # last-resort guard for crashes inside RDKit itself.
    try:
        sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
    except:
        return None
    if sanitize_string.name == "SANITIZE_NONE":
        # Everything passed on the first attempt.
        return mol
    else:
        # Try to fix the nitrogens (common problem: 4-bonded nitrogens
        # improperly lose their + charges), then re-sanitize.
        mol = Nitrogen_charge_adjustment(mol)
        Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
        sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
        if sanitize_string.name == "SANITIZE_NONE":
            return mol
    # Final filter pass in case something slipped through: if any form of
    # sanitization (e.g. KEKULIZE) still fails, give up and return None.
    sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
    if sanitize_string.name != "SANITIZE_NONE":
        return None
    else:
        return mol
5014508cbde6ea1a89beeca106f0adeae1422817
3,627,112
import argparse

def build_parser():
    """Build the command-line parser for the scraper.

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting the `subreddit` positional argument plus the
        -m/--minimum, -s/--savepath, -l/--load, and -g/--gui options.
    """
    MINIMUM = 200000
    SAVEPATH = "data/raw/"
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "subreddit",
        help="Specify the subreddit to scrape from",
    )
    parser.add_argument(
        "-m",
        "--minimum",
        help="Specify the minimum number of data records to collect. For load data option, this means min number of new records to collect.",
        type=int,
        default=MINIMUM,
    )
    parser.add_argument(
        "-s",
        "--savepath",
        help="Save/load folder",
        type=str,
        default=SAVEPATH,
    )
    parser.add_argument(
        "-l",
        "--load",
        help="Load existing samples to continue scraping",
        action="store_true",
    )
    parser.add_argument(
        "-g",
        "--gui",
        help="Call this flag when running from Javascript GUI",
        action="store_true",
    )
    return parser
d4f3eb484423416d3cb83ad64784747a8f453d98
3,627,113
from typing import Union
import re

def extract_msg(log: str, replica_name: str) -> Union[str, None]:
    """Extract the primary-to-replica message from a single log line.

    Parameters
    ----------
    log
        Full log string.
    replica_name
        Identity name of the replica.

    Returns
    -------
    msg
        Message sent from primary to replica, or None when the log does
        not contain exactly one "SENT-TO <replica_name>" entry.
    """
    # re.escape guards against replica names containing regex
    # metacharacters (the original interpolated the name unescaped).
    pattern = rf".*SENT-TO {re.escape(replica_name)}: (\d+-\d+-\d+-.*)"
    matches = re.findall(pattern, log)
    if len(matches) != 1:
        # Logger.warn is a deprecated alias of Logger.warning.
        LOG.warning(f"Unknown log format: {log}")
        return None
    return matches[0]
da17f008af059d70cc4cc969bc03aa34ed846e0f
3,627,114
def tf_dmdm_fid(rho, sigma): """Trace fidelity between two density matrices.""" # TODO needs fixing rhosqrt = tf.linalg.sqrtm(rho) return tf.linalg.trace( tf.linalg.sqrtm(tf.matmul(tf.matmul(rhosqrt, sigma), rhosqrt)) )
057b01193412ee863cb431fd6b270492fd575125
3,627,115
def LF_report_is_short_demo(x):
    """Labeling function: vote NORMAL when the report text is shorter than
    280 characters, otherwise ABSTAIN."""
    if len(x.text) < 280:
        return NORMAL
    return ABSTAIN
525fdbdf910c21d28824a4bf371dec069e9a7abb
3,627,116
from typing import Optional

def binarize_swf(
    scores: SlidingWindowFeature,
    onset: float = 0.5,
    offset: float = 0.5,
    initial_state: Optional[bool] = None,
):
    """(Batch) hysteresis thresholding

    Parameters
    ----------
    scores : SlidingWindowFeature
        (num_chunks, num_frames, num_classes)- or
        (num_frames, num_classes)-shaped scores.
    onset : float, optional
        Onset threshold
    offset : float, optional
        Offset threshold
    initial_state : np.ndarray or bool, optional
        Initial state.

    Returns
    -------
    binarized : same as scores
        Binarized scores with same shape and type as scores.

    Raises
    ------
    ValueError
        If scores.data is neither 2- nor 3-dimensional.
    """
    if scores.data.ndim == 2:
        # Single-chunk case: `binarize` expects one class per row, so move
        # the class axis in front of the frame axis.
        num_frames, num_classes = scores.data.shape
        data = einops.rearrange(scores.data, "f k -> k f", f=num_frames, k=num_classes)
        binarized = binarize(
            data, onset=onset, offset=offset, initial_state=initial_state
        )
        # `1.0 *` converts the boolean result back to float before
        # rebuilding the (frames, classes) layout.
        return SlidingWindowFeature(
            1.0 * einops.rearrange(binarized, "k f -> f k", f=num_frames, k=num_classes),
            scores.sliding_window,
        )
    elif scores.data.ndim == 3:
        # Batched case: flatten (chunk, class) pairs into independent rows,
        # binarize each row, then restore the original 3-D layout.
        num_chunks, num_frames, num_classes = scores.data.shape
        data = einops.rearrange(
            scores.data, "c f k -> (c k) f", c=num_chunks, f=num_frames, k=num_classes
        )
        binarized = binarize(
            data, onset=onset, offset=offset, initial_state=initial_state
        )
        return SlidingWindowFeature(
            1.0
            * einops.rearrange(
                binarized, "(c k) f -> c f k", c=num_chunks, f=num_frames, k=num_classes
            ),
            scores.sliding_window,
        )
    else:
        raise ValueError(
            "Shape of scores must be (num_chunks, num_frames, num_classes) or (num_frames, num_classes)."
        )
3e578608501887943e0918b13fc6dcc10585685e
3,627,117
import sys

def invlogit(x, eps=sys.float_info.epsilon):
    """Inverse of the logit function, 1 / (1 + exp(-x)).

    The output is squashed into [eps, 1 - eps] so downstream logs never
    see exact 0 or 1.
    """
    scale = 1.0 - 2.0 * eps
    return scale / (1.0 + tt.exp(-x)) + eps
bbdf200fa8e79d97aae4cd71e727eb75489e7242
3,627,118
import torch
import time

def train_pytorch_ch7(optimizer_fn, optimizer_hyperparams, features, labels, batch_size=10, num_epochs=2):
    """Training function of chapter 7, PyTorch-library version.

    Fits a single linear layer (simulating linear regression) on
    (features, labels) with the given optimizer, printing and plotting the
    loss curve.

    Parameters
    ----------
    optimizer_fn : [function]
        the optimizer constructor to use (e.g. torch.optim.SGD)
    optimizer_hyperparams : [dict]
        hyperparams delivered to the optimizer
    features : [tensor]
        batch of features
    labels : [tensor]
        batch of labels
    batch_size : [int], optional
        size of a batch, by default 10
    num_epochs : [int], optional
        number of epochs, by default 2
    """
    # Init the net: one linear layer simulates linear regression.
    net = nn.Sequential(
        nn.Linear(features.shape[-1], 1)
    )
    loss = nn.MSELoss()
    optimizer = optimizer_fn(net.parameters(), **optimizer_hyperparams)
    # Full-dataset loss; halved to match the book's (1/2) * MSE convention.
    def eval_loss():
        return loss(net(features).view(-1), labels).item()/2
    # Prepare data and structures; record the initial loss.
    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            # Divided by 2 so the loss is comparable to train_ch7's.
            l = loss(net(X).view(-1), y)/2
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # Sample the loss every 100 examples processed.
            if(batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
        # Output results.
        print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    # Plot the loss curve (project-local `plot` wrapper around matplotlib).
    plot.set_figsize()
    plot.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plot.plt.xlabel('epoch')
    plot.plt.ylabel('loss')
    plot.plt.show()
4b60531b47fc58df0c61bd36cdb77ac8a137ba76
3,627,119
def ProcessOptionInfileParameters(ParamsOptionName, ParamsOptionValue, InfileName = None, OutfileName = None):
    """Process input-file reading parameters and return them as a map.

    Arguments:
        ParamsOptionName (str): Command line input parameters option name.
        ParamsOptionValue (str): Comma delimited list of parameter
            name and value pairs.
        InfileName (str): Name of input file.
        OutfileName (str): Name of output file.

    Returns:
        dictionary: Processed parameter name and value pairs.

    Notes:
        The pairs given in ParamsOptionValue are validated (by the shared
        infile/outfile helper) before being returned.
    """
    # Defaults for reading molecule input files.
    ParamsInfo = {
        'RemoveHydrogens': True,
        'Sanitize': True,
        'StrictParsing': True,
        'SMILESColumn': 1,
        'SMILESNameColumn': 2,
        'SMILESDelimiter': ' ',
        'SMILESTitleLine': 'auto',
    }
    # Validate and overwrite defaults in place.
    _ProcessInfileAndOutfileParameters('Infile', ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName)
    return ParamsInfo
ece155282aef5c7ba365ce54c529998590146043
3,627,120
import logging

def user_annotation_for_application() -> UserInputClass:
    """Collect user annotation info interactively via stdin prompts.

    Gathers, in order:
    1. Original application source code
    2. Target goal (cost and/or performance)
    3. VM runtime configuration (instance types, AWS region)

    Empty answers leave the corresponding dataclass defaults untouched.

    Returns
    -------
    UserInputClass
        Populated annotation object.
    """
    # First make the dataclass holding default values.
    user_input_class = UserInputClass()
    # Input source code.
    source_code = input("enter your application code:\n")
    if source_code != "":
        user_input_class.original_file = source_code
    # Target goal — cost. Note: blank input keeps the default.
    target_goal_config = TargetGoalConfig()
    cost_goal = input("Enter the cost goal ($):\n")
    if cost_goal != "":
        target_goal_config.cost = int(cost_goal)
    # Target goal — performance.
    performance_goal = input("Enter the performance goal (ms):\n")
    if performance_goal != "":
        target_goal_config.performance = float(performance_goal)
    # Attach target_goal_config to the result.
    user_input_class.target_goal_config = target_goal_config
    # VM runtime configuration: instance types, then AWS region.
    vm_runtime_config = VMRuntimeConfig()
    vm_instance_types = input(f"VM Selection: select VM types : {return_vm_types()}\n")
    if vm_instance_types != "":
        # Comma-separated list; the trailing `if` is always truthy here
        # (the branch guarantees a non-empty string).
        vm_selection_list = [
            x.strip() for x in vm_instance_types.split(",") if vm_instance_types
        ]
        vm_runtime_config.vm_instance_selection_list = vm_selection_list
    # AWS region.
    vm_region_name = input("Choose aws region for VM:\n")
    if vm_region_name != "":
        vm_runtime_config.region_name = vm_region_name
    # Attach vm_runtime_config to the result.
    user_input_class.vm_runtime_config = vm_runtime_config
    logging.debug(f"user_input_class is \n {pformat(user_input_class.__dict__)}")
    return user_input_class
a4473128a86253ea8bddc533a84bdf893e49853a
3,627,121
def brier_score(y_true, y_pred):
    """Brier score.

    Mean Squared Error between the true labels and the estimated
    probabilities.

    Parameters
    ----------
    y_true : label indicator matrix (n_samples, n_classes)
        True labels.
        # TODO Add option to pass array with shape (n_samples, )
    y_pred : matrix (n_samples, n_classes)
        Predicted scores.

    Returns
    -------
    score : float
        Positive value between 0 and 1.

    Examples
    --------
    >>> from pycalib.metrics import cross_entropy
    >>> Y = np.array([[0, 1], [0, 1]])
    >>> S = np.array([[0.1, 0.9], [0.6, 0.4]])
    >>> brier_score(Y, S)
    0.185
    """
    # Equivalent to np.mean(np.abs(S - Y)**2) for dense float arrays.
    return mean_squared_error(y_true, y_pred)
06a457db29de6e5943900000ea5395cbffac2ab5
3,627,122
from typing import Tuple

def computeD1D2(current: float, volatility: float, ttm: float, strike: float, rf: float) -> Tuple[float, float]:
    """Compute the Black-Scholes-Merton d1/d2 terms.

    These risk-adjusted quantities feed both the Call and Put pricing
    formulas in the BSM framework.

    Arguments:
        current {float} -- Current price of the underlying asset.
        volatility {float} -- Volatility of the underlying asset price.
        ttm {float} -- Time to expiration (in years).
        strike {float} -- Strike price of the option contract.
        rf {float} -- Risk-free rate (annual).

    Returns:
        Tuple[float, float] -- (d1, d2).
    """
    vol_sqrt_t = volatility * np.sqrt(ttm)
    drift = (rf + ((volatility ** 2) / 2)) * ttm
    d1 = (np.log(current / strike) + drift) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    return (d1, d2)
76dc53df4bde1c2974749bf10f007ba3c8e748ff
3,627,123
def control_event(data_byte1, data_byte2=0, channel=1):
    """Build a MIDI control event 3-tuple from the given data bytes."""
    # Validate/normalize the controller byte against the known set.
    controller = muser.utils.key_check(data_byte1, CONTROL_BYTES, 'upper')
    status = STATUS_BYTES['CONTROL'] + channel - 1
    return (status, controller, data_byte2)
5195d1236f1cd4441281777ab1a50591b3b9fbe0
3,627,124
def create_noise_mask(mean, variance, threshold=25):
    """Create a binary mask from percentile thresholds on mean and variance.

    A pixel is 1 in the mask when BOTH its mean and its variance fall at or
    below the given percentile of their respective arrays (i.e. the
    intersection of the low-mean and low-variance sets), otherwise 0.

    Parameters
    ----------
    mean : numpy array
        Array containing pixel mean values.
    variance : numpy array
        Array containing pixel variance values.
    threshold : int
        Percentile threshold (25, 50, or 75); values at or below it are
        included in the mask.

    Returns
    -------
    noise_mask : numpy array
        Array of 0s and 1s; 1 marks the intersection of the two low
        percentiles.

    Raises
    ------
    ValueError
        If threshold is not 25, 50, or 75.
    """
    # Raise error if threshold has invalid value.
    if threshold not in (25, 50, 75):
        raise ValueError("Invalid threshold. Must be 25, 50, or 75.")

    # Percentile break point for each statistic (one call each — the
    # original computed all three quartiles and picked one per branch).
    mean_cutoff = np.percentile(mean, threshold)
    variance_cutoff = np.percentile(variance, threshold)

    # Reclassify into 1 (at/below cutoff) and 0 (above cutoff). With a
    # decreasing bins list and right=True, digitize maps x <= cutoff -> 1.
    mean_reclassified = np.digitize(
        mean, bins=[mean_cutoff, -np.inf], right=True)
    # BUG FIX: the original digitized `mean` against the variance cutoff
    # in every branch; the variance array itself must be classified.
    variance_reclassified = np.digitize(
        variance, bins=[variance_cutoff, -np.inf], right=True)

    # Mask where both conditions hold.
    noise_mask = np.multiply(mean_reclassified, variance_reclassified)
    return noise_mask
40dc9907e0a65a28cdf81a3b76e11754d15257fe
3,627,125
def initialCondition1D(u, a):
    """Apply a second-difference correction to the interior of `u`.

    Returns a new array where each interior point is
    u[i] + 0.5 * a[i]**2 * (u[i+1] - 2*u[i] + u[i-1]) and both endpoints
    are zero.

    NOTE(review): the original comment said "use this function only if
    initial condition != 0 is needed ?????" — intent unconfirmed.
    """
    n = u.size
    corrected = np.zeros(n)
    curvature = u[2:] - 2 * u[1:n-1] + u[0:n-2]
    corrected[1:n-1] = u[1:n-1] + 0.5 * a[1:n-1]**2 * curvature
    return corrected
8047a95fe733867e4dcf1c30acdb130fdc4ff9f6
3,627,126
def learnability_objective_function(throughput, delay):
    """Objective function used in
    https://cs.stanford.edu/~keithw/www/Learnability-SIGCOMM2014.pdf

    throughput: Mbps
    delay: ms

    Returns log(throughput) - log(delay) as a pandas Series with any
    infinite entries replaced by NaN and then dropped.
    """
    raw_score = np.log(throughput) - np.log(delay)
    finite_score = raw_score.replace([np.inf, -np.inf], np.nan)
    return finite_score.dropna()
9646af095668bf0c449f2ec05319c1cc35d59d39
3,627,127
def partition_graph(graph, partitions):
    """
    Create a new graph based on `graph`, where nodes are aggregated based
    on `partitions`, similar to
    :func:`~networkx.algorithms.minors.quotient_graph`, except that it
    only accepts pre-made partitions, and edges are not given a 'weight'
    attribute. Much faster than quotient_graph, since it creates edges
    based on existing edges rather than trying all possible combinations.

    Parameters
    ----------
    graph: networkx.Graph
        The graph to partition
    partitions: collections.abc.Iterable[collections.abc.Iterable[collections.abc.Hashable]]
        E.g. a list of lists of node indices, describing the partitions.
        Will be sorted by lowest index.

    Returns
    -------
    networkx.Graph
        The coarser graph.
    """
    new_graph = nx.Graph()
    # Deterministic ordering: partitions sorted by their smallest node index.
    partitions = sorted(partitions, key=min)
    # mapping: original node index -> coarse node index.
    mapping = {}
    for idx, node_idxs in enumerate(partitions):
        # Each coarse node carries its induced subgraph and summary stats.
        subgraph = nx.subgraph(graph, node_idxs)
        new_graph.add_node(idx, graph=subgraph, nnodes=len(subgraph), nedges=len(subgraph.edges), density=nx.density(subgraph))
        mapping.update({node_idx: idx for node_idx in node_idxs})
    for idx, jdx in graph.edges:
        # Only edges crossing partition boundaries become coarse edges.
        if mapping[idx] != mapping[jdx]:
            new_idx, new_jdx = mapping[idx], mapping[jdx]
            edge_attrs = graph.edges[(idx, jdx)]
            if new_graph.has_edge(new_idx, new_jdx):
                # Merging parallel original edges: keep only attributes on
                # which all merged edges agree.
                old_attrs = new_graph.edges[(new_idx, new_jdx)]
                new_attrs = {key: old_attrs[key] for key in old_attrs.keys() & edge_attrs.keys()\
                    if old_attrs[key] == edge_attrs[key]}
                old_attrs.clear()
                new_graph.add_edge(new_idx, new_jdx, **new_attrs)
            else:
                new_graph.add_edge(new_idx, new_jdx, **edge_attrs)
    return new_graph
98aae7e3c3354a04b30c005c6e0183676f983234
3,627,128
import logging

def __misc_badbarcode():
    """DEPRECATED: setting badbarcode boolean. Use /misc/itemattr instead.

    Gets or Sets the barcode-okayness of a SKU. This will return the
    barcode state of a SKU in a GET message, and will set the barcode
    state of a SKU in a POST message.

    :param int sku: The SKU to view/modify
    :param int badbarcode: POST only. Set to 1 to indicate true.
    """
    # NOTE(review): SECURITY — `sku` (user-supplied) is interpolated into
    # SQL via f-strings below. `escape()` is presumably an HTML escaper,
    # not a SQL one, so this looks injectable; should use parameterized
    # queries (g.cur.execute(query, params)) — confirm with the DB layer.
    sku = escape(request.args.get('sku',0))
    if request.method == 'POST':
        sku = escape(request.form.get('sku',0))
        # Anything non-zero counts as "bad barcode".
        badbarcode = int(escape(request.form.get('badbarcode',0)))
        logging.info(f'badbarcode={bool(badbarcode)}')
        # Upsert the flag for this SKU.
        query = f'INSERT INTO iteminfolist (sku, badbarcode) VALUES ({sku},{bool(badbarcode)}) ON DUPLICATE KEY UPDATE badbarcode={bool(badbarcode)}'
        logging.info(query)
        g.cur.execute(query)
    # Both GET and POST return the (possibly just-updated) current row.
    query = f'SELECT sku, badbarcode FROM iteminfolist WHERE sku={sku}'
    g.cur.execute(query)
    result = g.cur.fetchone()
    return result
dcc02a80348cc327e3705c8392724eecc6243610
3,627,129
def lzip(*args):
    """Zip the given iterables and materialize the result as a list.

    Emulates Python 2's eager ``zip`` (saving parentheses in Python 3).
    """
    zipped = zip(*args)
    return list(zipped)
92aa6dea9d4058e68764b24eb63737a2ec59a835
3,627,130
def sanitize_url(url: str) -> str:
    """Strip the protocol name (http/https/ftp) from a URL.

    This lets URLs be compared regardless of protocol, e.g. when both
    http and https forms are in use. "https://x.com" and "http://x.com"
    both become "://x.com".

    Fixes two defects in the original: the prefix is only stripped when
    it is an actual scheme (followed by "://"), so hosts like
    "httpbin.org" are left intact; and stripping stops after the first
    match instead of re-testing the remainder against later prefixes.
    """
    for prefix in ("https", "http", "ftp"):
        if url.startswith(prefix + "://"):
            return url[len(prefix):]
    return url
9c61a9844cfd6f96e158a9f663357a7a3056abf0
3,627,131
from typing import Dict
from typing import Union

def trimming_parameters(
    library_type: LibraryType, trimming_min_length: int
) -> Dict[str, Union[str, int]]:
    """
    Derive trimming parameters from the library type and the minimum
    allowed trim length.

    :param library_type: The LibraryType (eg. srna)
    :param trimming_min_length: The minimum length of a read before it is
        discarded.
    :return: the trimming parameters
    """
    # Library-specific overrides layered over the shared defaults.
    if library_type == LibraryType.amplicon:
        overrides = {
            "end_quality": 0,
            "mean_quality": 0,
            "min_length": trimming_min_length,
        }
    elif library_type == LibraryType.srna:
        overrides = {
            "min_length": 20,
            "max_length": 22,
        }
    else:
        overrides = {"min_length": trimming_min_length}
    return {**TRIM_PARAMETERS, **overrides}
9eb891eb685a0163c7df0d3d8946606ad54ea11d
3,627,132
def make_nn(output_size, hidden_sizes):
    """Create a fully connected neural network.

    Params:
        output_size: output dimensionality.
        hidden_sizes: list of hidden layer sizes; its length is the
            number of (ReLU) hidden layers. The final layer is linear.
    """
    model_layers = [
        tf.keras.layers.Dense(size, activation=tf.nn.relu, dtype=tf.float32)
        for size in hidden_sizes
    ]
    model_layers.append(tf.keras.layers.Dense(output_size, dtype=tf.float32))
    return tf.keras.Sequential(model_layers)
66945e649f8dba407e72fb9790eb2f23d052d6fb
3,627,133
def image_to_world(bbox, size):
    """Return a function converting image (pixel) coordinates to world
    coordinates.

    bbox: object with xmin, ymin, width, height attributes in world units.
    size: (width_px, height_px) of the image.

    The returned callable maps (x, y) pixel coordinates — y measured from
    the top of the image — to world coordinates.
    """
    # Pixels per world unit along each axis. Fix: the original wrote
    # float(size[1]/bbox.height), which on Python 2 (or with integer-like
    # custom types) truncates before the cast; cast the pixel count first,
    # matching the x axis.
    px_per_unit = (float(size[0]) / bbox.width, float(size[1]) / bbox.height)
    return lambda x, y: (x / px_per_unit[0] + bbox.xmin,
                         (size[1] - y) / px_per_unit[1] + bbox.ymin)
35fcfbf8e76e0ec627da9bf32a797afdae11fe17
3,627,134
def error_func(A, b, x, x_star, fold=50):
    """Calculate errors ||Ax_1-b|| - ||Ax_star-b|| for each x_1 in x.

    The residual norms are accumulated fold-by-fold (row-chunk by
    row-chunk) to bound peak memory on large systems.

    Param:
        A: n*d np.ndarray, coefficient in ||Ax-b||
        b: n*1 np.ndarray, coefficient in ||Ax-b||
        x: tuple, (x_linBoost, x_inverse, x_cholesky)
        x_star: d*1 np.ndarray, x* by lstsq()
        fold: if MemoryError occurs, increase this parameter.
    Return:
        error: tuple, (e_linBoost, e_inverse, e_cholesky)
    """
    # Chunk size; the +1 guarantees fold chunks cover all n rows.
    factor = len(b) // fold + 1
    # ||Ax_star-b||_2^2, accumulated per chunk.
    sum2_star = np.float64(.0)
    for i in range(fold):
        sum2_star += np.linalg.norm(
            A[i*factor:(i + 1)*factor] @ x_star - b[i*factor:(i + 1)*factor])**2
    # ||Ax-b||_2^2 for each candidate solution.
    x_linBoost, x_inverse, x_cholesky = x
    # NOTE(review): these accumulators are float32 while sum2_star is
    # float64 — presumably deliberate (memory/speed?) but it costs
    # precision in the subtraction below; confirm.
    sum2_linBoost = np.float32(.0)
    sum2_inverse = np.float32(.0)
    sum2_cholesky = np.float32(.0)
    for i in range(fold):
        A1 = A[i * factor:(i + 1) * factor]
        b1 = b[i * factor:(i + 1) * factor]
        sum2_linBoost += np.linalg.norm(A1 @ x_linBoost - b1)** 2
        sum2_inverse += np.linalg.norm(A1 @ x_inverse - b1)** 2
        sum2_cholesky += np.linalg.norm(A1 @ x_cholesky - b1)** 2
    # Resulting error triple: each residual norm minus the optimal one.
    error = np.sqrt(sum2_linBoost) - np.sqrt(sum2_star), \
        np.sqrt(sum2_inverse ) - np.sqrt(sum2_star), \
        np.sqrt(sum2_cholesky) - np.sqrt(sum2_star)
    return error
59a6aae4566fcb8406b5e495727550790f9958ff
3,627,135
def git_reset_all():
    """Unstage every file in the repo (runs ``git reset HEAD``).

    Returns
    -------
    out : str
        Output string from stdout if success, stderr if failure.
    err : int
        Error code if failure, 0 otherwise.
    """
    return handle_basic_command('git reset HEAD', 'git_reset_all')
6b3aea4d7cde04cbe5b81ccc58c2dc2bf2f6d1bc
3,627,136
def find_kern_timing(df_trace):
    """Return (start, end) of the first kernel ('kern') row in the trace.

    Falls back to (0, 0) when the trace contains no kernel row.
    """
    kern_rows = df_trace[df_trace['api_type'] == 'kern']
    if kern_rows.empty:
        return 0, 0
    first = kern_rows.iloc[0]
    return first.start, first.end
2e121e7a9f7ae19f7f9588b0105f282c59f125ba
3,627,137
def Get(SyslogSource, WorkspaceID):
    """Read the syslog configuration for the given workspace from the machine.

    Dispatches to the syslog-ng reader when the module-level ``conf_path``
    points at the syslog-ng config, otherwise to the plain syslog reader.
    Entries whose only severity is the sentinel 'none' are normalized to an
    empty severity list.

    # NOTE(review): conf_path / oms_syslog_ng_conf_path and the two readers
    # are defined elsewhere in this module — behavior depends on them.
    """
    if conf_path == oms_syslog_ng_conf_path:
        NewSource = ReadSyslogNGConf(SyslogSource, WorkspaceID)
    else:
        NewSource = ReadSyslogConf(SyslogSource, WorkspaceID)
    # 'none' is the on-disk marker for "no severities"; callers expect [].
    for d in NewSource:
        if d['Severities'] == ['none']:
            d['Severities'] = []
    return NewSource
e8b6613e821336644cdfe9c4091e914f9ec1c8ac
3,627,138
def partie_reelle(c : Complexe) -> float:
    """Return the real part of the complex number ``c`` (a (re, im) pair)."""
    real_part, _imag = c
    return real_part
555ded6a3814002a7ddc1c74467a9002a2bb341d
3,627,139
def get_client_folder_id(drive_service):
    """Return the Google Drive folder id used for this client's backups.

    Looks up the client name from ConsoleERP Settings, searches Drive for a
    folder with that name, and creates the folder if it does not yet exist.
    Returns None when no client name is configured.

    TODO fetch client name
    """
    # NOTE(review): filters="*" looks unusual for frappe.db.get_value —
    # presumably it matches the singleton settings doc; confirm.
    client_name = frappe.db.get_value("ConsoleERP Settings", filters="*", fieldname="client_name")
    if not client_name:
        print("Client Name not set")
        return None

    print("Client Name: %s" % client_name)
    # Search for an existing folder with the client's name (first match wins).
    response = drive_service.files().list(q="name = '%s' and mimeType='application/vnd.google-apps.folder'" % client_name,
                                            pageSize=1, fields='nextPageToken, files(id, name)').execute()
    for file in response.get("files", []):
        return file.get("id")

    # client folder doesnt exist. creating folder
    file = drive_service.files().create(body={
        'name': client_name,
        'mimeType': 'application/vnd.google-apps.folder'}, fields="id").execute()
    print("Folder Created. ID: %s" % file.get("id"))
    return file.get("id")
237793089ed98d92630d25fa9fbe859f6dad7214
3,627,140
from bs4 import BeautifulSoup
import re


def get_event_data(url: str) -> dict:
    """Scrape additional event information from a connpass event page.

    Parameters
    ----------
    url : str
        URL of the connpass event page.

    Returns
    -------
    event_dict : dict[str, Any]
        Event information dict with keys: canceled, lottery, firstcome,
        free, prepaid, postpaid, amount.  All values are None when the
        page could not be fetched.
    """
    try:
        html = urlopen(url)
    except Exception:
        # On access failure, return every field as None.
        event_dict = {
            'canceled': None,
            'lottery': None,
            'firstcome': None,
            'free': None,
            'prepaid': None,
            'postpaid': None,
            'amount': None
        }
        return event_dict
    soup = BeautifulSoup(html, 'html.parser')

    # Number of cancellations, parsed out of the cancellation link text.
    # NOTE(review): the [9:-2] slice assumes a fixed label format — fragile;
    # verify against the current page markup.
    canceled = 0
    cancel = soup.find(href=url + 'participation/#cancelled')
    if cancel is not None:
        canceled = cancel.text[9:-2]

    # Lottery vs first-come-first-served (if both appear, the one shown
    # first on the page wins).
    lottery = False
    firstcome = False
    free = False
    participant_decision_list = soup.find_all('p', class_='participants')
    for participant_decision in participant_decision_list:
        if '抽選' in participant_decision.text:
            lottery = True
            break
        elif '先着' in participant_decision.text:
            firstcome = True
            break
    # Event that is neither lottery nor first-come.
    free = not lottery and not firstcome

    # Pay-at-venue vs prepaid (if both appear, the one shown first wins).
    prepaid = False
    postpaid = False
    # Fee amount (first-shown paid option preferred).
    amount = 0
    payment_list = soup.find_all('p', class_='join_fee')
    for payment in payment_list:
        payment_text = payment.text
        if '(前払い)' in payment_text:
            prepaid = True
            amount = re.sub(r'\D', '', payment_text)
            break
        elif '(会場払い)' in payment_text:
            postpaid = True
            amount = re.sub(r'\D', '', payment_text)
            break

    event_dict = {
        'canceled': canceled,
        'lottery': lottery,
        'firstcome': firstcome,
        'free': free,
        'prepaid': prepaid,
        'postpaid': postpaid,
        'amount': amount
    }
    return event_dict
bbb95eba99c57c07c4067f47cf47d69f6260d45b
3,627,141
def overlap_branches(targetbranch: dict, sourcebranch: dict) -> dict:
    """Recursively merge *sourcebranch* into *targetbranch*, in place.

    Values that are dicts on both sides are merged recursively; any other
    value from *sourcebranch* overwrites the corresponding entry in
    *targetbranch*.  Returns the (mutated) target.

    Examples:
        >>> overlap_branches(
        ...     {"a": 1, "b": {"de": "ep"}},
        ...     {"b": {"de": {"eper": 2}}}
        ... )
        {'a': 1, 'b': {'de': {'eper': 2}}}
        >>> overlap_branches({}, {"ne": {"st": "ed"}})
        {'ne': {'st': 'ed'}}
        >>> overlap_branches({"ne": {"st": "ed"}}, {})
        {'ne': {'st': 'ed'}}

    Args:
        targetbranch(dict): Root where the new branch should be put.
        sourcebranch(dict): New data to be put into the targetbranch.
    """
    if not isinstance(sourcebranch, dict):
        return sourcebranch
    for key in sourcebranch:
        incoming = sourcebranch[key]
        if key in targetbranch and isinstance(targetbranch[key], dict):
            targetbranch[key] = overlap_branches(targetbranch[key], incoming)
        else:
            targetbranch[key] = incoming
    return targetbranch
a11b54b72d4a7d79d0bfaa13ed6c351dd84ce45f
3,627,142
def get_dependencies(node, skip_sources=False):
    """Return the dependency paths for *node*'s ninja children.

    When *skip_sources* is True, children that appear in ``node.sources``
    are excluded from the result.
    """
    children = filter_ninja_nodes(node.children())
    if skip_sources:
        children = (c for c in children if c not in node.sources)
    return [get_path(src_file(c)) for c in children]
2f5589f99e240b1e0c3dfed1106275e6725eae2e
3,627,143
def depolarizing_channel_3q(q, p, system, ancillae):
    """Returns a QuantumCircuit implementing depolarizing channel on q[system]

    Three ancilla qubits are each rotated by Ry(theta) and then used as
    controls for X, Y and Z on the system qubit, realising the depolarizing
    channel with probability ``p`` once the ancillae are traced out.

    Args:
        q (QuantumRegister): the register to use for the circuit
        p (float): the probability for the channel between 0 and 1
        system (int): index of the system qubit
        ancillae (list): list of indices for the three ancillary qubits

    Returns:
        A QuantumCircuit object
    """
    dc = QuantumCircuit(q)
    # Rotation angle chosen so each ancilla triggers its Pauli with the
    # right amplitude for depolarizing probability p.
    theta = 1/2 * np.arccos(1-2*p)

    dc.ry(theta, q[ancillae[0]])
    dc.ry(theta, q[ancillae[1]])
    dc.ry(theta, q[ancillae[2]])

    # Prepare q[1] in a maximally mixed state by entangling it with q[0]
    dc.cx(q[ancillae[0]], q[system])
    dc.cy(q[ancillae[1]], q[system])
    dc.cz(q[ancillae[2]], q[system])

    return dc
154e129dd6865dccff0a172df43f52df11df0004
3,627,144
def resample_30s(annot):
    """resample_30s: to resample annot dataframe when durations are multiple of 30s

    The last annotation is duplicated and shifted by its duration so that
    forward-filling covers the full recording, then everything is resampled
    onto a 30 s grid and onset/duration columns are rebuilt.

    Parameters:
    -----------
    annot : pandas dataframe
        the dataframe of annotations; must have 'Start' and 'Duration'
        columns (numeric or numeric strings).

    Returns:
    --------
    annot : pandas dataframe
        the resampled dataframe of annotations, with 'duration' fixed at 30
        and 'onset' as the cumulative start time of each 30 s epoch.
    """
    # Bug fixes: np.float / np.int were removed in NumPy 1.24 — use the
    # builtin float/int dtypes; DataFrame.append was removed in pandas 2.0
    # — use pd.concat instead.
    annot["start"] = annot.Start.values.astype(float).astype(int)
    df_end = annot.iloc[[-1]].copy()
    # Sentinel row one duration past the last annotation so ffill covers it.
    df_end['start'] += df_end['Duration'].values.astype(float)
    df_end.index += 1
    annot = pd.concat([annot, df_end])
    annot = annot.set_index('start')
    annot.index = pd.to_timedelta(annot.index, unit='s')
    annot = annot.resample('30s').ffill()
    annot = annot.reset_index()
    annot['duration'] = 30.
    onset = np.zeros(annot.shape[0])
    onset[1:] = annot["duration"].values[1:].cumsum()
    annot["onset"] = onset
    # Drop the sentinel row added above.
    return annot.iloc[:-1]
761ba6d624f7911873f3a980925c81ef6d0266dc
3,627,145
def jamoToHang(jamo: str):
    """Compose jamo units (initial/medial/final consonant-vowel parts) into
    Hangul syllables.

    Consumes the input three jamo at a time; any character that cannot be
    composed (lookup or composition fails) is copied through unchanged and
    the cursor advances by one.

    @status `Accepted` \\
    @params `"ㅇㅏㄴㄴㅕㅇㅎㅏ_ㅅㅔ_ㅇㅛ_"` \\
    @returns `"안녕하세요"`
    """
    # chosung/jungsung/jongsung are module-level jamo tables defined
    # elsewhere in this file; 0xAC00 is the Unicode base for Hangul
    # syllables (composition: initial*21*28 + medial*28 + final + 0xAC00).
    result, index = "", 0
    while index < len(jamo):
        # NOTE(review): the bare except also swallows unrelated errors
        # (e.g. typos in the tables) — consider narrowing to
        # (ValueError, IndexError).
        try:
            initial = chosung.index(jamo[index]) * 21 * 28
            midial = jungsung.index(jamo[index + 1]) * 28
            final = jongsung.index(jamo[index + 2])
            result += chr(initial + midial + final + 0xAC00)
            index += 3
        except:
            result += jamo[index]
            index += 1

    return result
875d189f8547b637a13eb7b7eeba584044fbe484
3,627,146
def calc_Vs30(
    profile, option_for_profile_shallower_than_30m=1, verbose=False,
):
    """Compute Vs30 from a Vs profile.

    Vs30 is the reciprocal of the weighted-average travel time from 30 m
    depth to the ground surface; this simply delegates to ``calc_VsZ`` with
    Z fixed at 30 m.

    Parameters
    ----------
    profile : numpy.ndarray
        Vs profile with at least two columns.
    option_for_profile_shallower_than_30m : {1, 2}
        For profiles shallower than 30 m: 1 extends the last layer to 30 m,
        2 uses only the actual total depth.
    verbose : bool
        Whether to warn when the profile is shallower than 30 m.

    Returns
    -------
    Vs30 : float
        The computed Vs30 value.
    """
    return calc_VsZ(
        profile,
        30.0,
        option_for_profile_shallower_than_Z=option_for_profile_shallower_than_30m,
        verbose=verbose,
    )
3d66287836eec960b494617cb652478327ab0067
3,627,147
import torch
from typing import Optional
from typing import Dict
from typing import Any


def prepare_model(
    model: torch.nn.Module,
    move_to_device: bool = True,
    wrap_ddp: bool = True,
    ddp_kwargs: Optional[Dict[str, Any]] = None,
) -> torch.nn.Module:
    """Prepares the model for distributed execution.

    This allows you to use the same exact code regardless of number of
    workers or the device type being used (CPU, GPU).

    Args:
        model (torch.nn.Module): A torch model to prepare.
        move_to_device: Whether to move the model to the correct device.
            If set to False, the model needs to manually be moved to the
            correct device.
        wrap_ddp: Whether to wrap models in ``DistributedDataParallel``.
        ddp_kwargs (Dict[str, Any]): Args to pass into
            ``DistributedDataParallel`` initialization if ``wrap_ddp`` is
            set to True.
    """
    accelerator = get_accelerator(_TorchAccelerator)
    return accelerator.prepare_model(
        model,
        move_to_device=move_to_device,
        wrap_ddp=wrap_ddp,
        ddp_kwargs=ddp_kwargs,
    )
28b1b9f3140c4782e3e6eb9fd1345c3bdec7b88f
3,627,148
def make_legend_labels(dskeys=(), tbkeys=(), sckeys=(), bmkeys=(), plkeys=(),
                       dskey=None, tbkey=None, sckey=None, bmkey=None,
                       plkey=None):
    """Build a legend label from whichever plot indices are in play.

    A component only contributes to the label when there is more than one
    of its kind, so legends stay short for simple plots.  Indices are
    zero-based and rendered one-based.

    @param dskeys : all datafile or examiner keys
    @param tbkeys : all table keys
    @param sckeys : all subchannel keys
    @param bmkeys : all beam keys
    @param plkeys : all polarization keys
    @param dskey : datafile or examiner key
    @param tbkey : table key
    @param sckey : subchannel key
    @param bmkey : beam key
    @param plkey : polarization key
    """
    # Fixes: 'is not None' replaces the '!= None' comparisons, and the
    # shared mutable list defaults become immutable tuples (read-only use,
    # so callers are unaffected).
    label = ""
    if dskey is not None and len(dskeys) > 1:
        label += "ds" + str(dskey + 1)
    if tbkey is not None and len(tbkeys) > 1:
        label += " tb" + str(tbkey + 1)
    if sckey is not None and len(sckeys) > 1:
        label += " sc" + str(sckey + 1)
    if bmkey is not None and len(bmkeys) > 1:
        label += " B" + str(bmkey + 1)
    if plkey is not None and len(plkeys) > 1:
        label += "P" + str(plkey + 1)
    return label
a8b17916f896b7d8526c5ab7ae3cf4a7435627e2
3,627,149
import csv
import sys


def import_summary_tsv_data(file):
    """
    Import the data from a summary_qc.tsv file

    Each row is converted to a QC entry via ``create_entry`` (defined
    elsewhere in this module), keyed by its 'sample' column.

    # NOTE(review): a duplicate sample aborts the whole process via
    # sys.exit(1) rather than raising — presumably intended for CLI use.
    """
    _qc = dict()
    with open(file, 'r') as ifh:
        reader = csv.DictReader(ifh, delimiter='\t')
        for item in reader:
            if item['sample'] not in _qc:
                _qc.update(create_entry(line=item))
            else:
                print(f"Sample: {item['sample']} already processed")
                sys.exit(1)
    return _qc
8c4ca80ed15bcd59ff773320d26579d248b61b7a
3,627,150
def get_parameter_change(old_params, new_params, ord='inf'):
    """Measure the change in parameters.

    Parameters
    ----------
    old_params : list
        The old parameters as a list of ndarrays, typically from
        session.run(var_list)
    new_params : list
        The new parameters as a list of ndarrays, typically from
        session.run(var_list)
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Type of norm used to quantify the change.  Passed to
        `numpy.linalg.norm`.  The strings 'inf'/'-inf' map to np.inf/-np.inf.

    Returns
    -------
    change : float
        The parameter change measured as a norm of the vector difference.
    """
    if ord == 'inf':
        ord = np.inf
    elif ord == '-inf':
        ord = -np.inf

    flat_old = np.concatenate([p.ravel() for p in old_params])
    flat_new = np.concatenate([p.ravel() for p in new_params])
    return np.linalg.norm(flat_new - flat_old, ord=ord)
dc2f15c53b1c65acdfb60d25fd70f9c21f046b70
3,627,151
def get_image_dir():
    """Return the ``image_dir`` configured in the current data context."""
    context = get_data_context()
    return context.image_dir
44557bc421ba14212c089970dcc7f33978ac83fe
3,627,152
import collections


def get_interface_config_vlan():
    """Return the interface configuration parameters enabling VLAN tagging
    for all IP static addressing.
    """
    return collections.OrderedDict(VLAN='yes')
61ef6affba231af19e4030c54bfcaaaa15a6438f
3,627,153
def get_browser(sport, debug=False):
    """
    Use selenium and chromedriver to do our website getting.
    Might as well go all the way.

    Configures a Chrome session with a custom user agent and a per-sport
    default download directory, with a 10 s implicit wait.

    :param sport: sport name, interpolated into the download directory path
    :param debug: whether to set the browser to debug mode
        (NOTE(review): currently unused in the body — confirm intent)
    :return: a configured selenium Chrome webdriver
    """
    chrome_options = webdriver.ChromeOptions()
    # USERAGENT and root_data_dir are module-level settings defined elsewhere.
    chrome_options.add_argument('--user-agent=%s' % USERAGENT)
    webdriver.DesiredCapabilities.CHROME["userAgent"] = "ignore"
    prefs = {"download.default_directory" : root_data_dir.format(sport=sport)}
    chrome_options.add_experimental_option("prefs",prefs)
    # NOTE(review): the chrome_options= keyword is deprecated in newer
    # selenium releases in favor of options= — confirm pinned version.
    browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.implicitly_wait(10)  # wait up to 10s for an element if it's not instantly available
    return browser
2c0e975f63c8b6e2c61f9be18a76c86b0503d8b1
3,627,154
def parsear_ruta(linea):
    """
    Parse one line of the routes file, split its fields, and return a
    properly constructed Ruta object.

    Returns None when the split yields fewer fields than expected, when any
    value does not have the proper format (documented on the Ruta class),
    or when the destination city equals the origin city.
    """
    # Expected tab-separated fields: id, origin city, destination city,
    # some integer attribute, and a float attribute (see Ruta).
    campos = linea.split("\t")
    if (len(campos) < 5):
        return None
    try:
        # Reject self-loops: origin and destination must differ.
        if (int(campos[1]) == int(campos[2])):
            return None
        nueva_ruta = Ruta(int(campos[0]), int(campos[1]), int(campos[2]), int(campos[3]), float(campos[4]))
    except ValueError:
        return None
    return nueva_ruta
934122d266fa799e79812613cbb539bd8ebd501d
3,627,155
def split_channel_groups(data,meta):
    """
    With respect to the sensor site, a different number of channels is
    given.  In both sites the first 160 channels contain the meg data;
    the remaining channels are split into two groups by the label prefix
    of channel 160.

    params:
    -------
    data: dict-like with data['data'] of shape (160+type2channels+type3channels,time_samples)
    meta: dict whose ['D']['channels'] entry holds the channel label table
        (MATLAB-style nested arrays; see load_key_chain)

    returns:
    -------
    meg,type2records,type3records
    """
    ch_labels = load_key_chain(meta['D'],['channels'])['label'][0]
    # The 3-character prefix of the first non-MEG channel identifies the
    # "type 2" group; everything after it is "type 3".
    type2trailing = ch_labels[160][0][:3]
    meg = data['data'][:160]
    # Last index (exclusive) of channels sharing the type-2 prefix.
    # NOTE(review): assumes type-2 channels are contiguous after index 160.
    N_type2 = np.max([j+160 for j,label in enumerate(ch_labels[160:]) if type2trailing in label[0]])+1
    type2channels = slice(160,N_type2)
    type3channels = slice(N_type2,None)
    type2records = data['data'][type2channels]
    type3records = data['data'][type3channels]
    return meg,type2records,type3records
399bd66b6aa7681ac67db73c6c68aae1b5f7ba72
3,627,156
from .core import read_byte_data


def _read_header_byte_data(header_structure):
    """Read the raw byte data for a PDS4 Header from its data file.

    The byte range is derived from the structure's meta data ('offset'
    and 'object_length').

    Parameters
    ----------
    header_structure : HeaderStructure
        The PDS4 Header data structure whose bytes should be read.  Must
        have been initialized (e.g. via ``HeaderStructure.from_file``) so
        that the required meta data is present.

    Returns
    -------
    str or bytes
        The exact byte data for the header.
    """
    meta = header_structure.meta_data
    start = meta['offset']
    return read_byte_data(header_structure.parent_filename,
                          start, start + meta['object_length'])
7115d8ecdb4ef511fd0a7b0d74e0f8484673aaf7
3,627,157
import numbers


def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    Parameters
    ----------
    seed : None | int | instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
dbb76ad1094b2d4cb2acb7d0fb7d59290ed6fd78
3,627,158
import os
import json
def repolist(orgname, refresh=True):
    """Return list of repos for a GitHub organization.

    If refresh=False, we use the cached data in
    /data/repos{orgname}.json and don't retrieve the repo data from GitHub
    API.

    Returns tuples of (reponame, size). Note that this is the size returned
    by the GitHub API, which is typically 2-3X less than actual size.

    We ignore private repos and forks.
    """
    # SETTINGS, github_allpages and dicts2json are defined elsewhere in
    # this module.
    filename = os.path.join(SETTINGS["folder"], orgname.lower()) + "/repodata.json"
    if not refresh and os.path.isfile(filename):
        repodata = json.loads(open(filename, "r").read())  # read cached data
    else:
        endpoint = "/orgs/" + orgname.lower() + "/repos?per_page=100"
        repodata = github_allpages(endpoint=endpoint)
        # Persist the fresh API response for later cached runs.
        dicts2json(repodata, filename)
        print(
            f"\r{orgname} - {len(repodata)} total public non-forked repos found"
            + 60 * " "
        )

    return sorted(
        [
            (repo["name"].lower(), repo["size"])
            for repo in repodata
            if not repo["private"] and not repo["fork"]
        ]
    )
5e3d3dacbf2ed3f638f068e9c7f2bd32e143b9e5
3,627,159
def normalize_string(value):
    """Normalize *value* to a stripped ``str``.

    Bytes input is decoded (UTF-8) first; any non-string input raises
    ValueError.
    """
    text = value.decode() if isinstance(value, bytes) else value
    if not isinstance(text, str):
        raise ValueError("Cannot convert {} to string".format(text))
    return text.strip()
86d8134f8f83384d83da45ed6cb82841301e2e52
3,627,160
def _is_test_env(env_config: tox.config.TestenvConfig) -> bool:
    """Tell whether *env_config* describes a genuine test environment.

    Tox creates extra environments for its own purposes — provisioning
    (named after ``provision_tox_env``) and isolated builds (named after
    ``isolated_build_env``) — which look like test environments in hooks
    such as ``tox_testenv_create``.  Those are excluded first; what remains
    counts as a test environment only if it is listed in ``envlist``.
    """
    tox_config = env_config.config

    # Collect the names tox reserves for its internal environments.
    private_env_names = set()
    for attr in ('provision_tox_env', 'isolated_build_env'):
        reserved = getattr(tox_config, attr, None)
        if reserved:
            private_env_names.add(reserved)

    name = env_config.envname
    return name not in private_env_names and name in tox_config.envlist
bf2d9ebdc3e8d3428a5bbc0d27abd0ecc10ca6be
3,627,161
def latest_version():
    """Return the latest version of Windows git available for download.

    Scrapes the git-scm download page; returns 'Unknown' when the page or
    the manual-download link cannot be found.
    """
    soup = get_soup('https://git-scm.com/download/win')
    if not soup:
        return 'Unknown'
    link = soup.find('a', string='Click here to download manually')
    if not link:
        return 'Unknown'
    return downloadable_version(link.attrs['href'])
35563a0da6eb42e619609dd8d646a7bd5033b5da
3,627,162
from datetime import datetime, timedelta


def get_interval_date_list_by_freq_code(start_date, end_date, freq_code):
    """Split [start_date, end_date] into consecutive [start, end] date pairs.

    Each pair ends on a period boundary produced by
    ``get_end_date_list_by_freq_code`` and the next pair starts one day
    after the previous pair's end.

    :param start_date: first date of the overall range
    :param end_date: last date of the overall range
    :param freq_code: D, W, M
    :return: list of [interval_start, interval_end] pairs
    """
    end_date_list = get_end_date_list_by_freq_code(start_date, end_date, freq_code)
    interval_date_list = []
    interval_start = start_date
    for period_end in end_date_list:
        interval_date_list.append([interval_start, period_end])
        # Bug fix: the original imported only the ``datetime`` class and
        # then called ``datetime.timedelta`` — an AttributeError at runtime.
        # Import and use ``timedelta`` directly instead.
        interval_start = period_end + timedelta(days=1)
    return interval_date_list
34ce484294f62ef6e7f0726e73f0c502f2c56f01
3,627,163
from distutils.version import StrictVersion
from distutils.spawn import find_executable
import re
import os

def get_versions():
    """ Try to find out the versions of gcc and ld.
        If not possible it returns None for it.

        Returns a (gcc_version, ld_version) tuple where gcc_version is a
        StrictVersion or None and ld_version is always None (see below).
    """
    gcc_exe = find_executable('gcc')
    if gcc_exe:
        # Ask gcc itself for its version string.
        out = os.popen(gcc_exe + ' -dumpversion','r')
        try:
            out_string = out.read()
        finally:
            out.close()
        # NOTE(review): non-raw pattern '(\d+\.\d+\.\d+)' relies on
        # \d surviving as a literal escape — works today but raises a
        # DeprecationWarning on modern Pythons; prefer a raw string.
        result = re.search('(\d+\.\d+\.\d+)', out_string, re.ASCII)
        if result:
            gcc_version = StrictVersion(result.group(1))
        else:
            gcc_version = None
    else:
        gcc_version = None
    # EMX ld has no way of reporting version number, and we use GCC
    # anyway - so we can link OMF DLLs
    ld_version = None
    return (gcc_version, ld_version)
3774f0fe270733512b3a6c1cb3e361a1cb90a362
3,627,164
def _rotate_move(move, axis, n=1):
    """Rotate a move clockwise about an axis

    The axis of rotation should correspond to a primitive rotation
    operation of a cube Face.

    Parameters
    ----------
    move : str
        A face-turn move in standard cube notation ('U', "U'", 'F', ...).
    axis : Face
        The face about which to rotate (keys of the lookup table below).
    n : int
        Number of clockwise quarter-rotations to apply (0 returns the
        move unchanged).

    Returns
    -------
    str
        The equivalent move after rotating the whole cube n times.
    """
    if n == 0:
        return move
    # Per-axis substitution table: moves on the rotation axis itself are
    # fixed points; the four surrounding faces cycle.
    table = {
        Face.U: {
            'U': 'U', 'D': 'D', 'U\'': 'U\'', 'D\'': 'D\'',
            'F': 'L', 'L': 'B', 'B': 'R', 'R': 'F',
            'F\'': 'L\'', 'L\'': 'B\'', 'B\'': 'R\'', 'R\'': 'F\'',
        },
        Face.D: {
            'U': 'U', 'D': 'D', 'U\'': 'U\'', 'D\'': 'D\'',
            'L': 'F', 'F': 'R', 'R': 'B', 'B': 'L',
            'L\'': 'F\'', 'F\'': 'R\'', 'R\'': 'B\'', 'B\'': 'L\'',
        },
        Face.R: {
            'R': 'R', 'L': 'L', 'R\'': 'R\'', 'L\'': 'L\'',
            'U': 'B', 'B': 'D', 'D': 'F', 'F': 'U',
            'U\'': 'B\'', 'B\'': 'D\'', 'D\'': 'F\'', 'F\'': 'U\'',
        },
        Face.L: {
            'R': 'R', 'L': 'L', 'R\'': 'R\'', 'L\'': 'L\'',
            'U': 'F', 'F': 'D', 'D': 'B', 'B': 'U',
            'U\'': 'F\'', 'F\'': 'D\'', 'D\'': 'B\'', 'B\'': 'U\'',
        },
        Face.F: {
            'F': 'F', 'B': 'B', 'F\'': 'F\'', 'B\'': 'B\'',
            'R': 'D', 'D': 'L', 'L': 'U', 'U': 'R',
            'R\'': 'D\'', 'D\'': 'L\'', 'L\'': 'U\'', 'U\'': 'R\'',
        },
        Face.B: {
            'F': 'F', 'B': 'B', 'F\'': 'F\'', 'B\'': 'B\'',
            'R': 'U', 'U': 'L', 'L': 'D', 'D': 'R',
            'R\'': 'U\'', 'U\'': 'L\'', 'L\'': 'D\'', 'D\'': 'R\'',
        }
    }
    # Apply the single-rotation substitution n times.
    for _ in range(n):
        move = table[axis][move]
    return move
12554560bc9f2b65c101ace74b179cb252bdb62b
3,627,165
def mapAddress(name):
    """Given a register name, return the address of that register.

    Integer addresses are passed through unaffected.

    Parameters
    ----------
    name : str or int
        Register name (case-insensitive) or a raw address.

    Returns
    -------
    int
        The address, looked up from the module-level ``RCPOD_REG_<NAME>``
        constants for string input.
    """
    # isinstance() replaces the fragile `type(name) == type('')` comparison
    # and additionally accepts str subclasses.
    if isinstance(name, str):
        return globals()['RCPOD_REG_' + name.upper()]
    return name
21f2f9a085d259d5fd46b258cc3ee0298fdda158
3,627,166
def list_index(ls, indices):
    """numpy-style creation of new list based on a list of elements and
    another list of indices

    Parameters
    ----------
    ls: list
        List of elements
    indices: list
        List of indices

    Returns
    -------
    list
    """
    selected = []
    for idx in indices:
        selected.append(ls[idx])
    return selected
7e5e35674f48208ae3e0befbf05b2a2e608bcdf0
3,627,167
def create_seed_population(cities, howmany):
    """Create a seed file with tours generated by the nearest-neighbour
    algorithm.

    Builds ``howmany`` nearest-neighbour tours over ``cities`` and wraps
    each as an OrderedIndividual whose genes are the tour minus its first
    city.

    # NOTE(review): despite the summary, nothing is written to disk — the
    # save_population call is commented out; the population is returned.
    """
    attr = OrderedIndividual.get_attributes()
    # One gene per city except the (implicit) starting city.
    attr['osi.num_genes'] = len(cities) - 1
    tours = generate_nntours(cities, howmany)
    pop = []
    for i in range(len(tours)):
        x = OrderedIndividual(attr)
        # assumes that the tour will start from the final city
        x.genes = tours[i][1:]
        pop.append(x)
    # OrderedIndividual.save_population(filename, pop)
    return pop
1b5338f687c0780b85c6788fe9891a10c9ee9633
3,627,168
import io
import re


def copyright_present(f):
    """Check if file already has copyright header.

    Args:
        f - Path to file

    Returns a truthy re match object when the word 'Copyright' occurs
    anywhere in the file, otherwise None.
    """
    with io.open(f, "r", encoding="utf-8") as handle:
        contents = handle.read()
    return re.search('Copyright', contents)
afbffde0ab51984dab40d296f8ad9ca29829aef1
3,627,169
import math


def calc_LFC(in_file_2, bin_list):
    """Convert raw bin counts to log2 fold-changes (L2FC) against a baseline.

    Parameters
    ----------
    in_file_2 : str
        Path to a CSV file (with one header row) whose 4th column holds
        the baseline count for each bin, one line per bin in order.
    bin_list : list of list
        Bins whose 4th element (index 3) holds the observed count; it is
        replaced in place by round(log2(observed / baseline), 2).

    Returns
    -------
    list of list
        The same ``bin_list`` with counts replaced by L2FC values (0 when
        the ratio cannot be computed).
    """
    bin_no = 0  # cursor into bin_list, advanced per data line
    header_line = True
    with open(in_file_2, 'r') as f:
        for bin_count in f:
            if header_line:
                header_line = False
                continue
            fields = bin_count.strip().split(',')
            # Bare except narrowed: only the expected numeric failures
            # (zero/invalid baseline, log of a non-positive ratio, short
            # row) default the value to 0; anything else now surfaces.
            try:
                FC = bin_list[bin_no][3] / float(fields[3])
                LFC = math.log(FC, 2)
            except (ZeroDivisionError, ValueError, IndexError):
                LFC = 0
            bin_list[bin_no][3] = round(LFC, 2)
            bin_no += 1
    return bin_list
379035fa4972c956d9734b958f3e81a3792c96d6
3,627,170
def parse_value(named_reg_value):
    """
    Convert the value returned from EnumValue to a (name, value) tuple
    using the value classes.

    The raw value is wrapped in the class registered for its registry
    value type in ``REG_VALUE_TYPE_MAP``.
    """
    name, raw, reg_type = named_reg_value
    wrapper = REG_VALUE_TYPE_MAP[reg_type]
    return name, wrapper(raw)
9e77edad1cee75973ea06c0cb2bfe6ec217abc2e
3,627,171
def construct_model_vector(df, n):
    """
    Convert a dataframe to an array of numpy vectors which are of the form
    [(1-hot encoding of position), age, (game stats for n games leading up
    to this one for a given player)].  If there are p positions and s stats
    this vector will be of dimension p + 1 + s * n.

    df: dataframe of game values
    n: number of games to include in analysis

    Returns: a (num_games, p + 1 + s*n) numpy array and a parallel list of
    each game's year.
    # NOTE(review): relies on module-level `positions` (name -> index dict),
    # `relevant_stats` (stat column names) and `last_n_weeks` helper — all
    # defined elsewhere in this file.
    """
    data = []
    years = []
    for game in df.iterrows():
        # One-hot encode the player's position.
        pos = np.zeros(len(positions.keys()))
        pos[positions[game[1]['pos']]] = 1
        age = np.array([game[1]['age']])
        # Stats for the n games preceding this one in the player's career.
        prev_games = last_n_weeks(df, game[1]['stub'], int(game[1]['career_game']), n)
        games_vec = prev_games[relevant_stats].to_numpy().flatten()
        final_vec = np.concatenate((pos, age, games_vec))
        data.append(final_vec)
        years.append(game[1]['year'])
    return np.stack(data), years
019c5072a536e6910b2ef2a3ec9ae3682d949f10
3,627,172
import os


def poscar_parser_file_object():
    """Load the POSCAR file next to this module using a file object."""
    poscar_path = os.path.dirname(__file__) + '/POSCAR'
    with open(poscar_path) as file_handler:
        return Poscar(file_handler=file_handler)
b642bbbedefa33fd612c65e49a9aa9e63caa7754
3,627,173
import yaml
def parse_json(file_handle):
    """Parse a repeats file in the .json format

    Each repeat locus must provide 'LocusId', 'NormalMax' and
    'PathologicMin'; entries missing the thresholds are skipped with a
    warning.  From ExpansionHunter 3.0 the returned keys additionally embed
    the region of interest ('PathologicRegion' when present, otherwise
    'ReferenceRegion').

    Args:
        file_handle(iterable(str))

    Returns:
        repeat_info(dict): maps repeat id (possibly suffixed with a region)
            to {'normal_max': ..., 'pathologic_min': ...}

    Raises:
        SyntaxError: when the file is not valid YAML/JSON or a locus lacks
            'LocusId'.
    """
    repeat_info = {}
    try:
        # yaml.safe_load also accepts JSON, so one parser covers both.
        raw_info = yaml.safe_load(file_handle)
    except yaml.YAMLError as err:
        raise SyntaxError("Repeats file is malformed")

    for i,repeat_unit in enumerate(raw_info, 1):
        try:
            repid = repeat_unit['LocusId']
        except KeyError as err:
            raise SyntaxError("Repeat number {0} is missing 'LocusId'".format(i))
        try:
            normal_max = repeat_unit['NormalMax']
        except KeyError as err:
            LOG.warning("Repeat number {0} ({1}) is missing 'NormalMax'. Skipping..".format(i,repid))
            continue
        try:
            pathologic_min = repeat_unit['PathologicMin']
        except KeyError as err:
            LOG.warning("Repeat number {0} ({1}) is missing 'PathologicMin'. Skipping..".format(i,repid))
            continue
        # ExHu 3.0 release candidate repids include the pathologic region of interest, but not the final version
        repeat_info[repid] = dict(normal_max=normal_max, pathologic_min=pathologic_min)
        # From ExHu 3.0 repids include the region of interest.
        try:
            reference_region = repeat_unit['ReferenceRegion']
        except KeyError as err:
            LOG.warning("Repeat number {0} ({1}) is missing 'ReferenceRegion'. Skipping..".format(i,repid))
            continue
        if 'PathologicRegion' in repeat_unit:
            repid += "_" + repeat_unit['PathologicRegion']
        else:
            try:
                repid += "_" + reference_region
            except TypeError as err:
                # Multiple reference regions come back as a list, which
                # cannot be concatenated onto the id string.
                LOG.warning("Repeat number {0} ({1}) has multiple 'ReferenceRegion' but no 'PathologicRegion'. Skipping..".format(i,repid))
                continue

        # ExHu 3.0 release candidate repids include the pathologic region of interest, but not the final version
        repeat_info[repid] = dict(normal_max=normal_max, pathologic_min=pathologic_min)

    return repeat_info
889c99594c7d92dd278caefc2af2e71fdfb0354b
3,627,174
def get_value(obj, expr):
    """
    Extracts value from object or expression.

    ``F`` expressions are resolved as attributes of *obj*; objects exposing
    a ``value`` attribute are unwrapped; anything else is returned as-is.
    """
    if isinstance(expr, F):
        return getattr(obj, expr.name)
    if hasattr(expr, 'value'):
        return expr.value
    return expr
9413f762e6ed19895bbbfda8da5f258bba387c80
3,627,175
from inspect import ismethod
from typing import Iterable


def _get_common_evented_attributes(
    layers: Iterable[Layer],
    exclude: set[str] = {'thumbnail', 'status', 'name', 'data'},
    with_private=False,
) -> set[str]:
    """Get the set of common, non-private evented attributes in ``layers``.

    Not all layer events are attributes, and not all attributes have
    corresponding events. Here we get the set of valid, non-private
    attributes that are both events and attributes for the provided layer
    set.

    Parameters
    ----------
    layers : iterable
        A set of layers to evaluate for attribute linking.
    exclude : set, optional
        Layer attributes that make no sense to link, or may error on
        changing. {'thumbnail', 'status', 'name', 'data'}
        NOTE(review): mutable default — harmless because it is only read,
        but consider a frozenset.
    with_private : bool, optional
        include private attributes

    Returns
    -------
    names : set of str
        A set of attribute names that may be linked between ``layers``.
    """
    try:
        first_layer = next(iter(layers))
    except StopIteration:
        raise ValueError(
            trans._(
                "``layers`` iterable must have at least one layer",
                deferred=True,
            )
        )

    # Intersect events and attributes across all layers.
    common_events = set.intersection(*(set(lay.events) for lay in layers))
    common_attrs = set.intersection(*(set(dir(lay)) for lay in layers))
    if not with_private:
        common_attrs = {x for x in common_attrs if not x.startswith("_")}
    common = common_events & common_attrs - exclude

    # lastly, discard any method-only events (we just want attrs)
    for attr in set(common_attrs):
        # properties do not count as methods and will not be excluded
        if ismethod(getattr(first_layer.__class__, attr, None)):
            common.discard(attr)
    return common
33ce31cd98659f295f45e33788cfa69510ddb640
3,627,176
def farthest_from_point(point, point_set):
    """Find the point in *point_set* farthest from *point*.

    Returns a ``[coordinate, distance_squared]`` pair, where the distance
    is the squared Euclidean distance (via ``euclidean_distance_square``).

    Raises ValueError when *point_set* is empty.
    """
    # Perf fix: a single O(n) scan replaces the previous full sort
    # (O(n log n)).  '>=' keeps the LAST of any tied maxima, matching the
    # old stable-sort-then-take-last behaviour.
    if not point_set:
        raise ValueError("point_set must not be empty")
    farthest = None
    for candidate in point_set:
        dist_sq = euclidean_distance_square(point, candidate)
        if farthest is None or dist_sq >= farthest[1]:
            farthest = [candidate, dist_sq]
    return farthest
a2105d7e96e6289f9d67aff08d3fc1934fa05a0b
3,627,177
import subprocess
import time
def start_app():
    """
    ASSUMES AN EMULATOR HAS ALREADY BEEN STARTED. Starts the calculator
    program, finds the pid, and instantiates a TestMutator object.

    Returns a (TestMutator, GATestMutator) pair built around a seed Test
    that sleeps then taps at (103, 719).
    """
    # PACKAGE, Test, TestMutator and GATestMutator are defined elsewhere
    # in this module.
    subprocess.call(["adb", "shell", "am start " + PACKAGE])
    # Give the app time to come up before probing for its pid.
    time.sleep(10)
    bits = subprocess.check_output('adb shell ps | grep ' + PACKAGE, shell=True)
    # NOTE(review): check_output returns bytes on Python 3, so the string
    # concatenation below would raise — presumably this ran on Python 2;
    # confirm (a .decode() would be needed on py3).
    pid = bits.split()[1]
    print("Pid is " + pid)
    test = Test("microsleep 10000000\ninput tap 103 719\n", app_pkg=PACKAGE)
    return(TestMutator(test, pid, 700, PACKAGE), GATestMutator(test, pid, 700, PACKAGE))
5be6e57a7401530b4751f8e77485762e70a2fe4c
3,627,178
def get_subtypes():
    """Get all available subtypes (the values of the ``Subtype`` enum)."""
    return [subtype.value for subtype in Subtype]
61b858731812e1e8fe67c4a09d9bcde2cbe6c596
3,627,179
def clean_dict(dictionary: dict) -> dict:
    """Recursively removes `None` values from `dictionary`, in place.

    Args:
        dictionary (dict): subject dictionary

    Returns:
        dict: the same dictionary, without None values
    """
    for key in list(dictionary):
        value = dictionary[key]
        if isinstance(value, dict):
            clean_dict(value)
        elif value is None:
            del dictionary[key]
    return dictionary
3968b6d354116cca299a01bf2c61d7b2d9610da9
3,627,180
def create_emoticon_stream(table, n_hours=None):
    """Creates a twitter stream object that will insert queries into object
    and will terminate in n_hours

    Parameters:
    -----------
    table: connection to mongodb table
    n_hours: number of hours to run before termination, default = None
        (run indefinitely)

    Returns:
    --------
    tweepy Stream object
    """
    # TWITTER credentials and EmoticonStreamListener are defined elsewhere
    # in this module.
    auth = OAuthHandler(TWITTER.CONSUMER_KEY, TWITTER.CONSUMER_SECRET)
    auth.set_access_token(TWITTER.ACCESS_TOKEN, TWITTER.ACCESS_SECRET)
    # The listener writes matching tweets into `table` and stops itself
    # after n_hours.
    stream_listener = EmoticonStreamListener(table, n_hours=n_hours)
    twitter_stream = Stream(auth, stream_listener)
    return twitter_stream
62b1eb58a81d0ce7d2e752368c3b7a969b87736d
3,627,181
def tag_tranfsers(df):
    """Tag txns with description indicating tranfser payment.

    Rows whose description matches a transfer keyword — unless it also
    matches an exclusion keyword — get their 'tag' set to 'transfers'.
    Returns a modified copy; the input is untouched.
    """
    df = df.copy()
    transfer_pattern = '|'.join([' ft', ' trf', 'xfer', 'transfer'])
    exclusion_pattern = '|'.join(['fee', 'interest'])
    desc = df.transaction_description
    is_transfer = desc.str.contains(transfer_pattern)
    is_excluded = desc.str.contains(exclusion_pattern)
    df.loc[is_transfer & ~is_excluded, 'tag'] = 'transfers'
    return df
4fdfd775ec423418370776c34fac809a513f91b5
3,627,182
from typing import Optional
from typing import Tuple
from typing import List
from typing import Dict
def calc_box(
    df: dd.DataFrame,
    bins: int,
    ngroups: int = 10,
    largest: bool = True,
    dtype: Optional[DTypeDef] = None,
) -> Tuple[pd.DataFrame, List[str], List[float], Optional[Dict[str, int]]]:
    """
    Compute a box plot over either
    1) the values in one column
    2) the values corresponding to groups in another column
    3) the values corresponding to binning another column

    Parameters
    ----------
    df
        Dataframe with one or two columns
    bins
        Number of bins to use if df has two numerical columns
    ngroups
        Number of groups to show if df has a categorical and numerical column
    largest
        When calculating a box plot per group, select the largest or smallest groups
    dtype: str or DType or dict of str or dict of DType, default None
        Specify Data Types for designated column or all columns.
        E.g.  dtype = {"a": Continuous, "b": "Nominal"} or
        dtype = {"a": Continuous(), "b": "nominal"}
        or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()

    Returns
    -------
    Tuple[pd.DataFrame, List[str], List[float], Dict[str, int]]
        The box plot statistics in a dataframe, a list of the outlier
        groups and another list of the outlier values, a dictionary
        logging the sampled group output
    """
    # pylint: disable=too-many-locals

    grp_cnt_stats = None  # to inform the user of sampled output

    x = df.columns[0]
    if len(df.columns) == 1:
        df = _calc_box_stats(df[x], x)
    else:
        y = df.columns[1]
        if is_dtype(detect_dtype(df[x], dtype), Continuous()) and is_dtype(
            detect_dtype(df[y], dtype), Continuous()
        ):
            minv, maxv, cnt = dask.compute(df[x].min(), df[x].max(), df[x].nunique())
            bins = cnt if cnt < bins else bins
            endpts = np.linspace(minv, maxv, num=bins + 1)
            # calculate a box plot over each bin; the last bin is closed on
            # the right so the max value is not dropped
            df = dd.concat(
                [
                    _calc_box_stats(
                        df[(df[x] >= endpts[i]) & (df[x] < endpts[i + 1])][y],
                        f"[{endpts[i]},{endpts[i + 1]})",
                    )
                    if i != len(endpts) - 2
                    else _calc_box_stats(
                        df[(df[x] >= endpts[i]) & (df[x] <= endpts[i + 1])][y],
                        f"[{endpts[i]},{endpts[i + 1]}]",
                    )
                    for i in range(len(endpts) - 1)
                ],
                axis=1,
            ).compute()
            endpts_df = pd.DataFrame(
                [endpts[:-1], endpts[1:]], ["lb", "ub"], df.columns
            )
            df = pd.concat([df, endpts_df], axis=0)
        else:
            df, grp_cnt_stats, largest_grps = _calc_groups(df, x, ngroups, largest)
            # calculate a box plot over each group
            df = dd.concat(
                [_calc_box_stats(df[df[x] == grp][y], grp) for grp in largest_grps],
                axis=1,
            ).compute()

    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # append the plotting x-position row with pd.concat instead.
    x_row = pd.Series({c: i + 1 for i, c in enumerate(df.columns)}, name="x")
    df = pd.concat([df, x_row.to_frame().T]).T
    df.index.name = "grp"
    df = df.reset_index()
    df["x0"], df["x1"] = df["x"] - 0.8, df["x"] - 0.2  # width of whiskers for plotting
    outx, outy = _calc_box_otlrs(df)

    return df, outx, outy, grp_cnt_stats
2ad140d7897c1a12c72a4084837fde01667b0eda
3,627,183
def remove_dead_exceptions(graph):
    """Exceptions can be removed if they are unreachable"""
    # Walks every exception-raising block of the flow graph and rebuilds its
    # exit list, dropping exception links that can never fire because an
    # earlier (broader) handler already catches them, and merging adjacent
    # redundant links that jump to the same target with the same args.

    def issubclassofmember(cls, seq):
        # True if cls is a subclass of any non-None member of seq.
        for member in seq:
            if member and issubclass(cls, member):
                return True
        return False

    # NOTE: iterate over a snapshot, since recloseblock mutates the graph.
    for block in list(graph.iterblocks()):
        if not block.canraise:
            continue

        exits = []  # links we are keeping, in order
        seen = []   # exception classes already handled by kept links
        for link in block.exits:
            case = link.exitcase
            # check whether exceptions are shadowed
            if issubclassofmember(case, seen):
                continue
            # see if the previous case can be merged: a kept link whose
            # exception class is narrower than this one and which goes to the
            # same target with the same args is redundant once this link
            # exists, so pop it. Only links beyond the first are candidates
            # (the first exit is the non-exceptional one).
            while len(exits) > 1:
                prev = exits[-1]
                if not (issubclass(prev.exitcase, link.exitcase) and
                        prev.target is link.target and prev.args == link.args):
                    break
                exits.pop()
            exits.append(link)
            seen.append(case)
        # Replace the block's exits with the pruned list.
        block.recloseblock(*exits)
fc0c810eef726f0979678e3003051c99775a981d
3,627,184
def borda_matrix(lTuple):
    """Integrate rankings from several miRNA coefficient matrices via a
    Borda count election.

    Args:
        lTuple   list    List of ``(correlation_matrix, value_name)`` tuples.

    Returns:
        TableRank   df    Merged table of all values with their Borda ranking.
    """
    # Convert every correlation matrix to long-table form, then merge and rank.
    tables = [matrix2table(item[0], item[1]) for item in lTuple]
    merged = merge_tables(tables)
    return borda_table(merged)
405ff7c469b9fc4026de899ab7a46e959c2280cc
3,627,185
import sys
def plotHeatmap(fcsDF, x, y, vI=sentinel, bins=300, scale='linear', xscale='linear',
                yscale='linear', thresh=1000, aspect='auto', **kwargs):
    """
    Core plotting function of AliGater. Mainly intended to be called internally,
    but may be called directly.
    Only plots. No gating functionalities.

    **Parameters**

    fcsDF : pandas.DataFrame
        Flow data loaded in a pandas DataFrame.
    x, y : str
        Marker labels.
    vI : list-like, optional
        list-like index of events in the fcsDF that correspond to the parent
        population. Defaults to plotting all events in fcsDF.
    bins : int, optional, default: 300
        Resolution of the plotted heatmap.
    scale : str, optional, default: 'linear'
        Which scale to be used on both axes.
    xscale : str, optional, default: 'linear'
        Which scale to be used on the x-axis.
    yscale : str, optional, default: 'linear'
        Which scale to be used on the y-axis.
    thresh : int, optional, default: 1000
        Threshold for the linear-loglike transition for bilog or logicle scales.
    aspect : str
        Aspect of plotted heatmap. Passed on to matplotlib.pyplot.imshow()

    **Keyword arguments**

    cmap : matplotlib.colors.Colormap or str, default: 'jet'
        Color map to use.
        Either string name of existing matplotlib colormap, or a colormap object.
    rcParams : matplotlib.rcParams
        Overrides rcParams with the passed rcParams object.
    mask_where : float,int, default : 0
        scalar of heatmap values to mask, these become white when plotted

    **Returns**

    fig, matplotlib.pyplot.Figure
        matplotlib Figure object
    ax. matplotlib.pyplot.Axes
        matplotlib axes object

    **Examples**

    None currently.
    """
    if vI is sentinel:
        vI = fcsDF.index
    elif len(vI) < 2:
        sys.stderr.write("Passed index contains no events\n")
        return None, None
    if not isinstance(bins, str) and len(vI) < bins:
        bins = len(vI)
    # A combined `scale` forces both axis scales.
    if scale.lower() == 'logicle':
        xscale = 'logicle'
        yscale = 'logicle'
    if scale.lower() == 'bilog':
        xscale = 'bilog'
        yscale = 'bilog'
    # Default x and y lims
    bYlim = False
    bXlim = False
    if 'xlim' in kwargs:
        if not isinstance(kwargs['xlim'], list):
            raise TypeError("if xlim is passed, it must be a list of float/int")
        elif not all(isinstance(i, (float, int)) for i in kwargs['xlim']):
            raise TypeError("Non float/int element encountered in xlim")
        else:
            xscale_limits = kwargs['xlim']
            if xscale.lower() == 'logicle':
                xscale_limits = logicleTransform(xscale_limits, thresh)
            bXlim = True
    if 'ylim' in kwargs:
        if not isinstance(kwargs['ylim'], list):
            raise TypeError("if ylim is passed, it must be a list of float/int")
        elif not all(isinstance(i, (float, int)) for i in kwargs['ylim']):
            raise TypeError("Non float/int element encountered in ylim")
        else:
            yscale_limits = kwargs['ylim']
            if yscale.lower() == 'logicle':
                yscale_limits = logicleTransform(yscale_limits, thresh)
            bYlim = True
    if 'cmap' in kwargs:
        cmap = kwargs['cmap']
        # collect_default: a string cmap name still needs to be resolved to a
        # Colormap object later via plt.get_cmap().
        if not isinstance(cmap, str):
            collect_default = False
        else:
            collect_default = True
    else:
        collect_default = True
        cmap = 'jet'
    if 'rcParams' in kwargs:
        if not isinstance(kwargs['rcParams'], dict):
            raise TypeError("if rcParams is passed, it must be a dict")
        else:
            rcParams = kwargs['rcParams']
            custom_rcParams = True
    else:
        custom_rcParams = False
    if 'mask_where' in kwargs:
        mask_value = kwargs['mask_where']
        assert isinstance(mask_value, (float, int))
    else:
        mask_value = 0

    vX = getGatedVector(fcsDF, x, vI, return_type="nparray")
    vY = getGatedVector(fcsDF, y, vI, return_type="nparray")
    plt.clf()

    if custom_rcParams:
        plt.rcParams = rcParams
    else:
        plt.rcParams['figure.figsize'] = 10, 10
        plt.rcParams['image.cmap'] = cmap
        # extra
        plt.rcParams['font.size'] = 22
        plt.rcParams['xtick.labelsize'] = 16
        plt.rcParams['ytick.labelsize'] = 16
        # plt.rcParams['label.size': 22]

    heatmap, xedges, yedges = getHeatmap(vX, vY, bins, scale, xscale, yscale, thresh)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    # Mask low-density cells so they render as the cmap's "bad" color (white).
    heatmap = np.ma.masked_where(heatmap <= mask_value, heatmap)

    plt.clf()
    fig, ax = plt.subplots()
    plt.imshow(heatmap.T, extent=extent, origin='lower', aspect=aspect, cmap=cmap)
    # CLOSES ALL OPEN FIGURES ON CALL - PERHAPS BAD ?
    plt.close('all')
    fig = plt.figure()
    ax = plt.gca()
    # matplotlib 3.2.x changed behaviour of interpolation
    # see https://github.com/matplotlib/matplotlib/issues/17722
    # and https://matplotlib.org/3.2.1/api/api_changes.html#default-image-interpolation
    plt.imshow(heatmap.T, extent=extent, origin='lower', aspect=aspect,
               interpolation='none')
    plt.xlabel(x)
    plt.ylabel(y)
    if collect_default:
        cmap = plt.get_cmap()
    cmap.set_bad(color='white')  # Zeroes should be white, not blue

    if xscale.lower() == 'logicle':
        ax = plt.gca()
        ax.xaxis.set_major_locator(logicleLocator(linCutOff=thresh))
        ax.xaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
    if yscale.lower() == 'logicle':
        ax = plt.gca()
        ax.yaxis.set_major_locator(logicleLocator(linCutOff=thresh))
        ax.yaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
    if xscale.lower() == 'bilog':
        ax = plt.gca()
        ax.xaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
        ax.xaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
    if yscale.lower() == 'bilog':
        ax = plt.gca()
        ax.yaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
        ax.yaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
    # FIX: the original called ax.xaxis.set_xlim / ax.yaxis.set_xlim, which do
    # not exist on Axis objects and raised AttributeError whenever the
    # xlim/ylim kwargs were supplied; axis limits belong on the Axes itself.
    if bXlim:
        ax.set_xlim(left=xscale_limits[0], right=xscale_limits[1])
    if bYlim:
        ax.set_ylim(bottom=yscale_limits[0], top=yscale_limits[1])
    return fig, ax
3051f0840568be8bba6c5884385dec881e91055d
3,627,186
def _ImportModuleHookBySuffix(name, package=None):
    """Callback when a module is imported through importlib.import_module.

    Args:
        name: Module name; may be relative (leading '.') when ``package`` is
            supplied.
        package: Anchor package used to resolve a relative ``name``.

    Returns:
        The module object produced by the real importlib.import_module.
    """
    _IncrementNestLevel()
    try:
        # Really import modules.
        module = _real_import_module(name, package)
    finally:
        # Post-processing runs even when the import raised, keeping the
        # nest-level bookkeeping balanced.
        # NOTE(review): presumably _ProcessImportBySuffix consumes the nest
        # level bumped above -- confirm against its definition.
        if name.startswith('.'):
            if package:
                # Turn the relative name into an absolute one before processing.
                name = _ResolveRelativeImport(name, package)
            else:
                # Should not happen. Relative imports require the package argument.
                name = None
        if name:
            _ProcessImportBySuffix(name, None, None)
    return module
1d9b11cec308e1a74c2aaac138c5cb3edefce62b
3,627,187
import copy
def from_fake(dbc_db, signals_properties,
              file_hash_blf=("00000000000000000000000000000000"
                             "00000000000000000000000000000000"),
              file_hash_mat=("00000000000000000000000000000000"
                             "00000000000000000000000000000000"),
              **kwargs):
    """Create a data log from fake signal descriptions.

    Each entry of *signals_properties* is a dict of keyword arguments for
    ``create_fake_can_data`` plus a ``"name"`` key naming the signal.

    Parameters
    ----------
    dbc_db : cantools.db.Database
        The dbc database which was used to convert the data from a binary
        format.
    signals_properties : :obj:`dict`
        Key-Value pairs of the properties. See create_fake_can_data for more
        information.
    file_hash_blf : str, optional
        Fake hash of the .blf file (defaults to 64 zeros).
    file_hash_mat : str, optional
        Fake hash of the .mat file (defaults to 64 zeros).

    Returns
    -------
    CANDataLog
        Class of data log.
    """
    # Deep-copy first so popping "name" never mutates the caller's dicts.
    log_data = {
        props.pop("name"): create_fake_can_data(**props)
        for props in copy.deepcopy(signals_properties)
    }
    return CANDataLog(log_data, dbc_db, file_hash_blf, file_hash_mat,
                      source="fake", **kwargs)
c7fb3f188893f6f52f9624c6a051a060bb189fad
3,627,188
def frozen(request: HttpRequest):
    """Stub view shown when redirecting from pages whose functionality is frozen."""
    return render(
        request,
        template_name='core/frozen.html',
        context={'title': _('Frozen feature')},
    )
bb745cb5af702af074423f048e29a60664b7dda4
3,627,189
import os
def find_root_path(resource_name, extension):
    """Find the directory containing a file, given its stem and extension.

    Walks the directory tree rooted at this module's directory and returns the
    *first* directory holding a matching file (e.g. "/home/pi/Media").

    Arguments:
        resource_name -- name of file without the extension
        extension -- ending of file (ex: ".json")

    Output:
        root path to the file, or "" if no match was found
    """
    start_dir = os.path.dirname(__file__)
    for dirpath, _dirnames, filenames in os.walk(start_dir):
        for fname in filenames:
            stem, ext = os.path.splitext(fname)
            if stem == resource_name and ext == extension:
                return dirpath
    print("No file found for that key")
    return ""
6bdd2a0c7e1ed8ea57cc41806773d70f8dcf096b
3,627,190
def zero_intensity_flag(row, name_group):
    """Check whether a sample group's mean intensity is (at most) zero.

    A non-positive mean means the metabolite is absent from that material.

    # Arguments:
        row: one row of a peak table (pandas Series).
        name_group: column labels belonging to the group.

    # Returns:
        True when the group's mean intensity is <= 0, otherwise False.
    """
    group_mean = np.mean(row[name_group])
    return group_mean <= 0
f71b9906032c61988ff3eeccd57fb228d1049526
3,627,191
def ComputeCountryTimeSeriesWaterChange(country_id, feature = None, zoom = 1):
    """Returns a series of water change over time for the country.

    Builds a time series from the JRC Global Surface Water yearly-history
    collection by averaging the 'waterClass' band over the country geometry
    for each yearly image.

    Args:
        country_id: identifier passed to GetFeature when no feature is given.
        feature: optional Earth Engine feature to use as the region; when
            supplied, the reduction scale is refined by ``zoom``.
        zoom: divisor applied to the reduction scale when ``feature`` is given.
    """
    collection = ee.ImageCollection('JRC/GSW1_0/YearlyHistory')
    collection = collection.select('waterClass')
    scale = REDUCTION_SCALE_METERS
    if feature is None:
        feature = GetFeature(country_id)
    else:
        # Finer scale for zoomed-in requests on a caller-supplied feature.
        scale = scale / zoom
    # Compute the mean temperature in the region in each image.
    # NOTE(review): the comment above says "temperature" but the band reduced
    # is 'waterClass' -- presumably copied from a sibling function.
    def ComputeMeanWaterChange(img):
        reduction = img.reduceRegion(ee.Reducer.mean(), feature.geometry(), scale)
        return ee.Feature(None, {
            'system:time_start' : img.get('system:time_start'),
            'water': reduction.get('waterClass')
        })
    chart_data = collection.map(ComputeMeanWaterChange).getInfo()
    # Extract the results as a list of lists.
    def ExtractMeanWaterChange(feature):
        if 'water' in feature['properties'] and feature['properties']['water'] is not None:
            return [
                feature['properties']['system:time_start'],
                feature['properties']['water']
            ]
    # NOTE(review): on Python 3 this returns a lazy map iterator, and entries
    # without a 'water' value become None -- confirm callers materialize and
    # filter it.
    return map(ExtractMeanWaterChange, chart_data['features'])
57c20c4b02b66afe6ba8d05ce15f1a4259818a91
3,627,192
import os
def get_abspath(filepath):
    """Helper to build an absolute path for a test-data file.

    Joins *filepath* onto the module-level ``TESTDATA_DIR``.
    """
    full_path = os.path.join(TESTDATA_DIR, filepath)
    return full_path
29faad6a1c4b554793e6bd3a9ffddacfcc394afd
3,627,193
import os
def get_wiki_img():
    """Return the path to the local ``wiki.png`` image next to this module.

    Raises:
        FileNotFoundError: when the image does not exist on disk.
    """
    here = os.path.dirname(__file__)
    img_path = os.path.join(here, "wiki.png")
    if os.path.exists(img_path):
        return img_path
    raise FileNotFoundError("Unable to find '{}'.".format(img_path))
ef522391665830019f7b48f545291d81b528bd45
3,627,194
import time
def get_largest_component(G, strongly=False):
    """
    Return the largest weakly or strongly connected component from a directed graph.

    Parameters
    ----------
    G : networkx multidigraph
    strongly : bool
        if True, return the largest strongly instead of weakly connected component

    Returns
    -------
    networkx multidigraph
    """
    start_time = time.time()
    original_len = len(list(G.nodes()))

    if strongly:
        # if the graph is not connected retain only the largest strongly connected component
        if not nx.is_strongly_connected(G):
            # FIX: nx.strongly_connected_component_subgraphs was deprecated in
            # networkx 2.1 and removed in 2.4; build the subgraph from the
            # largest component's node set instead.
            largest_scc = max(nx.strongly_connected_components(G), key=len)
            G = G.subgraph(largest_scc).copy()
            msg = ('Graph was not connected, retained only the largest strongly '
                   'connected component ({:,} of {:,} total nodes) in {:.2f} seconds')
            log(msg.format(len(list(G.nodes())), original_len, time.time()-start_time))
    else:
        # if the graph is not connected retain only the largest weakly connected component
        if not nx.is_weakly_connected(G):
            G = get_largest_wcc_subgraph(G)
            msg = ('Graph was not connected, retained only the largest weakly '
                   'connected component ({:,} of {:,} total nodes) in {:.2f} seconds')
            log(msg.format(len(list(G.nodes())), original_len, time.time()-start_time))

    return G
67fe084033c54babc2ee5301ad97a9d00bab77d9
3,627,195
import importlib
import os
def get_git_versions(repos, get_dirty_status=False, verbose=0):
    """ Returns the repository head guid and dirty status and package version number if installed via pip.

    The version is only returned if the repo is installed as pip package without edit mode.

    Args:
        repos ([str]): a list with repositories, e.g. ['qtt', 'qcodes'].
        get_dirty_status (bool): selects whether to use the dulwich package and collect the local code
            changes for the repositories.
        verbose (int): verbosity (0 == silent).

    Returns:
        r (dict): dictionary with repo names, head guid and (optionally) dirty status for each given repository.
    """
    heads = dict()
    dirty_stats = dict()
    for repo in repos:
        try:
            package = importlib.import_module(repo)
            init_location = os.path.split(package.__file__)[0]
            repo_location = os.path.join(init_location, '..')
            repository = Repo(repo_location)
            heads[repo] = repository.head().decode('ascii')
            if get_dirty_status:
                status = porcelain.status(repository)
                # FIX: the previous logic used `len(status.unstaged) == 0`,
                # which flagged a repo as dirty exactly when it had NO
                # unstaged changes. A working tree is dirty when there ARE
                # unstaged or staged changes.
                is_dirty = len(status.unstaged) > 0 or any(
                    len(item) != 0 for item in status.staged.values())
                dirty_stats[repo] = is_dirty
        except (AttributeError, ModuleNotFoundError, NotGitRepositoryError):
            # Repo not importable or not a git checkout: record placeholders.
            heads[repo] = 'none'
            if get_dirty_status:
                dirty_stats[repo] = 'none'
        if verbose:
            print('{0}: {1}'.format(repo, heads[repo]))
    return heads, dirty_stats
78ee6dabffe8ca49c338827be6ca37b6a3956a48
3,627,196
def _argmin(t: 'Tensor', axis=None, isnew: bool = True) -> 'Tensor':
    """Return a Tensor of the indices of the minimum values along *axis*.

    The argmin operation is not differentiable; when gradients are tracked
    the backward pass contributes zeros.

    :param t: input tensor
    :param axis: axis along which to take the argmin (None flattens)
    :param isnew: when True, the result never tracks gradients
    :return: a new Tensor of indices
    """
    indices = t.data.argmin(axis=axis)
    tracks_grad = t.requires_grad and not isnew

    if tracks_grad:
        def zero_grad_fn(grad: np.ndarray) -> 'np.ndarray':
            # Indices carry no gradient information back to the input.
            return np.zeros_like(t.data)

        deps = [Dependency(t, zero_grad_fn)]
    else:
        deps = []

    return Tensor(indices, tracks_grad, deps)
19dd2e9ed604f4296f08381d5b80affb9472fc2c
3,627,197
def new_measure_get_activity_activity(data: dict) -> MeasureGetActivityActivity:
    """Create GetActivityActivity from json."""
    tz = timezone_or_raise(data.get("timezone"))
    fields = {
        "date": arrow_or_raise(data.get("date")).replace(tzinfo=tz),
        "timezone": tz,
        "deviceid": str_or_none(data.get("deviceid")),
        "brand": int_or_raise(data.get("brand")),
        "is_tracker": bool_or_raise(data.get("is_tracker")),
        "steps": int_or_none(data.get("steps")),
        "distance": float_or_raise(data.get("distance")),
        "elevation": float_or_raise(data.get("elevation")),
        "soft": int_or_none(data.get("soft")),
        "moderate": int_or_none(data.get("moderate")),
        "intense": int_or_none(data.get("intense")),
        "active": int_or_none(data.get("active")),
        "calories": float_or_raise(data.get("calories")),
        "totalcalories": float_or_raise(data.get("totalcalories")),
    }
    # All heart-rate fields are optional ints and parsed the same way.
    for hr_key in ("hr_average", "hr_min", "hr_max", "hr_zone_0",
                   "hr_zone_1", "hr_zone_2", "hr_zone_3"):
        fields[hr_key] = int_or_none(data.get(hr_key))
    return MeasureGetActivityActivity(**fields)
dc77b0bc1528a409064626fd2f9c1527058d37a2
3,627,198
import os
def establecer_destino_archivo_imagen(instance, filename):
    """Build the upload destination path for an image attached to *instance*.

    Files are stored as 'app_reservas/contingencia/<instance id>_<filename>'.
    """
    upload_dir = 'app_reservas/contingencia/'
    stored_name = '{0!s}_{1!s}'.format(instance.id, filename)
    return os.path.join(upload_dir, stored_name)
13e233d113ac3232a6e76725b13ef2befcd47feb
3,627,199