Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def contour(data2d, levels, container=None, **kwargs):
    """HIDE"""
    if container is None:
        _checkContainer()
        container = current.container
    current.object = kaplot.objects.Contour(container, data2d, levels, **kwargs)
    return current.object
11,200
def FilterSuboptimal(old_predictions, new_predictions, removed_predictions, min_relative_coverage=0.0, min_relative_score=0.0, min_relative_pide=0.0): """remove suboptimal alignments. """ best_predictions = {} for p in old_predictions: if not best_predictions.has_key(p.mQueryToken): best_predictions[p.mQueryToken] = MyBestEntry() x = best_predictions[p.mQueryToken] x.mQueryCoverage = max(x.mQueryCoverage, p.mQueryCoverage) x.score = max(x.score, p.score) x.mPercentIdentity = max(x.mPercentIdentity, p.mPercentIdentity) nnew = 0 for p in old_predictions: x = best_predictions[p.mQueryToken] if p.mQueryCoverage / x.mQueryCoverage < min_relative_coverage: if param_loglevel >= 2: print "# PRUNING: reason: coverage below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue if p.score / x.score < min_relative_score: if param_loglevel >= 2: print "# PRUNING: reason: score below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue if p.mPercentIdentity / x.mPercentIdentity < min_relative_pide: if param_loglevel >= 2: print "# PRUNING: reason: percent identity below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue new_predictions.append(p) nnew += 1 return nnew
11,201
def send_message(msg_type, kwds):
    """Do some final preprocessing and send the message."""
    if kwds["file"]:
        get_body_from_file(kwds)
    kwargs = trim_args(kwds)
    send(msg_type, send_async=False, **kwargs)
11,202
def process_pair(librispeech_md_file, librispeech_dir, wham_md_file, wham_dir,
                 n_src, pair):
    """Process a pair of sources to mix."""
    utt_pair, noise = pair  # Indices of the utterances and the noise
    # Read the utterance files and get some metadata
    source_info, source_list = read_utterances(
        librispeech_md_file, utt_pair, librispeech_dir)
    # Add the noise
    source_info, source_list = add_noise(
        wham_md_file, wham_dir, noise, source_list, source_info)
    # Compute initial loudness, randomize loudness and normalize sources
    loudness, _, source_list_norm = set_loudness(source_list)
    # Randomly place the speech clips in the mixture
    source_info, source_list_pad = randomly_pad(source_list_norm, source_info, n_src)
    # Do the mixture
    mixture = mix(source_list_pad)
    # Check the mixture for clipping and renormalize if necessary
    # (we pass source_list_norm here because we don't want the zero padding
    # to influence the loudness)
    renormalize_loudness, did_clip = check_for_clipping(mixture, source_list_norm)
    # Compute gain
    gain_list = compute_gain(loudness, renormalize_loudness)
    return source_info, gain_list, did_clip
11,203
def getMultiDriverSDKs(driven, sourceDriverFilter=None):
    """get the sdk nodes that are added through a blendweighted node

    Args:
        driven (string): name of the driven node
        sourceDriverFilter (list, pynode): Driver transforms to filter by,
            if the connected SDK is not driven by this node it will not be returned.

    Returns:
        list: of sdk nodes
    """
    sdkDrivers = []
    for sdkUtility in SDK_UTILITY_TYPE:
        blend_NodePair = pm.listConnections(driven,
                                            source=True,
                                            type=sdkUtility,
                                            exactType=True,
                                            plugs=True,
                                            connections=True,
                                            sourceFirst=True,
                                            scn=True) or []
        if not blend_NodePair:
            continue
        for pairs in blend_NodePair:
            sdkPairs = getConnectedSDKs(pairs[0].nodeName(),
                                        sourceDriverFilter=sourceDriverFilter)
            for sPair in sdkPairs:
                sdkDrivers.append([sPair[0], pairs[1]])
    return sdkDrivers
11,204
def resnet_model_fn(features, labels, mode, model_class, resnet_size, weight_decay, learning_rate_fn, momentum, data_format, version, loss_filter_fn=None, multi_gpu=False): """Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. resnet_size: A single integer for the size of the ResNet model. weight_decay: weight decay loss rate used to regularize learned variables. learning_rate_fn: function that returns the current learning rate given the current global_step momentum: momentum term used for optimization data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. multi_gpu: If True, wrap the optimizer in a TowerOptimizer suitable for data-parallel distribution across multiple GPUs. Returns: EstimatorSpec parameterized according to the input params and the current mode. """ # Generate a summary node for the images tf.summary.image('images', features, max_outputs=6) model = model_class(resnet_size, data_format, version=version) logits = model(features, mode == tf.estimator.ModeKeys.TRAIN) predictions = { 'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor') } if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.losses.softmax_cross_entropy( logits=logits, onehot_labels=labels) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy') tf.summary.scalar('cross_entropy', cross_entropy) # If no loss_filter_fn is passed, assume we want the default behavior, # which is that batch_normalization variables are excluded from loss. if not loss_filter_fn: def loss_filter_fn(name): return 'batch_normalization' not in name # Add weight decay to the loss. loss = cross_entropy + weight_decay * tf.add_n( [tf.nn.l2_loss(v) for v in tf.trainable_variables() if loss_filter_fn(v.name)]) # Create a tensor named cross_entropy for logging purposes. 
tf.identity(loss, name='train_loss') tf.summary.scalar('train_loss', loss) if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() learning_rate = learning_rate_fn(global_step) # Create a tensor named learning_rate for logging purposes tf.identity(learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', learning_rate) optimizer = tf.train.MomentumOptimizer( learning_rate=learning_rate, momentum=momentum) # If we are running multi-GPU, we need to wrap the optimizer. if multi_gpu: optimizer = tf.contrib.estimator.TowerOptimizer(optimizer) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(optimizer.minimize(loss, global_step), update_ops) else: train_op = None accuracy = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions['classes']) metrics = {'acc': accuracy} # Create a tensor named train_accuracy for logging purposes tf.identity(accuracy[1], name='train_accuracy') tf.summary.scalar('train_acc', accuracy[1]) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
11,205
def dataframe_from_stomate(filepattern,largefile=True,multifile=True, dgvmadj=False,spamask=None, veget_npindex=np.s_[:],areaind=np.s_[:], out_timestep='annual',version=1, replace_nan=False): """ Parameters: ----------- filepattern: could be a single filename, or a file pattern out_timestep: the timestep of output file, used to provide information to properly scale the variable values, could be 'annual' or 'daily'. when 'annual', flux_scale_factor = 365 will be used. dgvmadj: use DGVM adjustment, in this case tBIOMASS rathern than TOTAL_M is used. veget_npindex: passed to the function of get_pftsum: 1. could be used to restrict for example the PFT weighted average only among natural PFTs by setting veget_npindex=np.s_[:,0:11,:,:]. It will be used to slice VEGET_MAX variable. 2. could also be used to slice only for some subgrid of the whole grid, eg., veget_npindex=np.s_[...,140:300,140:290]. Notes: ------ 1. This function could handle automatically the case of a single-point file or a regional file. When a single-point file (pattern) is given, PFT-weighted carbon density will be used rather than the total C over the spatial area. """ gnc_sto = gnc.Ncdata(filepattern,largefile=largefile,multifile=multifile, replace_nan=replace_nan) if version == 1: # list all pools and fluxes list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN'] list_flux_pftsum = ['CONVFLUX','CFLUX_PROD10','CFLUX_PROD100','HARVEST_ABOVE'] list_flux = list_flux_pft+list_flux_pftsum list_pool = ['TOTAL_M','TOTAL_SOIL_CARB'] list_all = list_flux_pft+list_flux_pftsum+list_pool nlist_var = [list_flux_pft, list_flux_pftsum, list_pool] for varlist in nlist_var: gnc_sto.retrieve_variables(varlist) gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex) gnc_sto.remove_variables(varlist) #handle adjustment of different variables if dgvmadj: gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC']) gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC']) # we have to treat product pool independently try: gnc_sto.retrieve_variables(['PROD10','PROD100']) gnc_sto.pftsum.PROD10 = gnc_sto.d1.PROD10.sum(axis=1) gnc_sto.pftsum.PROD100 = gnc_sto.d1.PROD100.sum(axis=1) gnc_sto.remove_variables(['PROD10','PROD100']) except KeyError: gnc_sto.pftsum.PROD10 = gnc_sto.pftsum.NPP * 0. gnc_sto.pftsum.PROD100 = gnc_sto.pftsum.NPP * 0. # get the spatial operation and pass them into dataframe if not gnc_sto._SinglePoint: gnc_sto.get_spa() dft = pa.DataFrame(gnc_sto.spasum.__dict__) else: dft = pa.DataFrame(gnc_sto.pftsum.__dict__) # treat the output time step if out_timestep == 'annual': flux_scale_factor = 365. dft['CO2FLUX'] = dft['CO2FLUX']/30. 
#CO2FLUX is monthly output elif out_timestep == 'daily': flux_scale_factor = 1 dft[list_flux] = dft[list_flux]*flux_scale_factor # get total carbon pool dft['PROD'] = dft['PROD10'] + dft['PROD100'] dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD'] # calcate NBP dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE']-dft['HARVEST_ABOVE']-dft['HET_RESP'] dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100']) elif version == 2: # list all pools and fluxes list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN','METHANE','RANIMAL'] list_flux_pftsum = ['CONVFLUX_LCC','CONVFLUX_HAR','CFLUX_PROD10_LCC','CFLUX_PROD10_HAR','CFLUX_PROD100_LCC','CFLUX_PROD100_HAR','HARVEST_ABOVE'] list_flux = list_flux_pft+list_flux_pftsum list_pool = ['TOTAL_M','TOTAL_SOIL_CARB','LEAF_M','SAP_M_AB','SAP_M_BE', 'HEART_M_AB','HEART_M_BE','ROOT_M','FRUIT_M','RESERVE_M', 'LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE'] list_all = list_flux_pft+list_flux_pftsum+list_pool nlist_var = [list_flux_pft, list_flux_pftsum, list_pool] for varlist in nlist_var: gnc_sto.retrieve_variables(varlist,mask=spamask) gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex) gnc_sto.remove_variables(varlist) #handle adjustment of different variables if dgvmadj: if veget_npindex != np.s_[:]: raise ValueError("dgvmadj is not handled when veget_npindex does not include all") else: gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'],mask=spamask) gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC'],mask=spamask) gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC']) # we have to treat product pool independently list_prod = ['PROD10_LCC','PROD10_HAR','PROD100_LCC','PROD100_HAR'] gnc_sto.retrieve_variables(list_prod,mask=spamask) for var in list_prod: gnc_sto.pftsum.__dict__[var] = gnc_sto.d1.__dict__[var][veget_npindex].sum(axis=1) print gnc_sto.d1.__dict__['PROD10_LCC'][veget_npindex].shape print gnc_sto.d1.__dict__['PROD10_LCC'].shape print gnc_sto.pftsum.__dict__['PROD10_LCC'].shape gnc_sto.remove_variables(list_prod) # get the spatial operation and pass them into dataframe if not gnc_sto._SinglePoint: gnc_sto.get_spa(areaind=areaind) dft = pa.DataFrame(gnc_sto.spasum.__dict__) else: dft = pa.DataFrame(gnc_sto.pftsum.__dict__) # 2016-03-30: the shape of gnc_sto.d1.ContAreas could be # (nlat,nlon) when there is no "CONTFRAC" or "NONBIOFRAC" in # the history file, but could be (ntime,nlat,nlon) when they're # present. # # [++temporary++] treat CO2_TAKEN # # In case of shifting cultivation is simulated, the CO2_TAKEN # # could be big at the last day. However the veget_max is kept # # the same as the old one over the year, so we have to use # # last-year CO2_TAKEN multiply with the next-year veget_max. 
# gnc_sto.retrieve_variables(['CO2_TAKEN']) # co2taken_pftsum = np.ma.sum(gnc_sto.d1.CO2_TAKEN[:-1] * gnc_sto.d1.VEGET_MAX[1:],axis=1) # if not gnc_sto._SinglePoint: # dt = np.sum(co2taken_pftsum*gnc_sto.d1.ContAreas,axis=(1,2)) # else: # dt = co2taken_pftsum # dft['CO2_TAKEN'].iloc[:-1] = dt # treat the output time step if out_timestep == 'annual': flux_scale_factor = 365. dft['CO2FLUX'] = dft['CO2FLUX']/30. #CO2FLUX is monthly output elif out_timestep == 'daily': flux_scale_factor = 1 dft[list_flux] = dft[list_flux]*flux_scale_factor # get total carbon pool dft['PROD'] = dft['PROD10_LCC'] + dft['PROD10_HAR'] + dft['PROD100_LCC'] + dft['PROD100_HAR'] dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD'] dft['LITTER_AB'] = dft['LITTER_STR_AB'] + dft['LITTER_MET_AB'] dft['LITTER_BE'] = dft['LITTER_MET_BE'] + dft['LITTER_STR_BE'] dft['LITTER'] = dft['LITTER_BE'] + dft['LITTER_AB'] dft['BIOMASS_AB'] = dft.SAP_M_AB + dft.HEART_M_AB + dft.LEAF_M + dft.FRUIT_M + dft.RESERVE_M dft['BIOMASS_BE'] = dft.SAP_M_BE + dft.HEART_M_BE + dft.ROOT_M # treat GM dft['RANIMAL'] = dft['RANIMAL']*1000 dft['METHANE'] = dft['METHANE']*1000 dft['GMsource'] = dft['RANIMAL'] + dft['METHANE'] # treat LUC dft['CONVFLUX'] = dft['CONVFLUX_LCC'] + dft['CONVFLUX_HAR'] dft['CFLUX_PROD10'] = dft['CFLUX_PROD10_LCC'] + dft['CFLUX_PROD10_HAR'] dft['CFLUX_PROD100'] = dft['CFLUX_PROD100_LCC'] + dft['CFLUX_PROD100_HAR'] dft['LUCsource'] = dft['CONVFLUX'] + dft['CFLUX_PROD10'] + dft['CFLUX_PROD100'] # calcate NBP dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE'] \ -dft['HARVEST_ABOVE']-dft['HET_RESP']-dft['RANIMAL']-dft['METHANE'] dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100']) # litter dft['LITTER'] = dft[['LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE']].sum(axis=1) dft['LITTER_AB'] = dft[['LITTER_STR_AB','LITTER_MET_AB']].sum(axis=1) dft['LITTER_BE'] = dft[['LITTER_STR_BE','LITTER_MET_BE']].sum(axis=1) dft['SOILC'] = dft['TOTAL_SOIL_CARB'] - dft['LITTER'] else: raise ValueError("Unknown version!") gnc_sto.close() return dft
11,206
def random_application(request, event, prev_application):
    """
    Get a new random application for a particular event,
    that hasn't been scored by the request user.
    """
    from applications.models import Application  # circular import

    return Application.objects.filter(
        form__event=event
    ).exclude(
        pk=prev_application.id
    ).exclude(
        scores__user=request.user
    ).order_by('?').first()
11,207
def flutter_velocity(pressures, speeds_of_sound,
                     root_chord, tip_chord, semi_span, thickness,
                     shear_modulus=2.62e9):
    """Calculate flutter velocities for a given fin design.

    Fin dimensions are given via the root_chord, tip_chord, semi_span and
    thickness arguments. All dimensions are in centimetres. Use shear_modulus
    to specify the shear modulus of the fin material in Pascals.

    >>> import numpy as np
    >>> zs = np.linspace(0, 30000, 100)
    >>> ps, _, ss = model_atmosphere(zs)
    >>> vels = flutter_velocity(ps, ss, 20, 10, 10, 0.2)
    >>> assert vels.shape == ps.shape

    Args:
        pressures (np.array): 1-d array of atmospheric pressures in Pascals
        speeds_of_sound (np.array): 1-d array of speeds of sound in m/s
        root_chord: fin root chord (cm)
        tip_chord: fin tip chord (cm)
        semi_span: fin semi-span (cm)
        thickness: fin thickness (cm)
        shear_modulus: fin material shear modulus (Pascals)

    Returns:
        A 1-d array containing corresponding flutter velocities in m/s.
    """
    # Ensure input is 1d array of floating point values
    pressures = np.atleast_1d(pressures).astype(np.float)

    # Compute derived dimensions from fin specification.
    S = 0.5 * (root_chord + tip_chord) * semi_span  # Area
    Ra = (semi_span * semi_span) / S                # Aspect ratio
    k = tip_chord / root_chord                      # Taper ratio

    Vf = np.zeros_like(pressures)

    A = 1.337 * Ra**3 * pressures * (k+1)
    B = 2 * (Ra + 2) * (thickness / root_chord)**3
    Vf = speeds_of_sound * np.sqrt(shear_modulus * B / A)

    return Vf
11,208
def replace_within(begin_re, end_re, source, data):
    """Replace text in source between two delimiters with specified data."""
    pattern = r'(?s)(' + begin_re + r')(?:.*?)(' + end_re + r')'
    source = re.sub(pattern, r'\1@@REPL@@\2', source)
    if '@@REPL@@' in source:
        source = source.replace('@@REPL@@', data)
    else:
        log.log('')
        log.log('ERROR: Cannot match {!r} and {!r}'.format(begin_re, end_re))
        log.log('')
    return source
11,209
def mean(
    a: Numeric,
    axis: Union[Int, None] = None,
    squeeze: bool = True
):  # pragma: no cover
    """Take the mean of a tensor, possibly along an axis.

    Args:
        a (tensor): Tensor.
        axis (int, optional): Optional axis.
        squeeze (bool, optional): Squeeze the dimension after the reduction.
            Defaults to `True`.

    Returns:
        tensor: Reduced tensor.
    """
11,210
def if_binary_exists(binary_name, cc):
    """
    Returns the path of the requested binary if it exists and clang is being used, None if not

    :param binary_name: Name of the binary
    :param cc: Path to CC binary
    :return: A path to binary if it exists and clang is being used, None if either condition is false
    """
    binary = None
    if "clang" in cc:
        binary = shutil.which(binary_name,
                              path=os.path.dirname(cc) + ":" + os.environ['PATH'])
    return binary
11,211
def test_email_chart_report_schedule_with_csv( csv_mock, email_mock, mock_open, mock_urlopen, create_report_email_chart_with_csv, ): """ ExecuteReport Command: Test chart email report schedule with CSV """ # setup csv mock response = Mock() mock_open.return_value = response mock_urlopen.return_value = response mock_urlopen.return_value.getcode.return_value = 200 response.read.return_value = CSV_FILE with freeze_time("2020-01-01T00:00:00Z"): AsyncExecuteReportScheduleCommand( TEST_ID, create_report_email_chart_with_csv.id, datetime.utcnow() ).run() notification_targets = get_target_from_report_schedule( create_report_email_chart_with_csv ) # assert that the link sent is correct assert ( f'<a href="http://0.0.0.0:8080/superset/slice/' f'{create_report_email_chart_with_csv.chart.id}/">Explore in Superset</a>' in email_mock.call_args[0][2] ) # Assert the email smtp address assert email_mock.call_args[0][0] == notification_targets[0] # Assert the email csv file smtp_images = email_mock.call_args[1]["data"] assert smtp_images[list(smtp_images.keys())[0]] == CSV_FILE # Assert logs are correct assert_log(ReportState.SUCCESS)
11,212
def random_outputs_for_tier(rng, input_amount, scale, offset, max_count, allow_extra_change=False): """ Make up to `max_number` random output values, chosen using exponential distribution function. All parameters should be positive `int`s. None can be returned for expected types of failures, which will often occur when the input_amount is too small or too large, since it becomes uncommon to find a random assortment of values that satisfy the desired constraints. On success, this returns a list of length 1 to max_count, of non-negative integer values that sum up to exactly input_amount. The returned values will always exactly sum up to input_amount. This is done by renormalizing them, which means the actual effective `scale` will vary depending on random conditions. If `allow_extra_change` is passed (this is abnormal!) then this may return max_count+1 outputs; the last output will be the leftover change if all max_counts outputs were exhausted. """ if input_amount < offset: return None lambd = 1./scale remaining = input_amount values = [] # list of fractional random values without offset for _ in range(max_count+1): val = rng.expovariate(lambd) # A ceil here makes sure rounding errors won't sometimes put us over the top. # Provided that scale is much larger than 1, the impact is negligible. remaining -= ceil(val) + offset if remaining < 0: break values.append(val) else: if allow_extra_change: result = [(round(v) + offset) for v in values[:-1]] result.append(input_amount - sum(result)) return result # Fail because we would need too many outputs # (most likely, scale was too small) return None assert len(values) <= max_count if not values: # Our first try put us over the limit, so we have nothing to work with. # (most likely, scale was too large) return None desired_random_sum = input_amount - len(values) * offset assert desired_random_sum >= 0 # Now we need to rescale and round the values so they fill up the desired. # input amount exactly. We perform rounding in cumulative space so that the # sum is exact, and the rounding is distributed fairly. cumsum = list(itertools.accumulate(values)) rescale = desired_random_sum / cumsum[-1] normed_cumsum = [round(rescale * v) for v in cumsum] assert normed_cumsum[-1] == desired_random_sum differences = ((a - b) for a,b in zip(normed_cumsum, itertools.chain((0,),normed_cumsum))) result = [(offset + d) for d in differences] assert sum(result) == input_amount return result
11,213
def test_slices_any(input, expected):
    """Test the "any()" function."""
    assert input.any(lambda s: "abc" in s.raw) == expected
11,214
def test_slow_to_event_model():
    """This doesn't use threads so it should be slower due to sleep"""
    source = Stream(asynchronous=True)
    t = FromEventStream("event", ("data", "det_image"), source, principle=True)
    assert t.principle
    a = t.map(slow_inc)
    L = a.sink_to_list()
    futures_L = a.sink_to_list()
    n = a.SimpleToEventStream(("ct",))
    n.sink(print)
    tt = t.sink_to_list()
    p = n.pluck(0).sink_to_list()
    d = n.pluck(1).sink_to_list()
    t0 = time.time()
    for gg in y(10):
        yield source.emit(gg)
    while len(L) < len(futures_L):
        yield gen.sleep(.01)
    t1 = time.time()
    # check that this was faster than running in series
    td = t1 - t0
    ted = .5 * 10
    assert td > ted
    assert tt
    assert p == ["start", "descriptor"] + ["event"] * 10 + ["stop"]
    assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
11,215
def create_connection(host, username, password):
    """Create a database connection to the MySQL database on the given host.

    :return: Connection object or None
    """
    try:
        conn = mysql.connect(host=host,        # your host, usually db-guenette_neutrinos.rc.fas.harvard.edu
                             user=username,    # your username
                             passwd=password,  # your password
                             db='guenette_neutrinos')  # name of the data base
                             # autocommit=False)  # Prevent automatic commits
        return conn
    except mysql.Error as e:
        print(e)
        return None
11,216
def Dadjust(profile_ref, profile_sim, diffsys, ph, pp=True, deltaD=None, r=0.02): """ Adjust diffusion coefficient fitting function by comparing simulated profile against reference profile. The purpose is to let simulated diffusion profile be similar to reference profile. Parameters ---------- profile_ref : DiffProfile Reference diffusion profile profile_sim : DiffProfile Simulated diffusion profile diffsys : DiffSystem Diffusion system ph : int Phase # to be adjusted, 0 <= ph <= diffsys.Np-1 Xp : 1d-array Reference composition to adjust their corresponding diffusivities. If provided, spline function Dfunc must be determined by [Xp, Dp] alone, where Dp = exp(Dfunc(Xp)). pp : bool, optional Point Mode (True) or Phase Mode (False). Point Mode adjusts each Dp at Xp by itself. In Phase Mode, all Dp are adjusted by the same rate, i.e. the diffusivity curve shape won't change. deltaD: float, optional Only useful at Phase Mode. deltaD gives the rate to change diffusion coefficients DC. DC = DC * 10^deltaD r : float, optional Only useful at Phase Mode, default = 0.02, 0 < r < 1. r gives the range to calculate the concentration gradient around X, [X-r, X+r]. """ dref, Xref, Ifref = profile_ref.dis, profile_ref.X, profile_ref.If dsim, Xsim, Ifsim = profile_sim.dis, profile_sim.X, profile_sim.If if ph >= diffsys.Np: raise ValueError('Incorrect phase #, 0 <= ph <= %i' % diffsys.Np-1) if pp and 'Xspl' not in dir(diffsys): raise ValueError('diffsys must have Xspl properties in per-point mode') Dfunc, Xr, Np = diffsys.Dfunc[ph], diffsys.Xr[ph], diffsys.Np rate = 1 # If there is phase consumed, increase adjustment rate if len(Ifref) != len(Ifsim): print('Phase consumed found, increase adjustment rate') rate = 2 if Xr[1] > Xr[0]: idref = np.where((Xref >= Xr[0]) & (Xref <= Xr[1]))[0] idsim = np.where((Xsim >= Xr[0]) & (Xsim <= Xr[1]))[0] else: idref = np.where((Xref <= Xr[0]) & (Xref >= Xr[1]))[0] idsim = np.where((Xsim <= Xr[0]) & (Xsim >= Xr[1]))[0] if 'Xspl' in dir(diffsys): Xp = diffsys.Xspl[ph] else: Xp = np.linspace(Xr[0], Xr[1], 30) Dp = np.exp(splev(Xp, Dfunc)) # If this is consumed phase, increase DC by 2 or 10^deltaD if len(idsim) == 0: Dp = np.exp(splev(Xp, Dfunc)) if deltaD is None: return Dfunc_spl(Xp, Dp*2) else: return Dfunc_spl(Xp, Dp*10**deltaD) dref, Xref = dref[idref], Xref[idref] dsim, Xsim = dsim[idsim], Xsim[idsim] # Per phase adjustment if not pp: if deltaD is not None: return Dfunc_spl(Xp, Dp*10**deltaD) # Calculate deltaD by phase width # When it comes to first or last phase, data closed to end limits are not considered fdis_ref = disfunc(dref, Xref) fdis_sim = disfunc(dsim, Xsim) X1, X2 = Xr[0], Xr[1] if ph == 0: X1 = Xr[0]*0.9 + Xr[1]*0.1 if ph == Np-1: X2 = Xr[0]*0.1 + Xr[1]*0.9 ref = splev([X1, X2], fdis_ref) sim = splev([X1, X2], fdis_sim) wref = ref[1]-ref[0] wsim = sim[1]-sim[0] Dp *= np.sqrt(wref/wsim) return Dfunc_spl(Xp, Dp) # Point Mode adjustment for i in range(len(Xp)): # X1, X2 is the lower, upper bound to collect profile data # X1, X2 cannot exceed phase bound Xr if Xr[0] < Xr[1]: X1, X2 = max(Xp[i]-r, Xr[0]), min(Xp[i]+r, Xr[1]) else: X1, X2 = max(Xp[i]-r, Xr[1]), min(Xp[i]+r, Xr[0]) # Calculate the gradient inside [X1, X2] by linear fitting fdis_ref = disfunc(dref, Xref) fdis_sim = disfunc(dsim, Xsim) Xf = np.linspace(X1, X2, 10) pref = np.polyfit(splev(Xf, fdis_ref), Xf, 1)[0] psim = np.polyfit(splev(Xf, fdis_sim), Xf, 1)[0] # Adjust DC by gradient difference Dp[i] *= (psim/pref)**rate return Dfunc_spl(Xp, Dp)
11,217
def setup_test():
    """setup test"""

    def create_test_tables(db):
        """create test tables"""
        db("""
        create table if not exists person (
            id integer PRIMARY KEY AUTOINCREMENT,
            name varchar(100),
            age integer,
            kids integer,
            salary decimal(10,2),
            birthdate date
        )
        """)

    def delete_test_tables(db):
        """drop test tables"""
        db('drop table if exists person')

    db = zoom.database.database('sqlite3', ':memory:')
    delete_test_tables(db)
    create_test_tables(db)
    return db
11,218
def validate_workload(args):
    """
    Validate a workload description for JSSPP OSP.

    :param args: The command line arguments passed to this command.
    :return:
    """
    logging.info('Processing file %s', args.file.name)

    logging.info('Validating structural requirements')
    schema = load_workload_schema()
    instance = parse_workload(args.file)

    error = validate(schema, instance)
    if error is not None:
        path = '/'.join(map(str, error.absolute_path))
        logging.error('File does not match schema at %s: %s', path, error)
        sys.exit(1)

    logging.info('Format OK')
11,219
def compute_levenshtein_blocks(seq1, seq2, max_complexity=1e8): """Compute the Levenshtein blocks of insertion, deletion, replacement. """ # TODO: better method for dealing with long sequences? l1, l2 = len(seq1), len(seq2) if l1 * l2 > max_complexity: return [("change", (0, l1), (0, l2))] def block_format(op, s1, e1, s2, e2): if op == "delete": return (op, (s1, e1 + 1), (s2, e2)) if op == "insert": return (op, (s1, e1), (s2, e2 + 1)) else: return (op, (s1, e1 + 1), (s2, e2 + 1)) edits = Levenshtein.editops(seq1, seq2) if len(edits) == 0: return [] bop, s1, s2 = edits[0] e1, e2 = s1, s2 blocks = [] for (op, _e1, _e2) in edits[1:]: continuity = any( [ all([op == "delete", _e1 == e1 + 1, e2 == _e2]), all([op == "insert", _e1 == e1, _e2 == e2 + 1]), all([op == "replace", _e1 == e1 + 1, _e2 == e2 + 1]), ] ) if op == bop and continuity: e1, e2 = _e1, _e2 else: blocks.append(block_format(bop, s1, e1, s2, e2)) bop, s1, s2 = op, _e1, _e2 e1, e2 = s1, s2 blocks.append(block_format(bop, s1, e1, s2, e2)) return blocks
11,220
def mapdict_values(function, dic):
    """
    Apply a function to a dictionary's values, creating a new dictionary with
    the same keys and new values created by applying the function to the old ones.

    :param function: A function that takes the dictionary value as argument
    :param dic:      A dictionary
    :return:         A new dictionary with same keys and values changed

    Example:

    >>> dic1 = { 'a' : 10, 'b' : 20, 'c' : 30 }
    >>> mapdict_values(lambda x: x*2, dic1)
    {'a': 20, 'b': 40, 'c': 60}
    >>> dic1
    {'a': 10, 'b': 20, 'c': 30}
    """
    return dict(map(lambda x: (x[0], function(x[1])), dic.items()))
11,221
async def callback( request: Request, code: str = None, error: Optional[str] = Query(None), db: AsyncSession = Depends(get_db), ): """ Complete the OAuth2 login flow """ client = get_discord_client() with start_span(op="oauth"): with start_span(op="oauth.authorization_token"): # Get the authorization token if code: token = await client.authorize_access_token(request) else: return RedirectResponse(URL("/login").include_query_params(error=error)) with start_span(op="oauth.user_info"): # Get the user's info client.token = token user_info = await client.userinfo(token=token) user_id = int(user_info.get("id")) with start_span(op="permissions"): with start_span(op="permissions.access"): # Get the user's role ids roles = list(map(lambda r: r.id, await get_user_roles(user_id))) # Determine if the user has panel access if (await CONFIG.panel_access_role()) not in roles: return RedirectResponse("/login?error=unauthorized") with start_span(op="permissions.admin"): # Get all the user's guilds async with ClientSession() as session: async with session.get( "https://discord.com/api/v8/users/@me/guilds", headers={"Authorization": f"Bearer {token['access_token']}"}, ) as response: guilds = await response.json() # Determine if the user has admin access is_owner = any( map( lambda g: g.get("id") == str(SETTINGS.discord_guild_id) and g.get("owner"), guilds, ) ) is_admin = (await CONFIG.management_role()) in roles or is_owner # Save the user's info to the database user = User( id=user_id, username=user_info["username"], avatar=user_info["picture"], is_admin=is_admin, ) # Insert and ignore failures try: db.add(user) await db.commit() except IntegrityError: pass # Store the info in the session request.session["logged_in"] = True request.session["user"] = dict(user_info) request.session["is_admin"] = is_admin request.session["expiration"] = dict(token).get("expires_at") return RedirectResponse("/login/complete")
11,222
def set_serial_port(p):
    """Sets the name/path of the serial/tty port to use for a key+sounder/loop interface

    Parameters
    ----------
    p : str
        The 'COM' port for Windows, the 'tty' device path for Mac and Linux
    """
    global serial_port
    serial_port = noneOrValueFromStr(p)
    app_config.set(__CONFIG_SECTION, __SERIAL_PORT_KEY, serial_port)
11,223
def align_centroids(config, ref):
    """Align centroids"""
    diff_centroids = np.round(ref.mean(axis=0) - config.mean(axis=0))
    # diff_centroids = np.round(diff_centroids).astype(int)
    config = config + diff_centroids
    return config
11,224
def _make_headers_df(headers_response):
    """
    Parses the headers portion of the watson response and creates the header dataframe.

    :param headers_response: the ``row_header`` or ``column_header`` array as returned
        from the Watson response
    :return: the completed header dataframe
    """
    headers_df = util.make_dataframe(headers_response)
    headers_df = headers_df[
        ["text", "column_index_begin", "column_index_end", "row_index_begin",
         "row_index_end", "cell_id", "text_normalized"]]
    return headers_df
11,225
def getMemInfo() -> CmdOutput:
    """Returns the RAM size in bytes.

    Returns:
        CmdOutput: The output of the command, as a `CmdOutput` instance containing
        `stdout` and `stderr` as attributes.
    """
    return runCommand(exe_args=ExeArgs("wmic", ["memorychip", "get", "capacity"]))
11,226
def test_add(integration_test_config, service_instance):
    """
    Test esxi add
    """
    if integration_test_config["esxi_manage_test_instance"]:
        ret = esxi.add(
            integration_test_config["esxi_manage_test_instance"]["name"],
            integration_test_config["esxi_manage_test_instance"]["user"],
            integration_test_config["esxi_manage_test_instance"]["password"],
            integration_test_config["esxi_manage_test_instance"]["cluster"],
            integration_test_config["esxi_manage_test_instance"]["datacenter"],
            verify_host_cert=False,
            service_instance=service_instance,
        )
        assert ret["state"] == "connected"
    else:
        pytest.skip("test requires esxi manage test instance credentials")
11,227
def svn_swig_py_make_editor(*args):
    """svn_swig_py_make_editor(PyObject * py_editor, apr_pool_t pool)"""
    return _delta.svn_swig_py_make_editor(*args)
11,228
def register_confirm(request, activation_key):
    """finish confirmation and activate the account

    Args:
        request: the http request
        activation_key: the activation key

    Returns:
        Http redirect to successful page
    """
    user_safety = get_object_or_404(UserSafety, activation_key=activation_key)
    if user_safety.user.is_confirmed:
        return HttpResponseRedirect('/home/project')
    if user_safety.key_expires < timezone.now():
        return render_to_response('accounts/confirmExpires.html')
    user = user_safety.user
    user.is_confirmed = True
    user.save()
    return render_to_response('accounts/confirmed.html')
11,229
def get_best_z_index(classifications):
    """Get optimal z index based on quality classifications

    Ties are broken using the index nearest to the center of the sequence
    of all possible z indexes
    """
    nz = len(classifications)
    best_score = np.min(classifications)
    top_z = np.argwhere(np.array(classifications) == best_score).ravel()
    return top_z[np.argmin(np.abs(top_z - (nz // 2)))]
11,230
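A minimal usage sketch for the `get_best_z_index` snippet above (assuming only `numpy` is available as `np`); the scores here are made up to show how a tie on the best score is broken in favour of the index nearest the centre of the stack:

import numpy as np

# quality scores for a 4-slice z-stack; lower is better
classifications = [3, 1, 1, 2]

# indices 1 and 2 tie on the best score (1); index 2 is nearer the centre (4 // 2 == 2)
best_z = get_best_z_index(classifications)
print(best_z)  # 2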
def _raise_404():
    """Raise 404 error"""
    raise _exc.ObjectNotFoundError("Object not found")
11,231
def mean_z_available():
    """Return True if an annual_mean_z.nc file is present."""
    if glob.glob("annual_mean_z.nc"):
        return True
    return False
11,232
def make_conversations_wb(): """ Create a report of all conversations from system messages """ print "Making Conversations XLSX Report" wb = xl.Workbook() ws = wb.active header = ('id','sent','auto','short','topic','status','delivery_delta','response_delta', 'participant','nurse','total','p_topic','% on topic','order',) widths = {'B':20,'C':28,'D':20,'E':25,'L':30} xl_add_header_row(ws,header,widths) auto_memory = {} cur_system , first_response , p_topic = None , None , None counts, order = collections.defaultdict(int) , [] two_way = cont.Contact.objects.filter(study_group='two-way').prefetch_related('message_set') # for p in two_way.filter(study_id__in=('0003','0803')): for p in two_way.all(): for msg in p.message_set.all().order_by('created'): if msg.is_system is True: if cur_system is not None: if cur_system.auto not in auto_memory: auto_memory[cur_system.auto] = cur_system.get_auto() auto = auto_memory[cur_system.auto] delivery_delta = (cur_system.external_success_time - cur_system.created).total_seconds() / 3600.0 if cur_system.external_success_time else '' if first_response is not None: response_delta = (first_response.created - cur_system.created).total_seconds() / 3600.0 p_topic = first_response.topic else: response_delta, p_topic = '', '' if cur_system.external_status not in ('Success','Sent',''): order = cur_system.reason else: order = ','.join(order) # Append Conversation Row for System Message ws.append(( p.study_id, cur_system.created, cur_system.auto, cur_system.auto_type, auto.comment if auto else '', cur_system.external_status, delivery_delta, response_delta, counts['in'], counts['nurse'], counts['total'], p_topic, float(counts['topic']) / counts['in'] if counts['in'] > 0 else 0, order )) cur_system , first_response = msg , None counts, order = collections.defaultdict(int) , [] elif msg.is_outgoing is True: # Non System Outgoing Message counts['nurse'] += 1 order.append('n') else: # incomming message if first_response is None: first_response = msg counts['in'] += 1 counts['topic'] += 1 if msg.topic == first_response.topic else 0 order.append('p') counts['total'] += 1 # Print last row if cur_system is not None: if cur_system.auto not in auto_memory: auto_memory[cur_system.auto] = cur_system.get_auto() auto = auto_memory[cur_system.auto] delivery_delta = (cur_system.external_success_time - cur_system.created).total_seconds() / 3600.0 if cur_system.external_success_time else '' if first_response is not None: response_delta = (first_response.created - cur_system.created).total_seconds() / 3600.0 p_topic = first_response.topic else: response_delta, p_topic = '', '' if cur_system.external_status not in ('Success','Sent',''): order = cur_system.reason else: order = ','.join(order) ws.append(( p.study_id, cur_system.created, cur_system.auto, cur_system.auto_type, auto.comment if auto else '', cur_system.external_status, delivery_delta, response_delta, counts['in'], counts['nurse'], counts['total'], p_topic, float(counts['topic']) / counts['in'] if counts['in'] > 0 else 0, order )) cur_system , first_response = None , None counts, order = collections.defaultdict(int) , [] wb.save('ignore/conversations.xlsx')
11,233
def wait_for_url(monitor_url, status_code=None, timeout=None):
    """Blocks until the URL is available"""
    if not timeout:
        timeout = URL_TIMEOUT
    end_time = time.time() + timeout
    while (end_time - time.time()) > 0:
        if is_url(monitor_url, status_code):
            return True
        time.sleep(1)
    LOG.error('URL %s could not be reached after %s seconds', monitor_url, timeout)
    return False
11,234
def sources_from_arxiv(eprint):
    """
    Download sources on arXiv for a given preprint.

    :param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``).
    :returns: A ``TarFile`` object of the sources of the arXiv preprint.
    """
    r = requests.get("http://arxiv.org/e-print/%s" % (eprint,))
    file_object = io.BytesIO(r.content)
    return tarfile.open(fileobj=file_object)
11,235
def nvidia_smi_gpu_memused():  # pragma: no cover
    """Returns the GPU memory used by the process.
    (tested locally, cannot be tested on Travis CI bcs no GPU available)

    Returns
    -------
    int
        [MiB]
    """
    # if theano.config.device=='cpu': return -2
    try:
        xml = subprocess.Popen(['nvidia-smi', '-q', '-x'],
                               stdout=subprocess.PIPE).communicate()[0]
        root = ET.fromstring(xml)
        for gpu in root.findall('gpu'):
            for proc in gpu.find('processes').findall('process_info'):
                if int(proc.find('pid').text) == os.getpid():
                    return int(proc.find('used_memory').text.split(' ')[0])
    except:
        return -1
    return -1
11,236
def render():
    """
    This method renders the HTML website including the isOnline status and
    the last 30 database entries.
    :return:
    """
    online = isonline()
    return render_template("index.html",
                           news=News.query.order_by(News.id.desc()).limit(30),
                           online=online)
11,237
def iterate(f: Callable[[_T], _T], x: _T = 0) -> Generator[_T, None, None]:
    """Iterate produces an infinite sequence of x, f(x), f(f(x)), ...

    See Clojure's iterate.
    """
    while True:
        yield x
        x = f(x)
11,238
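A small usage sketch for the `iterate` generator above; since the sequence is infinite, `itertools.islice` is used here to take a finite prefix:

import itertools

# doubling sequence starting from 1: 1, 2, 4, 8, 16, ...
first_five = list(itertools.islice(iterate(lambda x: x * 2, 1), 5))
print(first_five)  # [1, 2, 4, 8, 16]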
def test_serialization():
    """ testing that None values are removed when serializing """
    bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None)  # type:ignore forcing some None/null values
    bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2)
    p1 = t2.TPoint(x=0.1, y=0.1)
    p2 = t2.TPoint(x=0.3, y=None)  # type:ignore
    geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])
    geo_s = t2.TGeometrySchema()
    s: str = geo_s.dumps(geo)
    assert not "null" in s
    geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])
    s: str = geo_s.dumps(geo)
    assert not "null" in s
11,239
def write_programs_rst(): """ Genereate the RST. """ parts = [] # Create title parts.append('Program classes') parts.append('=' * len(parts[-1])) parts.append(' ') # Intro parts.append('.. automodule:: ppci.programs') parts.append('') # Add TOC toc_html = programs.get_program_classes_html().replace('#', '#ppci.programs.') parts.append('.. raw:: html\n') parts.extend([' ' + line for line in toc_html.splitlines()]) parts.append('') programs1, programs2, programs3 = programs.get_program_classes_by_group() # Add base classes parts.append('Base program classes') parts.append('-' * len(parts[-1])) parts.append('') for program in (programs.Program, programs.SourceCodeProgram, programs.IntermediateProgram, programs.MachineProgram): parts.append('.. autoclass:: ppci.programs.{}'.format(program.__name__)) parts.append(' :members:\n\n') # Add concrete classes parts.append('Source code programs') parts.append('-' * len(parts[-1])) parts.append('') for program in programs1: parts.append('.. autoclass:: ppci.programs.{}'.format(program.__name__)) parts.append(' :members:\n\n') parts.append('Intermediate programs') parts.append('-' * len(parts[-1])) parts.append('') for program in programs2: parts.append('.. autoclass:: ppci.programs.{}'.format(program.__name__)) parts.append(' :members:\n\n') parts.append('Machine code programs') parts.append('-' * len(parts[-1])) parts.append('') for program in programs3: parts.append('.. autoclass:: ppci.programs.{}'.format(program.__name__)) parts.append(' :members:\n\n') files_to_remove.append(os.path.join('reference', 'programs.rst')) with open(os.path.join(DOC_DIR, 'reference', 'programs.rst'), 'wb') as f: f.write('\n'.join(parts).encode())
11,240
def start_vaurien_httpserver(port):
    """Start a vaurien httpserver, controlling a fake proxy"""
    config = get_config()
    config.registry['proxy'] = FakeProxy()
    server = WSGIServer(('localhost', int(port)), config.make_wsgi_app(), log=None)
    server.serve_forever()
11,241
def _layout_graph_up(graph):
    """
    Auto layout the nodes up stream.
    """
    nodes = graph.selected_nodes() or graph.all_nodes()
    graph.auto_layout_nodes(nodes=nodes, down_stream=False)
11,242
async def execute_request(url):
    """Method to execute a http request asynchronously"""
    async with aiohttp.ClientSession() as session:
        json = await fetch(session, url)
        return json
11,243
def game_over(remaining_words: List[str]) -> bool:
    """Return True iff remaining_words is empty.

    >>> game_over(['dan', 'paul'])
    False
    >>> game_over([])
    True
    """
    return remaining_words == []
11,244
def read_manifest(instream):
    """Read manifest file into a dictionary

    Parameters
    ----------
    instream : readable file like object
    """
    reader = csv.reader(instream, delimiter="\t")
    header = None
    metadata = {}
    for row in reader:
        if header is None:
            header = row
        else:
            metadata[row[0]] = row[1]
    return metadata
11,245
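A usage sketch for the `read_manifest` snippet above, feeding it an in-memory tab-separated manifest via `io.StringIO` (the field names here are invented for illustration); the first row is consumed as the header and every later row becomes a key/value pair:

import io

manifest_text = "name\tvalue\nsample\tA1\nlane\t3\n"
metadata = read_manifest(io.StringIO(manifest_text))
print(metadata)  # {'sample': 'A1', 'lane': '3'}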
def get_backbone_from_model(model: Model, key_chain: list) -> nn.Cell:
    """Obtain the backbone from a wrapped mindspore Model using the key chain provided.

    Args:
        model(Model): A Model instance with wrapped network and loss.
        key_chain(list[str]): the keys in the right order according to which we can
            get the backbone.

    Returns:
        The desired backbone(nn.Cell)."""
    network = model.train_network
    # if network is a WithLossCell
    if getattr(model, '_loss_fn') is None:
        assert hasattr(network, '_net')
        network = getattr(network, '_net')
    for key in key_chain:
        assert hasattr(network, key), f'network has no attr named {key}'
        network = getattr(network, key)
    return network
11,246
def test_core_init():
    """Test initialization"""
    LOGGER.info("Running core Init test...")
    name = "Test Timeline"
    tm = TimelineManager()
    timeline = tm.create_timeline(name)
    assert timeline.name == name
11,247
def dv_upper_lower_bound(f):
    """
    Donsker-Varadhan lower bound, but upper bounded by using log outside.
    Similar to MINE, but did not involve the term for moving averages.
    """
    first_term = f.diag().mean()
    second_term = logmeanexp_nodiag(f)
    return first_term - second_term
11,248
def main(): """ Populate the database """ # Projects proj = get_or_create(Project, short_name='CMIP6', full_name='Coupled Model Intercomparison Project Phase 6') proj = get_or_create(Project, short_name='PRIMAVERA', full_name='PRIMAVERA High Resolution Modelling Project') # Activty ID act_id = get_or_create(ActivityId, short_name='HighResMIP', full_name='High Resolution Model Intercomparison ' 'Project') # Experiments HighResMIP experiments = { 'control-1950': "coupled control with fixed 1950's forcing (HighResMIP " "equivalent of pre-industrial control)", 'highres-future': "coupled future 2015-2050 using a scenario as close " "to CMIP5 RCP8.5 as possible within CMIP6", 'hist-1950': "coupled historical 1950-2014", 'highresSST-present': "forced atmosphere experiment for 1950-2014", 'highresSST-future': "forced atmosphere experiment for 2015-2050 using " "SST/sea-ice derived from CMIP5 RCP8.5 simulations " "and a scenario as close to RCP8.5 as possible " "within CMIP6", 'highresSST-LAI': "common LAI dataset within the highresSST-present " "experiment", 'highresSST-smoothed': "smoothed SST version of highresSST-present", 'highresSST-p4K': "uniform 4K warming of highresSST-present SST", 'highresSST-4co2': "highresSST-present SST with 4xCO2 concentrations" } for expt in experiments: _ex = get_or_create(Experiment, short_name=expt, full_name=experiments[expt]) # Institutes institutes = { 'AWI': 'Alfred Wegener Institute, Helmholtz Centre for Polar and Marine ' 'Research, Am Handelshafen 12, 27570 Bremerhaven, Germany', 'CNRM-CERFACS': 'Centre National de Recherches Meteorologiques, Meteo-France, ' 'Toulouse, France) and CERFACS (Centre Europeen de Recherches ' 'et de Formation Avancee en Calcul Scientifique, Toulouse, ' 'France', 'CMCC': 'Centro Euro-Mediterraneo per i Cambiamenti Climatici, Bologna 40127, ' 'Italy', 'EC-Earth-Consortium': 'KNMI, The Netherlands; SMHI, Sweden; DMI, Denmark; ' 'AEMET, Spain; Met Eireann, Ireland; CNR-ISAC, Italy; Instituto de ' 'Meteorologia, Portugal; FMI, Finland; BSC, Spain; Centro de Geofisica, ' 'University of Lisbon, Portugal; ENEA, Italy; Geomar, Germany; Geophysical ' 'Institute, University of Bergen, Norway; ICHEC, Ireland; ICTP, Italy; ' 'IMAU, The Netherlands; IRV, Sweden; Lund University, Sweden; ' 'Meteorologiska Institutionen, Stockholms University, Sweden; Niels ' 'Bohr Institute, University of Copenhagen, Denmark; NTNU, Norway; SARA, ' 'The Netherlands; Unite ASTR, Belgium; Universiteit Utrecht, The Netherlands; ' 'Universiteit Wageningen, The Netherlands; University College Dublin, Ireland; ' 'Vrije Universiteit Amsterdam, the Netherlands; University of Helsinki, Finland; ' 'KIT, Karlsruhe, Germany; USC, University of Santiago de Compostela, Spain; ' 'Uppsala Universitet, Sweden; NLeSC, Netherlands eScience Center, The Netherlands', 'MPI-M': 'Max Planck Institute for Meteorology, Hamburg 20146, Germany', 'MOHC': 'Met Office Hadley Centre, Fitzroy Road, Exeter, Devon, ' 'EX1 3PB, UK', 'ECMWF': 'ECMWF (European Centre for Medium-Range Weather Forecasts, Reading ' 'RG2 9AX, United Kingdom)' } for inst in institutes: _inst = get_or_create(Institute, short_name=inst, full_name=institutes[inst]) # Models models = { 'AWI-CM-1-0-LR': 'AWI-CM-1-0-LR', 'AWI-CM-1-0-HR': 'AWI-CM-1-0-HR', 'CNRM-CM6-1-HR': 'CNRM-CM6-1-HR', 'CNRM-CM6-1': 'CNRM-CM6-1', 'CMCC-CM2-HR4': 'CMCC-CM2-HR4', 'CMCC-CM2-VHR4': 'CMCC-CM2-VHR4', 'MPIESM-1-2-HR': 'MPIESM-1-2-HR', 'MPIESM-1-2-XR': 'MPIESM-1-2-XR', 'HadGEM3-GC31-HM': 'HadGEM3-GC31-HM', 'HadGEM3-GC31-MM': 'HadGEM3-GC31-MM', 
'HadGEM3-GC31-LM': 'HadGEM3-GC31-LM', 'ECMWF-IFS-LR': 'ECMWF-IFS-LR', 'ECMWF-IFS-HR': 'ECMWF-IFS-HR', 'EC-Earth3-HR' :'EC-Earth3-HR', 'EC-Earth3-LR': 'EC-Earth3-LR' } for model in models: _mdl = get_or_create(ClimateModel, short_name=model, full_name=models[model])
11,249
def create_training_files_for_document(
        file_name, key_field_names, ground_truth_df, ocr_data, pass_number):
    """
    Create the ocr.json file and the label file for a document

    :param file_name: just the document name.ext
    :param key_field_names: names of the key fields to extract
    :param ground_truth_df: ground truth values for the document
    :param ocr_data: Previously OCR form
    :param pass_number: Are we processing word level or both word and line level
    """
    extraction_file_name = file_name[:-4] + '.ocr.json'

    # Now we go and reverse search the form for the Ground Truth values
    key_field_data = find_anchor_keys_in_form(
        df_gt=ground_truth_df,
        filename=extraction_file_name,
        data=ocr_data,
        anchor_keys=key_field_names,
        pass_number=pass_number)

    print(f"key_field_data {len(key_field_data)} {key_field_data} {file_name}")

    label_file, unique_fields_extracted = create_label_file(
        file_name,
        key_field_names,
        key_field_data[extraction_file_name]
    )

    return ocr_data, label_file, unique_fields_extracted
11,250
def incoming(ui, repo, source="default", **opts): """show new changesets found in source Show new changesets found in the specified path/URL or the default pull location. These are the changesets that would have been pulled if a pull at the time you issued this command. See pull for valid source format details. .. container:: verbose With -B/--bookmarks, the result of bookmark comparison between local and remote repositories is displayed. With -v/--verbose, status is also displayed for each bookmark like below:: BM1 01234567890a added BM2 1234567890ab advanced BM3 234567890abc diverged BM4 34567890abcd changed The action taken locally when pulling depends on the status of each bookmark: :``added``: pull will create it :``advanced``: pull will update it :``diverged``: pull will create a divergent bookmark :``changed``: result depends on remote changesets From the point of view of pulling behavior, bookmark existing only in the remote repository are treated as ``added``, even if it is in fact locally deleted. .. container:: verbose For remote repository, using --bundle avoids downloading the changesets twice if the incoming is followed by a pull. Examples: - show incoming changes with patches and full description:: hg incoming -vp - show incoming changes excluding merges, store a bundle:: hg in -vpM --bundle incoming.hg hg pull incoming.hg - briefly list changes inside a bundle:: hg in changes.hg -T "{desc|firstline}\\n" Returns 0 if there are incoming changes, 1 otherwise. """ if opts.get('graph'): cmdutil.checkunsupportedgraphflags([], opts) def display(other, chlist, displayer): revdag = cmdutil.graphrevs(other, chlist, opts) showparents = [ctx.node() for ctx in repo[None].parents()] cmdutil.displaygraph(ui, revdag, displayer, showparents, graphmod.asciiedges) hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True) return 0 if opts.get('bundle') and opts.get('subrepos'): raise util.Abort(_('cannot combine --bundle and --subrepos')) if opts.get('bookmarks'): source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) other = hg.peer(repo, opts, source) if 'bookmarks' not in other.listkeys('namespaces'): ui.warn(_("remote doesn't support bookmarks\n")) return 0 ui.status(_('comparing with %s\n') % util.hidepassword(source)) return bookmarks.incoming(ui, repo, other) repo._subtoppath = ui.expandpath(source) try: return hg.incoming(ui, repo, source, opts) finally: del repo._subtoppath
11,251
def test_project_update_role_points():
    """Test that relations to project roles are created for stories not related to those roles.

    The "relation" is just a mere `RolePoints` relation between the story and the role
    with points set to the project's null-point.
    """
    project = f.ProjectFactory.create()
    related_role = f.RoleFactory.create(project=project, computable=True)
    null_points = f.PointsFactory.create(project=project, value=None)
    user_story = f.UserStoryFactory(project=project)
    new_related_role = f.RoleFactory.create(project=project, computable=True)

    assert user_story.role_points.count() == 1
    assert user_story.role_points.filter(role=new_related_role,
                                         points=null_points).count() == 0

    project.update_role_points()

    assert user_story.role_points.count() == 2
    assert user_story.role_points.filter(role=new_related_role,
                                         points=null_points).count() == 1
11,252
def timeout(limit=5):
    """ Timeout
    This decorator is used to raise a timeout error when the given function
    exceeds the given timeout limit.
    """
    @decorator
    def _timeout(func, *args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        duration = time.time() - start
        if duration > limit:
            msg = f"Function {func.__name__} exceeded timeout limit ({limit} seconds)"
            raise TimeoutError(msg)
        return result
    return _timeout
11,253
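A usage sketch for the `timeout` decorator above (assuming the `decorator` package it relies on is installed); note that as written it reports the overrun only after the wrapped function returns rather than interrupting it:

import time

@timeout(limit=1)
def slow_op():
    # deliberately exceeds the 1-second limit
    time.sleep(1.5)
    return "done"

try:
    slow_op()
except TimeoutError as exc:
    print(exc)  # Function slow_op exceeded timeout limit (1 seconds)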
def main():
    """Export all the file options to pdf
    """
    setup()
    LINE_STYLE["ls"] = "--"
    for options in FILE_OPTIONS:
        export_grid_plot(**options)
11,254
def puts(n, s):
    """Print s consecutively n times."""
    for _ in range(n):
        print(s, end='')
11,255
def OIII4363_flux_limit(combine_flux_file: str, verbose: bool = False,
                        log: Logger = log_stdout()) -> \
        Union[None, np.ndarray]:
    """
    Determine 3-sigma limit on [OIII]4363 based on H-gamma measurements

    :param combine_flux_file: Filename of ASCII file containing emission-line
        flux measurements
    :param verbose: Write verbose message to stdout. Default: file only
    :param log: logging.Logger object

    :return: Array containing 3-sigma flux limit
    """
    log_verbose(log, "starting ...", verbose=verbose)

    try:
        combine_fits = asc.read(combine_flux_file)
    except FileNotFoundError:
        log.warning(f"File not found! {combine_flux_file}")
        return

    Hgamma = combine_fits['HGAMMA_Flux_Gaussian'].data
    Hgamma_SN = combine_fits['HGAMMA_S/N'].data

    flux_limit = (Hgamma / Hgamma_SN) * 3

    log_verbose(log, "finished.", verbose=verbose)
    return flux_limit
11,256
def convert_units_co2(ds,old_data,old_units,new_units): """ Purpose: General purpose routine to convert from one set of CO2 concentration units to another. Conversions supported are: umol/m2/s to gC/m2 (per time step) gC/m2 (per time step) to umol/m2/s mg/m3 to umol/mol mgCO2/m3 to umol/mol umol/mol to mg/m3 mg/m2/s to umol/m2/s mgCO2/m2/s to umol/m2/s Usage: new_data = qcutils.convert_units_co2(ds,old_data,old_units,new_units) where ds is a data structure old_data (numpy array) is the data to be converted old_units (string) is the old units new_units (string) is the new units Author: PRI Date: January 2016 """ ts = int(ds.globalattributes["time_step"]) if old_units=="umol/m2/s" and new_units=="gC/m2": new_data = old_data*12.01*ts*60/1E6 elif old_units=="gC/m2" and new_units=="umol/m2/s": new_data = old_data*1E6/(12.01*ts*60) elif old_units in ["mg/m3","mgCO2/m3"] and new_units=="umol/mol": Ta,f,a = GetSeriesasMA(ds,"Ta") ps,f,a = GetSeriesasMA(ds,"ps") new_data = mf.co2_ppmfrommgpm3(old_data,Ta,ps) elif old_units=="umol/mol" and new_units in ["mg/m3","mgCO2/m3"]: Ta,f,a = GetSeriesasMA(ds,"Ta") ps,f,a = GetSeriesasMA(ds,"ps") new_data = mf.co2_mgpm3fromppm(old_data,Ta,ps) elif old_units in ["mg/m2/s","mgCO2/m2/s"] and new_units=="umol/m2/s": new_data = mf.Fc_umolpm2psfrommgpm2ps(old_data) else: msg = " Unrecognised conversion from "+old_units+" to "+new_units log.error(msg) new_data = numpy.ma.array(old_data,copy=True,mask=True) return new_data
11,257
def install_shutdown_handlers(function, override_sigint=True): """Install the given function as a signal handler for all common shutdown signals (such as SIGINT, SIGTERM, etc). If override_sigint is ``False`` the SIGINT handler won't be installed if there is already a handler in place (e.g. Pdb) """ signal.signal(signal.SIGTERM, function) if signal.getsignal(signal.SIGINT) == signal.default_int_handler or \ override_sigint: signal.signal(signal.SIGINT, function) # Catch Ctrl-Break in Windows if hasattr(signal, "SIGBREAK"): signal.signal(signal.SIGBREAK, function)
11,258
def query_urlhaus(session, provided_ioc, ioc_type): """ """ uri_dir = ioc_type if ioc_type in ["md5_hash", "sha256_hash"]: uri_dir = "payload" api = "https://urlhaus-api.abuse.ch/v1/{}/" resp = session.post(api.format(uri_dir), timeout=180, data={ioc_type: provided_ioc}) ioc_dicts = [] if resp.status_code == 200 and resp.text != "": resp_content = resp.json() if ioc_type == "host": if "urls" not in resp_content.keys() or len(resp_content["urls"]) == 0: ioc_dicts.append({"no data": provided_ioc}) return ioc_dicts for url in resp_content["urls"]: ioc_dict = { "provided_ioc": provided_ioc, "host": resp_content.get("host", None), "firstseen (host)": resp_content.get("firstseen", None), "urlhaus_reference (host)": resp_content.get("urlhaus_reference", None), "url": url.get("url", None), "url_status": url.get("url_status", None), "date_added (url)": url.get("date_added", None), "urlhaus_reference (url)": url.get("urlhaus_reference", None) } if url["tags"] != None: ioc_dict.update({ "tags (url)": ",".join(url.get("tags", None)) }) ioc_dicts.append(ioc_dict) elif ioc_type == "url": if "payloads" not in resp_content.keys() or len(resp_content["payloads"]) == 0: ioc_dicts.append({"invalid": provided_ioc}) return ioc_dicts for payload in resp_content["payloads"]: ioc_dict = { "provided_ioc": provided_ioc, "host": resp_content.get("host", None), "url": resp_content.get("url", None), "url_status": resp_content.get("url_status", None), "date_added (url)": resp_content.get("date_added", None), "urlhaus_reference (url)": resp_content.get("urlhaus_reference", None), "filename (payload)": payload.get("filename", None), "content_type (payload)": payload.get("content_type", None), "response_size (payload)": payload.get("response_size", None), "md5_hash (payload)": payload.get("response_md5", None), "sha256_hash (payload)": payload.get("response_sha256", None), "firstseen (payload)": payload.get("firstseen", None), "signature (payload)": payload.get("signature", None) } if resp_content["tags"] != None: ioc_dict.update({ "tags (url)": ",".join(resp_content.get("tags", None)) }) if payload["virustotal"] != None: ioc_dict.update({ "vt_result (payload)": payload["virustotal"].get("result", None), "vt_link (payload)": payload["virustotal"].get("link", None) }) ioc_dicts.append(ioc_dict) elif ioc_type in ["md5_hash", "sha256_hash"]: if len(resp_content["urls"]) == 0: ioc_dicts.append({"invalid": provided_ioc}) return ioc_dicts for url in resp_content["urls"]: ioc_dict = { "provided_ioc": provided_ioc, "content_type (payload)": resp_content.get("content_type", None), "file_size (payload)": resp_content.get("file_size", None), "md5_hash (payload)": resp_content.get("md5_hash", None), "sha256_hash (payload)": resp_content.get("sha256_hash", None), "firstseen (payload)": resp_content.get("firstseen", None), "lastseen (payload)": resp_content.get("lastseen", None), "signature (payload)": resp_content.get("signature", None), "url": url.get("url", None), "url_status": url.get("url_status", None), "filename (url)": url.get("filename", None), "firstseen (url)": url.get("firstseen", None), "lastseen (url)": url.get("lastseen", None), "urlhaus_reference (url)": url.get("urlhaus_reference", None) } if resp_content["virustotal"] != None: ioc_dict.update({ "vt_result (payload)": resp_content["virustotal"].get("result", None), "vt_link (payload)": resp_content["virustotal"].get("link", None) }) ioc_dicts.append(ioc_dict) return ioc_dicts return [{"invalid": provided_ioc}]
11,259
def arrow_to_json(data): """ Convert an arrow FileBuffer into a row-wise json format. Go via pandas (To be revisited!!) """ reader = pa.ipc.open_file(data) try: frame = reader.read_pandas() return frame.to_json(orient='records') except: raise DataStoreException("Unable to convert to JSON")
11,260
def get_wmc_pathname(subject_id, bundle_string): """Generate a valid pathname of a WMC file given subject_id and bundle_string (to resolve ACT vs noACT). The WMC file contains the bundle-labels for each streamline of the corresponding tractogram. """ global datadir ACT_string = 'ACT' if bundle_string in noACT_list: ACT_string = 'noACT' try: pathname = next(pathlib.Path(f'{datadir}/sub-{subject_id}/').glob(f'dt-neuro-wmc.tag-{ACT_string}.id-*/classification.mat')) return pathname except StopIteration: print('WMC file not available!') raise FileNotFoundError
11,261
def _union_polygons(polygons, precision = 1e-4, max_points = 4000): """ Performs the union of all polygons within a PolygonSet or list of polygons. Parameters ---------- polygons : PolygonSet or list of polygons A set containing the input polygons. precision : float Desired precision for rounding vertex coordinates. max_points : int The maximum number of vertices within the resulting polygon. Returns ------- unioned : polygon The result of the union of all the polygons within the input PolygonSet. """ polygons = _merge_floating_point_errors(polygons, tol = precision/1000) unioned = gdspy.boolean(polygons, [], operation = 'or', precision = precision, max_points = max_points) return unioned
11,262
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets): """Check whether any alien has reached the bottom of the screen.""" screen_rect = screen.get_rect() for alien in aliens.sprites(): if alien.rect.bottom >= screen_rect.bottom: # Treat this case the same way as when the ship is hit ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets) break
11,263
def parse_or_none( field: str, field_name: str, none_value: str, fn: Callable[[str, str], T], ) -> Optional[T]: """ If the value is the same as the none value, will return None. Otherwise will attempt to run the fn with field and field name as the first and 2nd arguments. """ if field == none_value: return None try: val = fn(field, field_name) except LineParseError as e: msg = e.message + ( f"\nThe value may also be '{none_value}', which will be" "interpreted as None." ) raise LineParseError(msg) return val
11,264
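An illustrative sketch of the sentinel behaviour above; `parse_int` is a hypothetical parser matching the `(field, field_name)` signature expected by `fn` (a real one would raise `LineParseError` on bad input).

def parse_int(field: str, field_name: str) -> int:
    return int(field)

parse_or_none("42", "age", "NA", parse_int)   # -> 42
parse_or_none("NA", "age", "NA", parse_int)   # -> None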
def get_lattice_parameter(elements, concentrations, default_title): """Finds the lattice parameter for the provided atomic species using Vegard's law. :arg elements: The names of the elements present in the system. :arg concentrations: The concentrations of each element. :arg default_title: The default system title. """ if elements == None: lat_param = 1.0 title = default_title else: if len(elements) != len(concentrations): raise ValueError("You have provided {} element names when {} elements are present " "in the system. Please provide the correct number of elements." .format(len(elements),len(concentrations))) else: title = "" lat_param = 0 for i in range(len(elements)): lat_param += concentrations[i]*all_elements[elements[i]] if concentrations[i] > 0: title += " {} ".format(elements[i]) lat_param = float(lat_param) / sum(concentrations) title = "{0} {1}\n".format(default_title.strip(),title) return lat_param, title
11,265
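A worked instance of the Vegard's-law average computed above; the lattice parameters are illustrative stand-ins for entries of the module-level `all_elements` table.

concentrations = [3, 1]                        # e.g. 75% Al, 25% Cu
a_Al, a_Cu = 4.05, 3.61                        # assumed table values, in angstroms
lat_param = (3 * a_Al + 1 * a_Cu) / (3 + 1)    # concentration-weighted mean = 3.94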
def tokenize(text): """ Tokenizes, normalizes and lemmatizes a given text. Input: text: text string Output: - array of lemmatized and normalized tokens """ def is_noun(tag): return tag in ['NN', 'NNS', 'NNP', 'NNPS'] def is_verb(tag): return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'] def is_adverb(tag): return tag in ['RB', 'RBR', 'RBS'] def is_adjective(tag): return tag in ['JJ', 'JJR', 'JJS'] def penn_to_wn(tag): if is_adjective(tag): return wn.ADJ elif is_noun(tag): return wn.NOUN elif is_adverb(tag): return wn.ADV elif is_verb(tag): return wn.VERB return wn.NOUN tokens = word_tokenize(text.lower()) #split words into tokens and turn them into lower case tokens = [w for w in tokens if (w not in stopwords.words("english") and w not in string.punctuation)] # remove stopwords and punctuation tagged_words = pos_tag(tokens) #tag the tokens lemmed = [WordNetLemmatizer().lemmatize(w.lower(), pos=penn_to_wn(tag)) for (w,tag) in tagged_words] #lemmatize the tagged words if len(lemmed) == 0: #no lemmatized word should have zero length return ["error"] return lemmed
11,266
def bandpass_filterbank(bands, fs=1.0, order=8, output="sos"): """ Create a bank of Butterworth bandpass filters Parameters ---------- bands: array_like, shape == (n, 2) The list of bands ``[[flo1, fup1], [flo2, fup2], ...]`` fs: float, optional Sampling frequency (default 1.) order: int, optional The order of the IIR filters (default: 8) output: {'ba', 'zpk', 'sos'} Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'sos'. Returns ------- b, a : ndarray, ndarray Numerator (b) and denominator (a) polynomials of the IIR filter. Only returned if output='ba'. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if output='zpk'. sos : ndarray Second-order sections representation of the IIR filter. Only returned if output=='sos'. """ filters = [] nyquist = fs / 2.0 for band in bands: # bands that start above the Nyquist frequency are rejected if band[0] >= nyquist: raise ValueError("Bands should be below Nyquist frequency") # Truncate the highest band to Nyquist frequency norm_band = np.minimum(0.99, np.array(band) / nyquist) # Compute coefficients coeffs = butter(order / 2, norm_band, "bandpass", output=output) filters.append(coeffs) return filters
11,267
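A minimal usage sketch, assuming NumPy and SciPy are available (the function above already relies on SciPy's `butter`): build a few bands and filter a noise signal with each second-order-sections filter.

import numpy as np
from scipy.signal import sosfilt

fs = 16000
bands = [[125, 250], [250, 500], [500, 1000]]
sos_bank = bandpass_filterbank(bands, fs=fs, order=8, output="sos")

x = np.random.randn(fs)                       # one second of white noise
band_signals = [sosfilt(sos, x) for sos in sos_bank]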
def get_couch_client(https: bool = False, host: str = 'localhost', port: int = 5984, request_adapter: BaseHttpClient = HttpxCouchClient, **kwargs) -> CouchClient: """ Initialize CouchClient Parameters ---------- https: bool = False Schema type. Use https if value is True host: str = 'localhost' CouchDB host port: int = 5984 CouchDB port request_adapter: BaseHttpClient = HttpxCouchClient Http client adapter Returns ------- CouchClient CouchDB API realisation """ schema = 'http' if https: schema += 's' url = f'{schema}://{host}' if port: url += f':{port}' http_client = request_adapter.get_client(url, **kwargs) return CouchClient(http_client=http_client)
11,268
def test_cases( k=(5, 10), batch_size=(3, 16), num_queries=(3, 15, 16), num_candidates=(1024, 128), indices_dtype=(np.str, None), use_exclusions=(True, False)) -> Iterator[Dict[str, Any]]: """Generates test cases. Generates all possible combinations of input arguments as test cases. Args: k: The number of candidates to retrieve. batch_size: The query batch size. num_queries: Number of queries. num_candidates: Number of candidates. indices_dtype: The type of indices. use_exclusions: Whether to test exclusions. Yields: Keyword argument dicts. """ keys = ("k", "batch_size", "num_queries", "num_candidates", "indices_dtype", "use_exclusions") for values in itertools.product(k, batch_size, num_queries, num_candidates, indices_dtype, use_exclusions): yield dict(zip(keys, values))
11,269
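A consumption sketch for the generator above; `run_single_case` is a hypothetical test body. With the default tuples the Cartesian product yields 2*2*3*2*2*2 = 96 keyword dicts; narrowing any tuple shrinks the product accordingly.

for kwargs in test_cases(k=(10,), batch_size=(16,), num_queries=(3, 15),
                         num_candidates=(128,), indices_dtype=(None,),
                         use_exclusions=(False,)):
    run_single_case(**kwargs)   # 2 cases with these narrowed tuples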
def compute_rmse(loss_mse): """ Computes the root mean squared error. Args: loss_mse: numeric value of the mean squared error loss Returns: loss_rmse: numeric value of the root mean squared error loss """ return np.sqrt(2 * loss_mse)
11,270
def __detect_geometric_decomposition(pet: PETGraphX, root: CUNode) -> bool: """Detects geometric decomposition pattern :param pet: PET graph :param root: root node :return: true if GD pattern was discovered """ for child in pet.subtree_of_type(root, NodeType.LOOP): if not (child.reduction or child.do_all): return False for child in pet.direct_children_of_type(root, NodeType.FUNC): for child2 in pet.direct_children_of_type(child, NodeType.LOOP): if not (child2.reduction or child2.do_all): return False return True
11,271
def add_two_frags_together(fragList, atm_list, frag1_id, frag2_id): """Combine two fragments in fragList.""" new_id = min(frag1_id, frag2_id) other_id = max(frag1_id, frag2_id) new_fragList = fragList[:new_id] # copy up to the combined one new_frag = { # combined frag 'ids': fragList[frag1_id]['ids'] + fragList[frag2_id]['ids'], 'syms': fragList[frag1_id]['syms'] + fragList[frag2_id]['syms'], 'grp': new_id, 'chrg': fragList[frag1_id]['chrg'] + fragList[frag2_id]['chrg'], 'mult': fragList[frag1_id]['mult'] + fragList[frag2_id]['mult'] - 1, 'name': fragList[new_id]['name'], } new_frag = add_centroids([new_frag], atm_list) new_fragList.extend(new_frag) # add new frag # add up to removed frag new_fragList.extend(fragList[new_id+1:other_id]) # change rest of values for i in range(other_id+1,len(fragList)): fragList[i]['grp'] = i-1 fragList[i]['name'] = f"frag{i-1}" new_fragList.append(fragList[i]) for i in range(len(new_fragList)): if i != new_fragList[i]["grp"]: print(i, "does not") return new_fragList, new_id
11,272
def load_dataset( file: str, out_dir: str = "/tmp", download: bool = True, url: str = None, labels: str = "labels", verbose: int = 2, ) -> Tuple[ndarray, ndarray, ndarray, ndarray]: """Load Dataset from storage or cloud h5 format Args: file (str): File name or file path if local (tar gzipped, file extension not necessary) out_dir (str, optional): Location to save the dataset (or open if local). Defaults to '/tmp'. download (bool, optional): Whether to download from repo. If false, 'file' should be the path to the tar file. Defaults to 'True'. url (str, optional): URL of cloud storage pointing to file. Defaults to None. labels (str, optional): Key of labels in hdf5 file verbose (int, optional): Verbosity level: 2 is most, 0 is none. Defaults to 2. Returns: Tuple[ndarray, ndarray, ndarray, ndarray]: X, y train, X, y test """ file += ".tar.gz" if not file.endswith(".tar.gz") else "" location = join(out_dir, file) url = ( url if url else f"https://storage.gorchilov.net/datasets/{file.split('/')[-1]}" ) # get from cloud if not exists(location) and download: res = get(url, allow_redirects=True, stream=True) with open(location, "wb") as f: if verbose == 2 and "Content-Length" in head(url).headers: filesize = int(head(url).headers["Content-Length"]) with tqdm( unit="B", unit_scale=True, unit_divisor=1024, total=filesize * 1024, file=stdout, desc=file, ) as progress: for chunk in res.iter_content(chunk_size=1024): datasize = f.write(chunk) progress.update(datasize) else: f.write(res.content) if verbose > 0: print("Finished downloading file") # open tarball tar = tarfile.open(location, "r:gz") # get filenames from tarball files = list(filter(lambda x: x.name[0] != ".", tar.getmembers())) train_filename = join( out_dir, next(filter(lambda x: "train" in x.name, files)).name, ) test_filename = join(out_dir, next(filter(lambda x: "test" in x.name, files)).name) # extract files if not already if not exists(train_filename) or not exists(test_filename): tar.extractall(path=out_dir) if verbose > 0: print("Extracted tarball") tar.close() train_file = File(train_filename, mode="r") test_file = File(test_filename, mode="r") X_train = train_file["data"][:] y_train = train_file[labels][:] train_file.close() X_test = test_file["data"][:] y_test = test_file[labels][:] test_file.close() return (X_train, y_train, X_test, y_test)
11,273
def concatenate(arrays, axis=0): """ Joins a sequence of tensors along an existing axis. Args: arrays: Union[Tensor, tuple(Tensor), list(Tensor)], a tensor or a list of tensors to be concatenated. axis (int, optional): The axis along which the tensors will be joined, if axis is None, tensors are flattened before use. Default is 0. Returns: Tensor, a tensor concatenated from a tensor or a list of tensors. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore.numpy as np >>> x1 = np.ones((1,2,3)) >>> x2 = np.ones((1,2,1)) >>> x = np.concatenate((x1, x2), axis=-1) >>> print(x.shape) (1, 2, 4) """ array_type = F.typeof(arrays) if _check_is_tensor(array_type): # if the input is a single tensor # if only one tensor is provided, it is treated as a tuple along the # first dimension. For example, a tensor of shape (3,4,5) will be treated # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5)) if axis is None: return ravel(arrays) arr_shape = F.shape(arrays) _check_axes_range((axis,), len(arr_shape)) # move axis 0 to the disiganated position, while keep other axes' relative # positions unchanged new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis) arrays = transpose(arrays, new_axes) arrays = reshape(arrays, new_shape) return arrays flattened_arrays = () if axis is None: for arr in arrays: flattened_arrays += (ravel(arr),) axis = -1 return P.Concat(axis)(flattened_arrays) arr_shape = F.shape(arrays[0]) _check_axes_range((axis,), len(arr_shape)) # if only one tensor in the tuple/list, return the tensor itself if len(arrays) == 1: return arrays[0] return P.Concat(axis)(arrays)
11,274
def build_hdf5( save_file, video_file, label_file=None, pose_algo=None, batch_size=128, xpix=None, ypix=None, label_likelihood_thresh=0.9, zscore=True): """Build Behavenet-style HDF5 file from video file and optional label file. This function provides a basic example for how to convert raw video and label files into the processed version required by Behavenet. In doing so no additional assumptions are made about a possible trial structure; equally-sized batches are created. For more complex data, users will need to adapt this function to suit their own needs. Parameters ---------- save_file : :obj:`str` absolute file path of new HDF5 file; the directory does not need to be created beforehand video_file : :obj:`str` absolute file path of the video (.mp4, .avi) label_file : :obj:`str`, optional absolute file path of the labels; current formats include DLC/DGP csv or h5 files pose_algo : :obj:`str`, optional 'dlc' | 'dgp' batch_size : :obj:`int`, optional uniform batch size of data xpix : :obj:`int`, optional if not None, video frames will be reshaped before storing in the HDF5 ypix : :obj:`int`, optional if not None, video frames will be reshaped before storing in the HDF5 label_likelihood_thresh : :obj:`float`, optional likelihood threshold used to define masks; any labels/timepoints with a likelihood below this value will be set to NaN zscore : :obj:`bool`, optional individually z-score each label before saving in the HDF5 """ # load video capture video_cap = cv2.VideoCapture(video_file) n_total_frames = video_cap.get(cv2.CAP_PROP_FRAME_COUNT) xpix_og = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) ypix_og = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # load labels if label_file is not None: labels, masks = load_raw_labels( label_file, pose_algo=pose_algo, likelihood_thresh=label_likelihood_thresh) # error check n_total_labels = labels.shape[0] assert n_total_frames == n_total_labels, 'Number of frames does not match number of labels' else: labels = None n_trials = int(np.ceil(n_total_frames / batch_size)) trials = np.arange(n_trials) timestamps = np.arange(n_total_frames) # compute z-score params if label_file is not None and zscore: means = np.nanmean(labels, axis=0) stds = np.nanstd(labels, axis=0) else: means = None stds = None # create directory for hdf5 if it doesn't already exist if not os.path.exists(os.path.dirname(save_file)): os.makedirs(os.path.dirname(save_file)) with h5py.File(save_file, 'w', libver='latest', swmr=True) as f: # single write multi-read f.swmr_mode = True # create image group group_i = f.create_group('images') if label_file is not None: # create labels group (z-scored) group_l = f.create_group('labels') # create label mask group group_m = f.create_group('labels_masks') # create labels group (not z-scored, but downsampled if necessary) group_ls = f.create_group('labels_sc') # create a dataset for each trial within groups for tr_idx, trial in enumerate(trials): # find video timestamps during this trial trial_beg = trial * batch_size trial_end = (trial + 1) * batch_size ts_idxs = np.where((timestamps >= trial_beg) & (timestamps < trial_end))[0] # ---------------------------------------------------------------------------- # image data # ---------------------------------------------------------------------------- # collect from video capture, downsample frames_tmp = get_frames_from_idxs(video_cap, ts_idxs) if xpix is not None and ypix is not None: # Nones to add batch/channel dims frames_tmp = [cv2.resize(f[0], (xpix, ypix))[None, None, ...] 
for f in frames_tmp] else: frames_tmp = [f[None, ...] for f in frames_tmp] group_i.create_dataset( 'trial_%04i' % tr_idx, data=np.vstack(frames_tmp), dtype='uint8') # ---------------------------------------------------------------------------- # label data # ---------------------------------------------------------------------------- if label_file is not None: # label masks group_m.create_dataset('trial_%04i' % tr_idx, data=masks[ts_idxs], dtype='float32') # label data (zscored, masked) labels_tmp = (labels[ts_idxs] - means) / stds labels_tmp[masks[ts_idxs] == 0] = 0 # pytorch doesn't play well with nans assert ~np.any(np.isnan(labels_tmp)) group_l.create_dataset('trial_%04i' % tr_idx, data=labels_tmp, dtype='float32') # label data (non-zscored, masked) labels_tmp = labels[ts_idxs] labels_tmp = resize_labels(labels_tmp, xpix, ypix, xpix_og, ypix_og) labels_tmp[masks[ts_idxs] == 0] = 0 group_ls.create_dataset('trial_%04i' % tr_idx, data=labels_tmp, dtype='float32')
11,275
def six_nine(): """ For all lockers, they should be open and shut an even number of times leaving all lockers closed (except for a few). Since each locker will be toggled once for each of its factors up to the square root, and again for each factor after the square root, the number of toggles will always be even and all lockers will be shut. Except when we have a square number. Since a square number has an “extra” root, that root will only be toggled once and will not have a corresponding number to “undo” its toggle. This will leave all the square numbers open, and no others. So, that becomes the number of square numbers less than or equal to 100, which is 10 lockers. """ pass
11,276
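A brute-force check of the reasoning above: toggling each of 100 lockers once per divisor leaves exactly the perfect squares open.

open_lockers = [n for n in range(1, 101)
                if sum(1 for d in range(1, n + 1) if n % d == 0) % 2 == 1]
print(open_lockers)        # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print(len(open_lockers))   # 10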
def get_answer(): """ get answer """ # logger M_LOG.info("get_answer") # exist answer in dict ? if "answer" in gdct_data: # convert to JSON l_json = json.dumps(gdct_data["answer"]) M_LOG.debug("Requested answer: %s", str(l_json)) # remove answer from dict del gdct_data["answer"] # return ok return flask.Response(l_json, status=200, mimetype="application/json") M_LOG.debug("No answer yet...") # return error return flask.Response(status=503)
11,277
def default_todo_data(): """Dictionary with default todo data""" return {"title": "Молоко", "description": "Купить молоко в Ашане 200 литров", "created_datetime": "2041-08-12T00:00:00.000Z"}
11,278
def generate_new_key(access_key, secret_key, user_to_rotate): """generates a new key pair and returns the access key and secret key""" LOGGER.info("Begin generate new key") iam_client = boto3.client('iam', aws_access_key_id=access_key, aws_secret_access_key=secret_key) resp = iam_client.create_access_key(UserName=user_to_rotate) LOGGER.debug(resp) LOGGER.info("End generate new key") return resp['AccessKey']['AccessKeyId'].strip(), resp['AccessKey']['SecretAccessKey'].strip()
11,279
def withCHID(fcn): """decorator to ensure that first argument to a function is a Channel ID, ``chid``. The test performed is very weak, as any ctypes long or python int will pass, but it is useful enough to catch most accidental errors before they would cause a crash of the CA library. """ # It may be worth making a chid class (which could hold connection # data of _cache) that could be tested here. For now, that # seems slightly 'not low-level' for this module. def wrapper(*args, **kwds): "withCHID wrapper" if len(args)>0: chid = args[0] args = list(args) if isinstance(chid, int): args[0] = chid = dbr.chid_t(args[0]) if not isinstance(chid, dbr.chid_t): msg = "%s: not a valid chid %s %s args %s kwargs %s!" % ( (fcn.__name__, chid, type(chid), args, kwds)) raise ChannelAccessException(msg) return fcn(*args, **kwds) wrapper.__doc__ = fcn.__doc__ wrapper.__name__ = fcn.__name__ wrapper.__dict__.update(fcn.__dict__) return wrapper
11,280
def current_user_get(): """Get user information (user info get) Returns: Response: HTTP Response """ app_name = multi_lang.get_text("EP020-0001", "ユーザー情報:") exec_stat = multi_lang.get_text("EP020-0017", "取得") error_detail = "" try: globals.logger.debug('#' * 50) globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name)) globals.logger.debug('#' * 50) ret_user = user_get() return jsonify({"result": "200", "info": ret_user}), 200 except common.UserException as e: return common.server_error_to_message(e, app_name + exec_stat, error_detail) except Exception as e: return common.server_error_to_message(e, app_name + exec_stat, error_detail)
11,281
def asin(a: Dual) -> Dual: """inverse of sine or arcsine of the dual number a, using math.asin(x)""" if abs(a.value) >= 1: raise ValueError('Arcsin cannot be evaluated at {}.'.format(a.value)) value = np.arcsin(a.value) ders = dict() for k,v in a.ders.items(): ders[k] = 1/(np.sqrt(1-a.value**2))*v return Dual(value, ders)
11,282
def hydrotopeQ(cover,hydrotopemap): """Get mean values of the cover map for the hydrotopes""" grass.message(('Get mean hydrotope values for %s' %cover)) tbl = grass.read_command('r.univar', map=cover, zones=hydrotopemap, flags='gt').split('\n')[:-1] # :-1 since the last line has a trailing line break tbl = [tuple(l.split('|')) for l in tbl] tbl = np.array(tbl[1:], dtype=list(zip(tbl[0],['S250']*len(tbl[0])))) tbl = np.array(list(zip(tbl['zone'],tbl['mean'])), dtype=[('cat',np.int64),('mean',np.float64)]) return tbl[np.isfinite(tbl['mean'])]
11,283
def write_genfile(h1, he4, n14, qb, acc_mult, numerical_params, geemult, path, header, qnuc, t_end, accdepth, accrate0, accmass, lumdata=0, accrate1_str='', nuc_heat=False, setup_test=False, substrate_off=True): """======================================================== Creates a model generator file with the given params inserted ======================================================== numerical_params : {} dict specifying misc. kepler params (see grids/config/default.ini) h1 = flt : hydrogen mass fraction he4 = flt : helium " " z = flt : metals " " qb = flt : base heating (MeV/nucleon) qnuc = flt : nuclear heating (MeV/nucleon, for thermal setup) lumdata = int : switch for time-dependent base-flux (0/1 = off/on) accrate0 = flt : accretion rate at model start (as fraction of Eddington), for getting profile into equilibrium (essentially setting base flux) accrate1_str = str : optional string to redefine accrate (-1 = time-dependent) path = str : target directory for generator file ========================================================""" genpath = os.path.join(path, 'xrb_g') qnuc_str1 = '' qnuc_str2 = '' kill_setup = '' bmasslow = 'p bmasslow 2.8000000199990d33' if nuc_heat: qnuc_str1 = f""" c Convert qnuc from MeV/nucleon to erg/g, then to erg/s (with accrate) c (Note accrate is in Msun/yr) o qnuc {qnuc:.2f} def o qnuc {{1.602e-6}} * o qnuc {{accrate}} * o qnuc {{1.99e33 * 5.979e23 / 3.156e7}} * p xheatl {{qnuc}} p xheatym 1.e21 p xheatdm 2.e20""" qnuc_str2 = "p xheatl 0." if setup_test: kill_setup = "end\n" if not substrate_off: bmasslow = '' with open(genpath, 'w') as f: f.write(f"""c ============================================== c {header} c ============================================== net 1 h1 he3 he4 n14 c12 o16 ne20 mg24 net 1 si28 s32 ar36 ca40 ti44 cr48 fe52 net 1 ni56 fe54 pn1 nt1 m nstar 1.00 fe54 c He star abundances m acret {h1:.4f} h1 {he4:.4f} he4 0.0 c12 {n14:.4f} n14 0. o16 c THIS GRID FOR He ACCRETION g 0 2.0000e25 1 nstar 4.0e+8 1.0e+9 g 1 1.9000e25 1 nstar 4.0e+8 1.0e+9 g 40 1.0000e22 1 nstar 4.0e+8 1.0e+8 g 50 1.0000e21 1 nstar 4.0e+8 1.0e+8 g 51 8.0000e20 1 acret 2.0e+8 1.0e+8 g 54 2.0000e20 1 acret 1.0e+8 1.0e+6 g 55 0. 1 acret 5.0e+7 1.0e+4 dstat genburn rpabg mapburn p geemult {geemult:.5f} p 1 1.e-4 p 5 40 p 6 .05 p 7 .05 p 8 .10 p 9 .1 p 10 .99 p 14 1000000 p 16 100000 p 18 10 p 28 2 p 39 50. p 40 2. p 46 .15 p 47 3.e-3 p 48 1. p 49 1.e+50 p thickfac {numerical_params['thickfac']:.2f} p 53 .1 p 54 2. p 55 10. p 59 .05 p 60 1.0e+06 p 61 2.8e+33 p 62 1.6e+34 p 65 1.0e+99 p 70 1.e+99 p 73 1.e+99 p 75 1.e+99 p 80 .25 p 82 1.e+6 p 83 1.e+4 p 84 2.e-5 p 86 0 p 87 0 p 93 51 p 88 1.e+14 p 105 3.e+9 p 132 6 p 138 .33 p 139 .5 p 144 1.3 p 146 .0 p 147 .0 p 148 0. p 150 .01 p 151 .015 p 152 .03 p 156 100 p 159 5 p 160 0 p 189 .02 p 206 .003 p 42 14001000 p 199 -1. p 388 1 p 377 0 p 233 1.1e8 p 299 100000 p 265 -1 c p 425 0. p 64 1 p 405 -1.d0 p 406 -1.d0 p 420 -1.d0 p 64 1 p 434 1 p 443 2 c p 419 2.80000019999895D33 {bmasslow} p 147 1. p 146 1. p 233 1.d7 p 65 1.d7 p 211 1.75d-9 p minzone {numerical_params['minzone']:.0f} p 119 40 p 132 4 p 336 1.5d19 p 445 1.d20 p 437 10 p 376 1 p 11 1.d-8 p 12 1.d-8 p 128 1.d-4 p 137 1 c no h/he burn dumps p 454 -1. p 456 -1. # Overwrite hard-coded rates with bdat p ibdatov {numerical_params['ibdatov']:.0f} c======================================================================= c Now follows the command file c======================================================================= //* c .... 
accretion rate 1.75D-8 (L/Ledd) * 1.7/(X + 1) p accrate 1.75D-8 c .... substrate luminosity - accrate * 6.0816737e+43 * (Q/MeV) c .... 1.0642929e+36 (L/Ledd) * (Q/MeV) p xlum0 1.0642929e+36 c ------------------------- c substrate L, Q/MeV p xlum0 {qb:.6f} * c ....... c ..... SCALE to He/C/O L_Edd accretion: factor 1.7 / (X + 1) c --- h mass fraction --- o x {h1:.6f} def # o xeddf {{1.7 / (1. + x)}} def # p accrate {{xeddf}} * # p xlum0 {{xeddf}} * c set fraction of Eddington accretion rate o xledd {accrate0:.6f} def p accrate {{xledd}} * p xlum0 {{xledd}} * c apply anisotropy multiplier/factor o accmult {acc_mult:.6f} def p accrate {{accmult}} * p xlum0 {{accmult}} * c ------------------------- c get model in equilibrium p ncnvout 0 p nstop 1000000000 p tnucmin 1.d10 p tnumin 1.d7 p accmass 1.d13 p optconv 0.67 p 521 0 p 520 1 c for APPROX ONLY p jp0 0 p 132 4 c plot refresh time (s) p ipdtmin 0 c plot c ================================ c MATCH TO accmass @xm(jm)<{accmass:.4e} p 52 20 p accdepth 1.d99 p iterbarm 999999 c ......................... {qnuc_str1} c ================================= @time>1.d17 {qnuc_str2} p ncnvout {numerical_params['cnv']} c overwrites accreted composition (if need to change) compsurb {n14:.6f} n14 {he4:.6f} he4 {h1:.6f} h1 p xlum0 1. * p lumdata {lumdata} {accrate1_str} c multiplier (only on time-dependent files!) p accratef {acc_mult:.6f} p xl0ratef {acc_mult:.6f} c use accdepth 5.d20 for He c use accdepth 1.d20 for H p accdepth {accdepth:.4e} mapsurfb p ibwarn 0 zerotime p toffset 0. setcycle 0 cutbin resetacc d # p lburn {numerical_params['lburn']} p 1 1. p 521 1 p tnucmin 1.d7 p 86 1 p 87 1 p 452 0 p zonermin {numerical_params['zonermin']:.4f} p zonermax {numerical_params['zonermax']:.4f} p zonemmax 1.d99 p ddmin 1.d4 c decretion p decrate -1.D0 p idecmode 1 p jshell0 0 p ipup 5 c some other stuff c p 69 5.d18 p pbound {{6.67259e-8 * zm(0) * xm(0) / (4. * 3.14159 * rn(0) ^ 4 ) * 0.5}} p 132 11 p nsdump {numerical_params['nsdump']:.0f} p nstop {numerical_params['nstop']:.0f} p abunlim 0.01 {kill_setup} @time>{t_end:.4e} d # end""")
11,284
def convert_to_seconds(duration_str): """ return duration in seconds """ seconds = 0 if re.match(r"[0-9]+$", duration_str): seconds = int(duration_str) elif re.match(r"[0-9]+s$", duration_str): seconds = int(duration_str[:-1]) elif re.match(r"[0-9]+m$", duration_str): seconds = 60 * int(duration_str[:-1]) elif re.match(r"[0-9]+h$", duration_str): seconds = 3600 * int(duration_str[:-1]) elif re.match(r"[0-9]+d$", duration_str): seconds = 86400 * int(duration_str[:-1]) # 24 * 3600 seconds per day return seconds
11,285
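A quick usage sketch for the duration parser above; a day maps to 86400 seconds.

convert_to_seconds("90")    # -> 90
convert_to_seconds("45s")   # -> 45
convert_to_seconds("10m")   # -> 600
convert_to_seconds("2h")    # -> 7200
convert_to_seconds("1d")    # -> 86400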
def iter_extensions(extension): """ Depth-first iterator over sub-extensions on `extension`. """ for _, ext in inspect.getmembers(extension, is_extension): for item in iter_extensions(ext): yield item yield ext
11,286
def get_chunk_n_rows(row_bytes: int, working_memory: Num, max_n_rows: int = None) -> int: """Calculates how many rows can be processed within working_memory Parameters ---------- row_bytes : int The expected number of bytes of memory that will be consumed during the processing of each row. working_memory : int or float, optional The number of rows to fit inside this number of MiB will be returned. max_n_rows : int, optional The maximum return value. Returns ------- int or the value of n_samples Warns ----- Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB. """ chunk_n_rows = int(working_memory * (2 ** 20) // row_bytes) if max_n_rows is not None: chunk_n_rows = min(chunk_n_rows, max_n_rows) if chunk_n_rows < 1: # Could not adhere to working_memory config; emit the UserWarning documented above (assumes the stdlib ``warnings`` module is imported). warnings.warn("Could not adhere to working_memory config: " "%d bytes per row exceed %s MiB of working memory." % (row_bytes, working_memory)) chunk_n_rows = 1 return chunk_n_rows
11,287
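A worked instance of the arithmetic above, assuming rows of 50,000 float64 values (400,000 bytes each) and a 1024 MiB budget.

# 1024 * 2**20 // 400_000 = 2684 rows fit in the budget
get_chunk_n_rows(row_bytes=400_000, working_memory=1024, max_n_rows=10_000)   # -> 2684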
def elasticsearch_ispartial_log(line): """ >>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]' >>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists' >>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]' >>> elasticsearch_ispartial_log(line1) False >>> elasticsearch_ispartial_log(line2) True >>> elasticsearch_ispartial_log(line3) True """ match_result = [] for p in LOG_BEGIN_PATTERN: if re.match(p, line) != None: return False return True
11,288
def find_signal_analysis(prior, sparsity, sigma_data): """ Generates a signal using an analytic prior. Works only with square and overcomplete full-rank priors. """ N, L = prior.shape k = np.sum(np.random.random(L) > (1 - sparsity)) V = np.zeros(shape=(L, L - k)) while np.linalg.matrix_rank(V) != L - k: s = np.random.permutation(N) V = prior[s[:L - k]] x = np.random.normal(scale=sigma_data, size=(L)) x / np.linalg.norm(x) x -= np.linalg.pinv(V) @ V @ x return x
11,289
def download_images(sorted_urls) -> List: """Download images and convert to list of PIL images Once in an array of PIL.images we can easily convert this to a PDF. :param sorted_urls: List of sorted URLs for split financial disclosure :return: image_list """ async def main(urls): image_list = [] loop = asyncio.get_event_loop() futures = [loop.run_in_executor(None, requests.get, url) for url in urls] for response in await asyncio.gather(*futures): image_list.append(response.content) return image_list loop = asyncio.get_event_loop() image_list = loop.run_until_complete(main(sorted_urls)) return image_list
11,290
def get_molecules(struct, bonds_kw={"mult":1.20, "skin":0.0, "update":False}, ret="idx"): """ Returns the index of atoms belonging to each molecule in the Structure. """ bonds = struct.get_bonds(**bonds_kw) ## Build connectivity matrix graph = np.zeros((struct.geometry.shape[0],struct.geometry.shape[0])) for atom_idx,bonded_idx_list in enumerate(bonds): for bond_idx in bonded_idx_list: graph[atom_idx][bonded_idx_list] = 1 graph = csr_matrix(graph) n_components, component_list = connected_components(graph) molecule_idx_list = [np.where(component_list == x)[0] for x in range(n_components)] if ret == "idx": return molecule_idx_list elif ret == "struct": ## Returns list of structures geo = struct.get_geo_array() ele = struct.geometry["element"] molecule_struct_list = [] for idx,entry in enumerate(molecule_idx_list): mol_geo = geo[entry] mol_ele = ele[entry] mol = Structure.from_geo(mol_geo,mol_ele) mol.struct_id = "{}_molecule_{}".format(struct.struct_id, idx) molecule_struct_list.append(mol) return molecule_struct_list else: ## Returns list of structures geo = struct.get_geo_array() ele = struct.geometry["element"] molecule_struct_dict = {} for idx,entry in enumerate(molecule_idx_list): mol_geo = geo[entry] mol_ele = ele[entry] mol = Structure.from_geo(mol_geo,mol_ele) mol.struct_id = "{}_molecule_{}".format(struct.struct_id, idx) molecule_struct_dict[mol.struct_id] = mol return molecule_struct_dict
11,291
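The core step of the function above in isolation: an atom-level adjacency matrix goes through SciPy's `connected_components`, and each component becomes one molecule. The five-atom geometry is a made-up toy example.

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

adj = np.zeros((5, 5))
for i, j in [(0, 1), (1, 2), (3, 4)]:    # a three-atom chain and a two-atom pair
    adj[i, j] = adj[j, i] = 1

n_components, labels = connected_components(csr_matrix(adj))
molecule_idx_list = [np.where(labels == k)[0] for k in range(n_components)]
# n_components == 2; molecule_idx_list == [array([0, 1, 2]), array([3, 4])]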
def cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33): """Finds the c divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. al2 (numpy.array): array of alpha2 values from write up. al3 (numpy.array): array of alpha3 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x21 (numpy.array): array of pg values for x(2,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x23 (numpy.array): array of pg values for x(2,3) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs. """ HNFs = [] if np.allclose(x23,0): if b == None: # find the b values, d and e still unkown if not np.allclose(al3, 0): N=0 at = al3[np.nonzero(al3)] val = np.unique(N*c/at) while any(abs(val) <c): for v in val: if v < c and v >= 0 and np.allclose(v%1==0): b = v c1 = a*x21 + b*(x22-al1-x11) c2 =(-b*al2) if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 =c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(N*c/at) elif not np.allclose(al2,0): N=0 at = al2[np.nonzero(al2)] val = np.unique(N*c/at) while any(abs(val) <c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c1 = a*x21 + b*(x22-al1-x11) c3 =(-b*al3) if np.allclose(c1%c,0) and np.allclose(c3%c,0): be1 = c1/c be2 =-b*al2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(N*c/at) else: if not np.allclose((x22-x11-al1),0): N=0 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt))) while any(abs(val) <c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c2 = -b*al2 c3 =(-b*al3) if np.allclose(c2%c,0) and np.allclose(c3%c,0): be1 = (a*x21+b*(x22-x11-al1))/c be2 =-b*al2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) N += 1 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt))) else: c1 = a*x21 c2 = 0 c3 = 0 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) else: c1 = a*x21 + b*(x22-al1-x11) c2 = (-b*al2) c3 = (-b*a13) if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) else: if np.allclose(al3,0): if np.allclose((f*x23)%c,0): if b == None and e == None and d == None: if np.allclose(al3,0) and np.allclose(al2,0) and np.allclose(al3,0): N = 0 xt = x23[np.nonzero(x23)] val = np.unique(N*c/xt) while any(abs(val)<f): for v in val: if v <f and v>=0 and np.allclose(v%1,0): e = v for b in range(c): N2 =0 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): d = v2 be1 = (a*x21+b*(x22-x11)+d*x23)/c be2 = e*x23/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.appned(t) N2 += 1 xt = x23[np.nonzero(x23)] val2 = 
np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt))) N += 1 val = np.unique(N*c/xt) elif not np.allclose(al3,0): N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(al3))) while any(abs(val) < c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) while any(abs(val2)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(x22)*len(xt))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) else: for b in range(c): N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2 >= 0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val2)<f): for v3 in val3: if v3 <f and v3 >= 0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(al2)*len(xt))) elif b == None: if not np.allclose(al3,0): N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif not np.allclose(al2,0): N = max(np.round(e*x23/c)) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at))) else: if not np.allclose((x22-x11-al1),0): N = min(np.round((a*x21-d*x23)/c)) xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21sd*x23,1/xt),len(x23)*len(xt))) while any(abs(val) < 
c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21sd*x23,1/xt),len(x23)*len(xt))) else: c1 = a*x21+d*x23 c2 = e*x23 c3 = f*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) elif d == None and e == None: N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) else: c1 = a*x21+b*(x22-al1-x11)+d*x23 c2 = -b*al2+e*x23 c3 = -b*al3+f*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) # else: # print("f: ",f) # print("c: ",c) # print("x32: ",x32) # print("failed f*x32/c") else: if b==None and d==None and e==None: N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif b==None: N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in 
tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif d==None and e==None: N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) else: be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) return HNFs
11,292
def numeric_field_list(model_class): """Return a list of field names for every numeric field in the class.""" def is_numeric(type): return type in [BigIntegerField, DecimalField, FloatField, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, SmallIntegerField] fields = [] for (field, type) in field_list(model_class): if is_numeric(type): fields += [field] return fields
11,293
def _get_lambda_source_code(lambda_fn, src): """Attempt to find the source code of the ``lambda_fn`` within the string ``src``.""" def gen_lambdas(): def gen(): yield src + "\n" g = gen() step = 0 tokens = [] for tok in tokenize.generate_tokens(getattr(g, "next", getattr(g, "__next__", None))): if step == 0: if tok[0] == tokenize.NAME and tok[1] == "lambda": step = 1 tokens = [tok] level = 0 elif step == 1: if tok[0] == tokenize.NAME: tokens.append(tok) step = 2 else: step = 0 elif step == 2: if tok[0] == tokenize.OP and tok[1] == ":": tokens.append(tok) step = 3 else: step = 0 elif step == 3: if level == 0 and (tok[0] == tokenize.OP and tok[1] in ",)" or tok[0] == tokenize.ENDMARKER): yield tokenize.untokenize(tokens).strip() step = 0 else: tokens.append(tok) if tok[0] == tokenize.OP: if tok[1] in "[({": level += 1 if tok[1] in "])}": level -= 1 assert not tokens actual_code = lambda_fn.__code__.co_code for lambda_src in gen_lambdas(): try: fn = eval(lambda_src, globals(), locals()) if fn.__code__.co_code == actual_code: return lambda_src.split(":", 1)[1].strip() except Exception: pass return "<lambda>"
11,294
def retrieve_keycloak_public_key_and_algorithm(token_kid: str, oidc_server_url: str) -> (str, str): """ Retrieve the public key for the token from keycloak :param token_kid: The user token :param oidc_server_url: Url of the server to authorize with :return: keycloak public key and algorithm """ handle = f'{oidc_server_url}/protocol/openid-connect/certs' logger.info(f'Getting public key for the kid={token_kid} from the keycloak...') r = requests.get(handle) if r.status_code != 200: error = "Could not get certificates from the keycloak. " \ "Reason: [{}]: {}".format(r.status_code, r.text) logger.error(error) raise ValueError(error) try: json_response = r.json() except Exception: error = "Could not retrieve the public key. " \ "Got unexpected response: '{}'".format(r.text) logger.error(error) raise ValueError(error) try: matching_key = next((item for item in json_response.get('keys') if item['kid'] == token_kid), None) matching_key_json = json.dumps(matching_key) public_key = RSAAlgorithm.from_jwk(matching_key_json) except Exception as e: error = f'Invalid public key!. Reason: {e}' logger.error(error) raise ValueError(error) logger.info(f'The public key for the kid={token_kid} has been fetched.') return matching_key.get('alg'), public_key
11,295
def read_conformations(filename, version="default", sep="\t", comment="#", encoding=None, mode="rb", **kw_args): """ Extract conformation information. Parameters ---------- filename: str Relative or absolute path to file that contains the RegulonDB information. Returns ------- """ kw_args["mode"] = mode kw_args["encoding"] = encoding conformations = list() with open_file(filename, **kw_args) as (file_h, ext): iter_rowset = FILE_PARSERS.get(ext, iter_rowset_flat_file) for row in iter_rowset(file_h): tf_id = row["transcription_factor_id"] try: t_factor = elem.TranscriptionFactor[tf_id, version] except KeyError: LOGGER.warn("unknown transcription factor %s", tf_id) LOGGER.warn("Please parse transcription factor information before"\ " parsing conformations.") continue conf = elem.Conformation( unique_id=row["conformation_id"], name_space=version, tf=t_factor, state=row["final_state"], interaction=row["interaction_type"], conformation_type=row.get("conformation_type", None), # version dependent apo_holo=row.get("apo_holo_conformation", None) # version dependent ) t_factor.conformations.add(conf) conformations.append(conf) return conformations
11,296
def maskRipple(inRpl, outFile, mask): """maskRipple(inRpl, outFile, mask) Sets the individual data items to zero based on the specified mask. If mask.getRGB(c,r)>0 then copy the contents at (c,r) of inRpl to outFile.rpl. Otherwise the contents of outFile is set to all zeros.""" outRpl = "%s.rpl" % outFile outRaw = "%s.raw" % outFile len = inRpl.getDepth() ty = inRpl.getDataType() res = ept.RippleFile(inRpl.getColumns(), inRpl.getRows(), inRpl.getDepth(), inRpl.getDataType(), inRpl.getDataSize(), ept.RippleFile.DONT_CARE_ENDIAN, outRpl, outRaw) zero = (0) * len for c in xrange(0, inRpl.getColumns()): for r in xrange(0, inRpl.getRows()): inRpl.setPosition(c, r) res.setPosition(c, r) if mask.getRGB(c, r) > 0: if ty == inRpl.FLOAT: res.write(inRpl.readDouble(len)) else: res.write(inRpl.readInt(len)) return res
11,297
def GetCurrentScene() -> Scene: """ Returns current scene. Raises SpykeException if current scene is not set. """ if not _currentScene: raise SpykeException("No scene is set current.") return _currentScene
11,298
def Login(): """Performs interactive login and caches authentication token. Returns: non-zero value on error. """ ConfirmUserAgreedToS() parser = argparse.ArgumentParser() parser.add_argument('--browser', action='store_true', help=('Use browser to get goma OAuth2 token.')) options = parser.parse_args(sys.argv[2:]) config = GomaOAuth2Config() config.update(DefaultOAuth2Config()) func = GetAuthorizationCodeViaCommandLine if options.browser: func = GetAuthorizationCodeViaBrowser config['refresh_token'] = GetRefreshToken(func, config) err = VerifyRefreshToken(config) if err: sys.stderr.write(err + '\n') return 1 config.Save() flags = configFlags(config) for k in flags: if k not in os.environ: os.environ[k] = flags[k] if not CheckPing(): return 1 return 0
11,299