Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def plot_karyotype_summary(haploid_coverage, chromosomes, chrom_length, output_dir, bed_filename,
                           bed_file_sep=',', binsize=1000000, overlap=50000, cov_min=5, cov_max=200,
                           min_PL_length=3000000, chroms_with_text=None):
    """
    Plots karyotype summary for the whole genome with data preparation.

    :param haploid_coverage: the average coverage of haploid regions (or the half of that of diploid regions)
    :param chromosomes: list of chromosomes in the genome (list of str)
    :param chrom_length: list of chromosome lengths (list of int)
    :param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (str)
    :param bed_filename: the path to the bed file of the sample with ploidy and LOH information (str)
    :param bed_file_sep: bed file separator (default: ',') (str)
    :param binsize: the binsize used for moving average (default: 1000000) (int)
    :param overlap: the overlap used for moving average (default: 50000) (int, smaller than binsize)
    :param cov_min: the minimum coverage for a position to be included (default: 5) (int)
    :param cov_max: the maximum coverage for a position to be included (default: 200) (int)
    :param min_PL_length: the minimal length of a region to be plotted (default: 3000000) (int)
    :param chroms_with_text: the list of chromosomes to be indicated with text on the plot (list of str)
        (If there are many short chromosomes or they have long names, it is useful to only indicate a few
        with text on the plot.)
    :returns: a matplotlib figure
    """
    real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75 = __get_BAF_and_DR(
        avg_dip_cov=haploid_coverage * 2, chroms=chromosomes, chrom_length_list=chrom_length,
        datadir=output_dir, binsize=binsize, overlap=overlap, cov_min=cov_min, cov_max=cov_max)
    s0, s1, loh_pos, loh = __get_PL_and_LOH(
        bed_filename=bed_filename, chroms=chromosomes, chrom_lenght_list=chrom_length,
        bed_file_sep=bed_file_sep, numtoplot=5000, minlength=min_PL_length)
    f = __plot_karyotype(
        real_pos=real_pos, dr=dr, dr_25=dr_25, dr_75=dr_75, baf=baf, baf_25=baf_25, baf_75=baf_75,
        s0=s0, s1=s1, loh_pos=loh_pos, loh=loh, all_chroms=chromosomes, chrom_length_list=chrom_length,
        chroms_with_text=chroms_with_text)
    return f
5234b2ac9e459cfe445be6820abb97821503f554
3,649,200
import os
import bisect
import logging

import numpy as np
from osgeo import gdal
import pygeoprocessing.geoprocessing

LOGGER = logging.getLogger(__name__)


def create_percentile_rasters(
        raster_path, output_path, units_short, units_long, start_value,
        percentile_list, aoi_shape_path):
    """Creates a percentile (quartile) raster based on the raster_dataset.

    An attribute table is also constructed for the raster_dataset that
    displays the ranges provided by taking the quartile of values.

    The following inputs are required:
        raster_path - A uri to a gdal raster dataset with data of type integer
        output_path - A String for the destination of new raster
        units_short - A String that represents the shorthand for the units of
            the raster values (ex: kW/m)
        units_long - A String that represents the description of the units of
            the raster values (ex: wave power per unit width of wave crest
            length (kW/m))
        start_value - A String representing the first value that goes to the
            first percentile range (start_value - percentile_one)
        percentile_list - a python list of the percentile ranges
            ex: [25, 50, 75, 90]
        aoi_shape_path - a uri to an OGR polygon shapefile to clip the rasters to

    return - Nothing
    """
    LOGGER.debug('Create Percentile Rasters')

    # If the output_path is already a file, delete it
    if os.path.isfile(output_path):
        os.remove(output_path)

    def raster_percentile(band):
        """Operation to use in vectorize_datasets that takes the pixels of
        'band' and groups them together based on their percentile ranges.

        band - A gdal raster band

        returns - An integer that places each pixel into a group
        """
        return bisect.bisect(percentiles, band)

    # Get the percentile values for each percentile
    percentiles = calculate_percentiles_from_raster(
        raster_path, percentile_list)

    LOGGER.debug('percentiles_list : %s', percentiles)

    # Get the percentile ranges as strings so that they can be added to an
    # output table
    percentile_ranges = create_percentile_ranges(
        percentiles, units_short, units_long, start_value)

    # Add the start_value to the beginning of the percentiles so that any value
    # before the start value is set to nodata
    percentiles.insert(0, int(start_value))

    # Set nodata to a very small negative number
    nodata = -9999919
    pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(raster_path)

    # Classify the pixels of raster_dataset into groups and write
    # them to output
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [raster_path], raster_percentile, output_path, gdal.GDT_Int32,
        nodata, pixel_size, 'intersection',
        assert_datasets_projected=False, aoi_uri=aoi_shape_path)

    # Create percentile groups of how percentile ranges are classified
    # using bisect function on a raster
    percentile_groups = np.arange(1, len(percentiles) + 1)

    # Get the pixel count for each group
    pixel_count = count_pixels_groups(output_path, percentile_groups)

    LOGGER.debug('number of pixels per group: %s', pixel_count)

    # Initialize a dictionary where percentile groups map to a string
    # of corresponding percentile ranges. Used to create RAT
    perc_dict = {}
    for index in xrange(len(percentile_groups)):
        perc_dict[percentile_groups[index]] = percentile_ranges[index]

    col_name = "Val_Range"
    pygeoprocessing.geoprocessing.create_rat_uri(output_path, perc_dict, col_name)

    # Initialize a dictionary to map percentile groups to percentile range
    # string and pixel count. Used for creating the CSV table
    table_dict = {}
    for index in xrange(len(percentile_groups)):
        table_dict[index] = {}
        table_dict[index]['id'] = percentile_groups[index]
        table_dict[index]['Value Range'] = percentile_ranges[index]
        table_dict[index]['Pixel Count'] = pixel_count[index]

    attribute_table_uri = output_path[:-4] + '.csv'
    column_names = ['id', 'Value Range', 'Pixel Count']
    create_attribute_csv_table(attribute_table_uri, column_names, table_dict)
69f8ea165141ceac4f0f272e36662bbd820b8e0a
3,649,201
import torch def probs_to_mu_sigma(probs): """Calculate mean and covariance matrix for each channel of probs tensor of keypoint probabilites [N, C, H, W] mean calculated on a grid of scale [-1, 1] Parameters ---------- probs : torch.Tensor tensor of shape [N, C, H, W] where each channel along axis 1 is interpreted as a probability density. Returns ------- mu : torch.Tensor tensor of shape [N, C, 2] representing partwise mean coordinates of x and y for each item in the batch sigma : torch.Tensor tensor of shape [N, C, 2, 2] representing covariance matrix for each item in the batch """ bn, nk, h, w = shape_as_list(probs) y_t = tile(torch.linspace(-1, 1, h).view(h, 1), w, 1) x_t = tile(torch.linspace(-1, 1, w).view(1, w), h, 0) y_t = torch.unsqueeze(y_t, dim=-1) x_t = torch.unsqueeze(x_t, dim=-1) meshgrid = torch.cat([y_t, x_t], dim=-1) if probs.is_cuda: meshgrid = meshgrid.to(probs.device) mu = torch.einsum("ijl,akij->akl", meshgrid, probs) mu_out_prod = torch.einsum("akm,akn->akmn", mu, mu) mesh_out_prod = torch.einsum("ijm,ijn->ijmn", meshgrid, meshgrid) sigma = torch.einsum("ijmn,akij->akmn", mesh_out_prod, probs) - mu_out_prod return mu, sigma
d0653e50d1f9ec4125e9b30c10a0e6cb78c6dc8e
3,649,202
def fetch_biomart_genes_mm9():
    """Fetches mm9 genes from Ensembl via biomart."""
    return _fetch_genes_biomart(
        host='http://may2012.archive.ensembl.org',
        gene_name_attr='external_gene_id')
f184608e87a2d390b47fd0f78d293dfd52064ad0
3,649,203
def tweet_words(tweet):
    """Return the words in a tweet."""
    return extract_words(tweet_text(tweet))
c207553fa1bd718083d26e57a9daea43c5629116
3,649,204
def meter_statistics(meter_id,api_endpoint,token,meter_list,web,**kwargs): """ Get the statistics for the specified meter. Args: meter_id(string): The meter name. api_endpoint(string): The api endpoint for the ceilometer service. token(string): X-Auth-token. meter_list(list): The list of available meters. Returns: bool: True if successful, False otherwise. list: The list with the meter statistics. """ meter_stat = [None] headers = { #'Accept': 'application/json', 'Content-Type': 'application/json;', 'Accept': 'application/json', 'X-Auth-Token': token } path = "/v2/meters/"+meter_id+"/statistics?" q=kwargs.pop('q') target = urlparse(api_endpoint+path+q) method = 'GET' logger.info('Inside meter-statistics: Path is %s',target) if(web==False): from_date,to_date,from_time,to_time,resource_id,user_id,status_q=query() if(status_q==True): q=set_query(from_date,to_date,from_time,to_time,resource_id,user_id,status_q) body="{"+q period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.") if(period=="Y"): period_def=raw_input("Enter the desired time period in seconds: ") body=body+',"period":'+period_def groupby=raw_input("Do you want to define a group by value? Enter 'Y' if yes, 'N' if no.") if (groupby=="Y") : rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ") if(rid=="Y"): groupby_def=',"groupby":[' groupby_def=groupby_def+'"resource_id"' pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ") if(pid=="Y"): groupby_def=groupby_def+',"project_id"' groupby_def=groupby_def+']' body=body+groupby_def else: pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ") if(pid=="Y"): groupby_def=',"groupby":[' groupby_def=groupby_def+'"project_id"' groupby_def=groupby_def+']' body=body+groupby_def body=body+"}" else: body="{" period=raw_input("Do you want to define a time period? Enter 'Y' if yes, 'N' if no.") if(period=="Y"): period_def=raw_input("Enter the desired time period in seconds: ") body=body+'"period":'+period_def rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ") if(rid=="Y"): groupby_def=',"groupby":[' groupby_def=groupby_def+'"resource_id"' pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ") if(pid=="Y"): groupby_def=groupby_def+',"project_id"' groupby_def=groupby_def+']' body=body+groupby_def else: pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ") if(pid=="Y"): groupby_def=',"groupby":[' groupby_def=groupby_def+'"project_id"' groupby_def=groupby_def+']' body=body+groupby_def body=body+"}" else: rid=raw_input("Do you want to group by the resource id? If yes, enter 'Y', else enter 'N'. ") if(rid=="Y"): groupby_def='"groupby":[' groupby_def=groupby_def+'"resource_id"' pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. ") if(pid=="Y"): groupby_def=groupby_def+',"project_id"' groupby_def=groupby_def+']' body=body+groupby_def else: pid=raw_input("Do you want to group by the project id? If yes, enter 'Y', else enter 'N'. 
") if(pid=="Y"): groupby_def='"groupby":[' groupby_def=groupby_def+'"project_id"' groupby_def=groupby_def+']' body=body+groupby_def body=body+"}" else: #q=kwargs.pop('q') if 'period' in kwargs: period=kwargs.pop('period') body="{"+q body=body+',"period":'+period+"}" else: body="{"+q+"}" if is_in_mlist(meter_id,meter_list): logger.info('Inside meter_statistics: body is %s',body) h = http.Http() #print method #print body #print headers #print target.geturl() #response, content = h.request(target.geturl(),method,body,headers) response, content = h.request(target.geturl(),method,'',headers) #print response header = json.dumps(response) #print header json_header = json.loads(header) #print json_header server_response = json_header["status"] if server_response not in {'200'}: print "Inside meter_statistics(): Something went wrong!" logger.warn('Inside meter_statistics: not a valid response ') return False, meter_stat else: logger.info('Getting the meter statistics \n') data = json.loads(content) #print content #print data #print "=========================" meter_stat = [None]*len(data) for i in range(len(data)): meter_stat[i]={} meter_stat[i]["average"] = data[i]["avg"] meter_stat[i]["count"] = data[i]["count"] meter_stat[i]["duration"] = data[i]["duration"] meter_stat[i]["duration-end"] = data[i]["duration_end"] meter_stat[i]["duration-start"] = data[i]["duration_start"] meter_stat[i]["max"] = data[i]["max"] meter_stat[i]["min"] = data[i]["min"] meter_stat[i]["period"] = data[i]["period"] meter_stat[i]["period-end"] = data[i]["period_end"] meter_stat[i]["period-start"] = data[i]["period_start"] meter_stat[i]["sum"] = data[i]["sum"] meter_stat[i]["unit"] = data[i]["unit"] meter_stat[i]["group-by"] = data[i]["groupby"] return True, meter_stat else: logger.warn("Inside meter statistics: not an existing meter name") print "Choose a meter from the meter list!" return False,meter_stat
33c717dee32027a1502a5a295b87c5cd67a2c054
3,649,205
from typing import Union
from typing import Tuple
from typing import Text


def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
    """Parse the image size and return (height, width).

    Args:
        image_size: An integer, a tuple (H, W), or a string with HxW format.

    Returns:
        A tuple of integer (height, width).
    """
    if isinstance(image_size, int):
        # image_size is integer, with the same width and height.
        return (image_size, image_size)

    if isinstance(image_size, str):
        # image_size is a string with format WxH
        width, height = image_size.lower().split("x")
        return (int(height), int(width))

    if isinstance(image_size, tuple):
        return image_size

    raise ValueError(
        "image_size must be an int, WxH string, or (height, width) "
        "tuple. Was %r" % image_size
    )
12d8925780914672b1e7d976040596f3178e7e20
3,649,206
def find_last_match(view, what, start, end, flags=0):
    """Find last occurrence of `what` between `start`, `end`."""
    match = view.find(what, start, flags)
    new_match = None
    while match:
        new_match = view.find(what, match.end(), flags)
        if new_match and new_match.end() <= end:
            match = new_match
        else:
            return match
fc863cf00d05a1fb6302a34b5b1e891e3c9eb3d7
3,649,207
import numpy as np


def convert_metrics_per_batch_to_per_sample(metrics, target_masks):
    """
    Args:
        metrics: list of len(num_batches), each element: list of len(num_metrics),
            each element: (num_active_in_batch,) metric per element
        target_masks: list of len(num_batches), each element: (batch_size, seq_len, feat_dim)
            boolean mask: 1s active, 0s ignore
    Returns:
        metrics_array: list of len(num_batches), each element: (batch_size, num_metrics)
            metric per sample
    """
    metrics_array = []
    for b, batch_target_masks in enumerate(target_masks):
        num_active_per_sample = np.sum(batch_target_masks, axis=(1, 2))
        batch_metrics = np.stack(metrics[b], axis=1)  # (num_active_in_batch, num_metrics)
        ind = 0
        metrics_per_sample = np.zeros((len(num_active_per_sample), batch_metrics.shape[1]))  # (batch_size, num_metrics)
        for n, num_active in enumerate(num_active_per_sample):
            new_ind = ind + num_active
            metrics_per_sample[n, :] = np.sum(batch_metrics[ind:new_ind, :], axis=0)
            ind = new_ind
        metrics_array.append(metrics_per_sample)
    return metrics_array
2ceae1402ac0efae841683d426f87a295f3695c8
3,649,208
import asyncio async def get_series(database, series_id): """Get a series.""" series_query = """ select series.id, series.played, series_metadata.name, rounds.tournament_id, tournaments.id as tournament_id, tournaments.name as tournament_name, events.id as event_id, events.name as event_name from series join rounds on series.round_id=rounds.id join series_metadata on series.id=series_metadata.series_id join tournaments on rounds.tournament_id=tournaments.id join events on tournaments.event_id=events.id where series.id=:id """ participants_query = 'select series_id, name, score, winner from participants where series_id=:id' matches_query = 'select id, series_id from matches where series_id=:id' values = {'id': series_id} series, participants, matches = await asyncio.gather( database.fetch_one(series_query, values=values), database.fetch_all(participants_query, values=values), database.fetch_all(matches_query, values=values) ) return dict( series, participants=list(map(dict, participants)), match_ids=list(map(lambda m: m['id'], matches)), tournament=dict( id=series['tournament_id'], name=series['tournament_name'], event=dict( id=series['event_id'], name=series['event_name'] ) ) )
f5e122052209c399c41afcd579f9b16e863c7a28
3,649,209
import numpy as np


def n_mpjpe(predicted, target):
    """
    Normalized MPJPE (scale only), adapted from:
    https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
    """
    assert predicted.shape == target.shape
    norm_predicted = np.mean(np.sum(predicted**2, axis=2, keepdims=True), axis=1, keepdims=True)
    norm_target = np.mean(np.sum(target*predicted, axis=2, keepdims=True), axis=1, keepdims=True)
    scale = norm_target / norm_predicted
    return euclidean_distance_3D(scale * predicted, target)
68656aca6226db3a4cc7670ccc1972d666b11261
3,649,210
import math


def calc_distance(p1, p2):
    """ calculates a distance on a 2d euclidean space, between two points"""
    dist = math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
    return dist
d4005d44d5724c051860fb9aa2edeab1654157c6
3,649,211
import numpy as np


def rgb2ycbcr(img, range=255., only_y=True):
    """same as matlab rgb2ycbcr, please use bgr2ycbcr when using cv2.imread
    img: shape=[h, w, 3]
    range: the data range
    only_y: only return Y channel
    """
    in_img_type = img.dtype
    img = img.astype(np.float32)
    range_scale = 255. / range
    img *= range_scale
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0],
                              [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    rlt /= range_scale
    if in_img_type == np.uint8:
        rlt = rlt.round()
    return rlt.astype(in_img_type)
e3dfd7b35faf437a936813afe537d1d4a41b2f6b
3,649,212
def _create_xctest_bundle(name, actions, binary): """Creates an `.xctest` bundle that contains the given binary. Args: name: The name of the target being built, which will be used as the basename of the bundle (followed by the .xctest bundle extension). actions: The context's actions object. binary: The binary that will be copied into the test bundle. Returns: A `File` (tree artifact) representing the `.xctest` bundle. """ xctest_bundle = derived_files.xctest_bundle( actions = actions, target_name = name, ) args = actions.args() args.add(xctest_bundle.path) args.add(binary) actions.run_shell( arguments = [args], command = ( 'mkdir -p "$1/Contents/MacOS" && ' + 'cp "$2" "$1/Contents/MacOS"' ), inputs = [binary], mnemonic = "SwiftCreateTestBundle", outputs = [xctest_bundle], progress_message = "Creating test bundle for {}".format(name), ) return xctest_bundle
cf6c64b73b7fcbd7df2a5e6bb60e0605b16a8f58
3,649,213
def doFile(path_, *args, **kwargs):
    """Execute a given file from path with arguments."""
    result, reason = loadfile(path_)
    if result:
        data = result(*args, **kwargs)
        if data:
            return data[1]
        error(data[1])
    error(reason)
15c6dd79872b479275717fb8a574a34f92381390
3,649,214
def pformat(obj, verbose=False):
    """
    Prettyprint an object.  Either use the `pretty` library or the builtin
    `pprint`.
    """
    try:
        from pretty import pretty
        return pretty(obj, verbose=verbose)
    except ImportError:
        from pprint import pformat
        return pformat(obj)
7522c9b64650a5056fb22d7fdd0c459ce87ca7c7
3,649,215
def reanalyze_function(*args):
    """
    reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR, bool analyze_parents=False)
    reanalyze_function(func_t pfn, ea_t ea1=0, ea_t ea2=BADADDR)
    reanalyze_function(func_t pfn, ea_t ea1=0)
    reanalyze_function(func_t pfn)
    """
    return _idaapi.reanalyze_function(*args)
52d248fbb82ebb41ff925c42b7cb6856c5cba927
3,649,216
import tensorflow as tf


def categorical_sample_logits(logits):
    """
    Samples (symbolically) from categorical distribution, where logits is a NxK
    matrix specifying N categorical distributions with K categories

    specifically, exp(logits) / sum( exp(logits), axis=1 ) is the
    probabilities of the different classes

    Cleverly uses the Gumbel trick, based on
    https://github.com/tensorflow/tensorflow/issues/456
    """
    U = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1)
c5bf8615fe3c25f392bc3fa27f965527f237ef3e
3,649,217
from math import pi


def mass(d, r):
    """
    computes the right hand side of the differential equation of mass continuity
    """
    return 4 * pi * d * r * r
1924309951e35d36b51fe92389c3fa68fac3ebfa
3,649,218
def bord(u):
    """
    Execution trace of bord("undébutuntructrucundébut"):

        i   suffix                    estPréfixe
        23  ndébutuntructrucundébut   False
        22  débutuntructrucundébut    False
        21  ébutuntructrucundébut     False
        20  butuntructrucundébut      False
        19  utuntructrucundébut       False
        18  tuntructrucundébut        False
        17  untructrucundébut         False
        16  ntructrucundébut          False
        15  tructrucundébut           False
        14  ructrucundébut            False
        13  uctrucundébut             False
        12  ctrucundébut              False
        11  trucundébut               False
        10  rucundébut                False
        9   ucundébut                 False
        8   cundébut                  False
        7   undébut                   True
    """
    suffix = ""
    for i in reversed(range(0, len(u))):
        suffix = u[len(u)-i:len(u)]
        if estPréfixe(u, suffix):
            break
    return suffix
950eaac804a0788c9d2f845d594b7781d5ea9aa4
3,649,219
def is_unique_n_bit_vector(string: str) -> bool:
    """
    Similar to the dict solution, it just uses a bit vector instead of a dict or array.
    """
    vector = 0
    for letter in string:
        if vector & 1 << ord(letter):
            return False
        vector |= 1 << ord(letter)
    return True
d19609f1fb1e6a189a9adb11b37a96632c8d0958
3,649,220
def seq2msk(isq): """ Convert seqhis into mskhis OpticksPhoton.h uses a mask but seq use the index for bit-bevity:: 3 enum 4 { 5 CERENKOV = 0x1 << 0, 6 SCINTILLATION = 0x1 << 1, 7 MISS = 0x1 << 2, 8 BULK_ABSORB = 0x1 << 3, 9 BULK_REEMIT = 0x1 << 4, """ ifl = np.zeros_like(isq) for n in range(16): msk = 0xf << (4*n) ## nibble mask nib = ( isq & msk ) >> (4*n) ## pick the nibble and shift to pole position flg = 1 << ( nib[nib>0] - 1 ) ## convert flag bit index into flag mask ifl[nib>0] |= flg pass return ifl
950dc8fe1fcc275f7a90e695816ea1777cc5164e
3,649,221
def split(ich): """ Split a multi-component InChI into InChIs for each of its components. (fix this for /s [which should be removed in split/join operations] and /m, which is joined as /m0110.. with no separators) :param ich: InChI string :type ich: str :rtype: tuple(str) """ fml_slyr = formula_sublayer(ich) main_dct = main_sublayers(ich) char_dct = charge_sublayers(ich) ste_dct = stereo_sublayers(ich) iso_dct = isotope_sublayers(ich) fml_slyrs = _split_sublayer_string( fml_slyr, count_sep_ptt='', sep_ptt=app.escape('.')) count = len(fml_slyrs) main_dcts = _split_sublayers(main_dct, count) char_dcts = _split_sublayers(char_dct, count) ste_dcts = _split_sublayers(ste_dct, count) iso_dcts = _split_sublayers(iso_dct, count) ichs = tuple(from_data(fml_slyr=fml_slyr, main_lyr_dct=main_dct, char_lyr_dct=char_dct, ste_lyr_dct=ste_dct, iso_lyr_dct=iso_dct) for fml_slyr, main_dct, char_dct, ste_dct, iso_dct in zip(fml_slyrs, main_dcts, char_dcts, ste_dcts, iso_dcts)) return ichs
0db3bee951e38f7db8cbcdb02a64ed28b9562e9d
3,649,222
def crtb_cb(client, crtb):
    """Wait for the crtb to have the userId populated"""
    def cb():
        c = client.reload(crtb)
        return c.userId is not None
    return cb
eff248a877e195e59d2f6db812af2ff43955aee0
3,649,223
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Bidirectional, Dense, Activation


def create_network(network_input, n_alphabets):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(512, input_shape=(network_input.shape[1], network_input.shape[2]), return_sequences=True))
    model.add(Dropout(0.3))
    model.add(Bidirectional(LSTM(512, return_sequences=True)))
    model.add(Dropout(0.3))
    model.add(Bidirectional(LSTM(512)))
    model.add(Dense(256))
    model.add(Dropout(0.3))
    model.add(Dense(n_alphabets))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
dd6610f0db02d0d20fb457d91346144494ad32e4
3,649,224
def create_derivative_graph(f, xrange, n): """Takes a function as an input with a specific interval xrange, then creates a list with the ouput y-points for the nth derivative of f. :param f: Input function that we wish to take the derivative of. :type f: lambda :param xrange: The interval on which to evaluate f^n(x). :type xrange: list :param n: The derivative (1st, 2nd, 3rd, etc) :type n: int :return: A list of all f^n(x) points for all x in xrange. :rtype: list of floats """ plot_points = [] for x in xrange: plot_points.append(nth_derivative(f, x, n)) return plot_points
782d26d22c93ae4b05d075fbf4075a8bba9d89b8
3,649,225
def _matching_not_matching(on, **kwargs):
    """ Change the text for matching/not matching """
    text = "matching" if not on else "not matching"
    classname = "colour-off" if not on else "colour-on"
    return text, classname
aeefa7f16e3268ffe7af93db72490abe053370b2
3,649,226
import os def prepare_testenv(config=None, template=None, args=None): """ prepare an engine-ready environment for a test This utility method is used to provide an `RelengEngine` instance ready for execution on an interim working directory. Args: config (optional): dictionary of options to mock for arguments template (optional): the folder holding a template project to copy into the prepared working directory args (optional): additional arguments to add to the "forwarded options" Yields: the engine """ class MockArgs(object): def __getattr__(self, name): return self.name if name in self.__dict__ else None if config is None: config = {} with generate_temp_dir() as work_dir, interim_working_dir(work_dir): # force root directory to temporary directory; or configure all working # content based off the generated temporary directory if 'root_dir' not in config: config['root_dir'] = work_dir else: if 'cache_dir' not in config: config['cache_dir'] = os.path.join(work_dir, 'cache') if 'dl_dir' not in config: config['dl_dir'] = os.path.join(work_dir, 'dl') if 'out_dir' not in config: config['out_dir'] = os.path.join(work_dir, 'out') if template: templates_dir = os.path.join(find_test_base(), 'templates') template_dir = os.path.join(templates_dir, template) if not path_copy(template_dir, work_dir, critical=False): assert False, 'failed to setup template into workdir' # build arguments instance test_args = MockArgs() for k, v in config.items(): setattr(test_args, k, v) # prepare engine options and build an engine instance opts = RelengEngineOptions(args=test_args, forward_args=args) engine = RelengEngine(opts) yield engine
f1d021ed0eedcc9b0481ba3524581e5fd5aa241b
3,649,227
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None): """ take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in the sequence, feeding the predictions back into the model each time. Clearly the sampling has quadratic complexity unlike an RNN that is only linear, and has a finite context window of block_size, unlike an RNN that has an infinite context window. """ block_size = model.block_size for k in range(steps): x_cond = x if x.shape[1] <= block_size else x[:, -block_size:] # crop context if needed logits = model(x_cond) # pluck the logits at the final step and scale by temperature logits = logits[:, -1, :] / temperature # optionally crop probabilities to only the top k options if top_k is not None: logits = top_k_logits(logits, top_k) # apply softmax to convert to probabilities probs = tf.nn.softmax(logits, axis=-1) # sample from the distribution or take the most likely if sample: ix = tf.random.categorical(logits,1,dtype=tf.int32) else: _, ix = tf.math.top_k(probs, k=1) # append to the sequence and continue x = tf.concat((x,ix), axis=1) return x
532ad7e1af4c7b059bbd12a8584c469bcb5d079e
3,649,228
def vstack(arg_list):
    """Wrapper on vstack to ensure list argument.
    """
    return Vstack(*arg_list)
95215c8277da6b86c21220021d667ae3dcc05440
3,649,229
import json def metadata_to_list(metadata): """Transform a metadata dictionary retrieved from Cassandra to a list of tuples. If metadata items are lists they are split into multiple pairs in the result list :param metadata: dict""" res = [] for k, v in metadata.iteritems(): try: val_json = json.loads(v) val = val_json.get('json', '') # If the value is a list we create several pairs in the result if isinstance(val, list): for el in val: res.append((k, el)) else: if val: res.append((k, val)) except ValueError: if v: res.append((k, v)) return res
1044a93742a635e72e443d3a5c2e5805702d1602
3,649,230
def GetSetUpAndResponse(): """ This method is called by an API acting as a client while performing the PSI protocol. This method initialises a server object. (This API acts as server) This method uses the PSI Request ID given by the calling API to identify the corresponding node list from the server directory. This method then encrypts all elements in the node list using a newly generated secret key and creates a server set up message (serialized protobuf) with the encrypted elements. This method converts the byte stream of data received to a serialized protobuf. This protobuf is the request message created by the client object in the calling API. This request message contains a list of nodes that are encrypted by the client's secret key. This method then encrypts the each encrypted node with its server secret key generated earlier and creates a response message (Protobuf Form) Both the set up message and response message are converted to JSON and returned to the calling API. --- parameters: client_id: type: string (API ID) description: API ID of the API that has called this method. (API ID of the client) required: True set_size: type: int description: Size of client node list. required: True psi_request_id: type: string (Request ID) description: Request ID of the PSI protocol that is occuring currently. This is used to retrieved the corresponding node list from the server directory. required: True data: description: Request message generated by the client object of the calling API. This message has been serialized to a bytestream from a protobuf format. This message contains a list of nodes that have been encrypted by the secret key of the client object created by the calling API. responses: 200: description: Returns a set up and response message in JSON format. schema: setup: type: JSON description: Protobuf message containing a list of server nodes encrypted using the server's secret key. (Converted to JSON format) resp: type: JSON description: Protobuf message containing a list of client nodes encrypted using both the client and server's secret key. (Converted to JSON format) """ PrintInfo("Server setup request received...") PrintInfo("Client ID: " + str(request.args.get("client_id"))) ClientSetSize = int(request.args.get("set_size")) ClientRequestMessage = request.data PrintDebug("Client Set Size: " + str(ClientSetSize)) dstReq = psi.Request() dstReq.ParseFromString(ClientRequestMessage) ClientRequest = dstReq fpr = 1.0 / (1000000000) s = psi.server.CreateWithNewKey(True) PsiRequestID = request.args.get("psi_request_id") ServerSet = ServerDirectory.get(PsiRequestID) setup = s.CreateSetupMessage(fpr, ClientSetSize, ServerSet) resp = s.ProcessRequest(ClientRequest) setupJson = MessageToJson(setup) respJson = MessageToJson(resp) DisposeServerSet(PsiRequestID) return {"setup": setupJson, "resp": respJson}
b8795f5aaebcf28c448e81d725a3b4235c5490c2
3,649,231
from typing import Optional from typing import Union import torch from pathlib import Path import json def load_separator( model_str_or_path: str = "umxhq", niter: int = 1, residual: bool = False, slicq_wiener: bool = False, wiener_win_len: Optional[int] = 300, device: Union[str, torch.device] = "cpu", pretrained: bool = True, ): """Separator loader Args: model_str_or_path (str): Model name or path to model _parent_ directory E.g. The following files are assumed to present when loading `model_str_or_path='mymodel', targets=['vocals']` 'mymodel/separator.json', mymodel/vocals.pth', 'mymodel/vocals.json'. Defaults to `umxhq`. targets (list of str or None): list of target names. When loading a pre-trained model, all `targets` can be None as all targets will be loaded device (str): torch device, defaults to `cpu` pretrained (bool): determines if loading pre-trained weights """ model_path = Path(model_str_or_path).expanduser() # when path exists, we assume its a custom model saved locally if model_path.exists(): with open(Path(model_path, "separator.json"), "r") as stream: enc_conf = json.load(stream) xumx_model, model_nsgt, jagged_slicq_sample = load_target_models( model_str_or_path=model_path, pretrained=pretrained, sample_rate=enc_conf["sample_rate"], device=device ) separator = model.Separator( xumx_model, model_nsgt, jagged_slicq_sample, stft_wiener=not slicq_wiener, sample_rate=enc_conf["sample_rate"], nb_channels=enc_conf["nb_channels"], ).to(device) return separator
2cb2d951d669c7d08a3bf3cabc5c49a11ca717fc
3,649,232
def IntermediateParticleConst_get_decorator_type_name(): """IntermediateParticleConst_get_decorator_type_name() -> std::string""" return _RMF.IntermediateParticleConst_get_decorator_type_name()
efb869aece5ad0f19e06f5d1a13e89998cde53a8
3,649,233
def multiply_add_plain_with_delta(ct, pt, context_data): """Add plaintext to ciphertext. Args: ct (Ciphertext): ct is pre-computed carrier polynomial where we can add pt data. pt (Plaintext): A plaintext representation of integer data to be encrypted. context (Context): Context for extracting encryption parameters. Returns: A Ciphertext object with the encrypted result of encryption process. """ ct_param_id = ct.param_id coeff_modulus = context_data.param.coeff_modulus pt = pt.data plain_coeff_count = len(pt) delta = context_data.coeff_div_plain_modulus ct0, ct1 = ct.data # here ct = pk * u * e # Coefficients of plain m multiplied by coeff_modulus q, divided by plain_modulus t, # and rounded to the nearest integer (rounded up in case of a tie). Equivalent to for i in range(plain_coeff_count): for j in range(len(coeff_modulus)): temp = round(delta[j] * pt[i]) % coeff_modulus[j] ct0[j][i] = (ct0[j][i] + temp) % coeff_modulus[j] return CipherText([ct0, ct1], ct_param_id)
4f004cc443d183f25cf35bc691c9797b4a8a5875
3,649,234
def ginput(n=1, timeout=30, debug=False):
    """
    Simple functional call for physicists. This will wait for n clicks from the
    user and return a list of the coordinates of each click.
    """
    x = GaelInput()
    return x(n, timeout, debug)
60c7c89774fcaee0143003fc6e2d66a4dc1c356b
3,649,235
import json from datetime import datetime def retrieve_form_data(form, submission_type="solution"): """Quick utility function that groups together the processing of request data. Allows for easier handling of exceptions Takes request object as argument On Success, returns hashmap of processed data...otherwise raise an exception""" if submission_type == "solution": processed_data = {} try: print("FCD =>", form.cleaned_data) processed_data["prob_id"] = int(form.cleaned_data.get("problem_id")) processed_data["uid"] = int(form.cleaned_data.get("user_id")) processed_data["code_data"] = form.cleaned_data.get("solution") processed_data["course_id"] = form.cleaned_data.get("course_id", None) except Exception as e: print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e))) return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST) return processed_data elif submission_type == "problem_upload": data = form.cleaned_data processed_data = {} try: processed_data["author_id"] = int(data.get("author_id")) processed_data["category"] = data.get("category") processed_data["target_file"] = data.get("target_file", None) processed_data["data_file"] = data.get("data_file", None) processed_data["course_id"] = data.get("course_id", None) if processed_data["data_file"] is not None: processed_data["data_file"].seek(0) processed_data["init_data"] = processed_data["data_file"].read().decode("utf-8") try: json.loads(processed_data["init_data"]) except Exception as e: raise Exception("Invalid JSON in init_data_file! - {0}".format(str(e))) else: processed_data["init_data"] = None processed_data["name"] = data.get("name").replace("(", "[").replace(")", "]") if "(" in processed_data["name"] or ")" in processed_data["name"]: print("POST NOT OK: Problem Name cannot contain parnetheses!") return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST) description = data.get("description") processed_data["program_file"] = data.get("program") processed_data["code"] = [line.decode("utf-8") for line in processed_data["program_file"].read().splitlines()] processed_data["metadata"] = data.get("meta_file") processed_data["metadata"]["description"] = description processed_data["date_submitted"] = datetime.now() processed_data["inputs"] = data.get("inputs", None) if processed_data["category"] == "file_io": processed_data["metadata"]["inputs"] = "file" else: processed_data["metadata"]["inputs"] = True if processed_data["inputs"] is not None else False processed_data["metadata"]["init_data"] = True if processed_data["init_data"] is not None else False except Exception as e: print("POST NOT OK: Error during intial processing of uploaded data - {0}".format(str(e))) return Response(ERROR_CODES["Form Submission Error"], status=status.HTTP_400_BAD_REQUEST) return processed_data
4ab635ac226ebb7811baf2d0e3d71c8cfc25b1da
3,649,236
import os
import argparse as ap


def existing_file(fname):
    """
    Check if the file exists. If not raise an error

    Parameters
    ----------
    fname: string
        file name to parse

    Returns
    -------
    fname : string
    """
    if os.path.isfile(fname):
        return fname
    else:
        msg = "The file '{}' does not exist".format(fname)
        raise ap.ArgumentTypeError(msg)
2e93559868588398c255d512a6009df0556df742
3,649,237
def keep_english_for_spacy_nn(df):
    """Take the songs DataFrame and keep only songs whose main language is English,
    for the English version of the spaCy neural network used for word processing"""
    # Keep only English for spaCy NN English word preprocessing
    # Networks for other languages like French, Spanish, Portuguese are also available
    df = df.loc[df['Main Language'] == 'en', :]
    # Drop the translation column, not used for lyrics already in English
    df.drop(['English Translation Lyrics'], axis=1, inplace=True)
    return df
e24402fa91ee0444c86867c98777fbd3cb7c9894
3,649,238
def make_dealer_cards_more_fun(deck, dealer): """ to make dealercards more fun to make dealer win this game more. :param dealercards: dealercards :return: none maybe has a lot of memory work will arise. """ dealercards = card_sorting_dealer(dealer) count = 0 if jokbo(dealercards) == 0 or jokbo(dealercards) == 1 or jokbo(dealercards) == 2: deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]}) deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]}) deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]}) deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]}) deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]}) if jokbo(dealercards) == 0: #하이카드 while True: count += 1 dealer = [] card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) dealercards = card_sorting_dealer(dealer) if count == 30: break if jokbo(dealercards) != 0 and jokbo(dealercards) != 1: break else: deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]}) deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]}) deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]}) deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]}) deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]}) continue return dealer elif jokbo(dealercards) == 1: #원페어 while True: count += 1 dealer = [] card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) card, deck = hit(deck) dealer.append(card) dealercards = card_sorting_dealer(dealer) if count == 30: break if jokbo(dealercards) != 0 and jokbo(dealercards) != 1: break else: deck.append({"suit": dealer[0]["suit"], "rank": dealer[0]["rank"]}) deck.append({"suit": dealer[1]["suit"], "rank": dealer[1]["rank"]}) deck.append({"suit": dealer[2]["suit"], "rank": dealer[2]["rank"]}) deck.append({"suit": dealer[3]["suit"], "rank": dealer[3]["rank"]}) deck.append({"suit": dealer[4]["suit"], "rank": dealer[4]["rank"]}) continue return dealer else: return dealer
5adf8fbeeb53124c75ec43a13492d7aef1ebdc7e
3,649,239
import os import configparser import logging def subinit1_initpaths_config_log(): """ Initializes the paths (stored in global __PATHS): 1 Finds the project location 2 Reads config.ini 3 Reads the paths defined in config.ini 4 Checks that the paths exist """ # -------------------------------------------------------------------------------- 1) FUNDAMENTAL PATHS # - project root + directory PATHS = {'Proj': os.path.dirname(os.path.abspath(__file__)),} # Proj directory is well-defined. All paths are relative to the root (Paths['Proj']) toAbsPath = lambda PATHS_key, relpath: os.path.join(PATHS[PATHS_key], relpath) # ............................................................... a) Subdirs of root PATHS['Inputs and resources'] = toAbsPath('Proj', 'Inputs and resources') PATHS['Results'] = toAbsPath('Proj', 'Results') PATHS['To WRF'] = toAbsPath('Proj', 'To WRF') # ............................................................... b) Subdirs of Inputs and resources PATHS['Fundamentals'] = toAbsPath('Inputs and resources', 'Fundamentals') PATHS['Gen Parameters'] = toAbsPath('Inputs and resources', 'Gen Parameters') PATHS['Other Parameters'] = toAbsPath('Inputs and resources', 'Other Parameters') PATHS['Fundamentals', 'fuels'] = toAbsPath('Fundamentals', 'fuels') PATHS['Gen Parameters', 'efficiency curves'] = toAbsPath('Gen Parameters', 'efficiency curves') # ............................................................... c) Subdirs of To WRF PATHS['WRF resources'] = toAbsPath('To WRF', 'Resources') # -------------------------------------------------------------------------------- 2) Read CONFIG PATHS['config'] = toAbsPath('Inputs and resources', 'config.ini') config = configparser.ConfigParser() config.read(PATHS['config']) # -------------------------------------------------------------------------------- 3) Start log PATHS['log'] = toAbsPath('Inputs and resources', config['log']['file_name']) logging.basicConfig(filename=PATHS['log'], level=eval(config['log']['level']), filemode='w') logging.info("[PROGRAM START] at {}.\nInitialization commencing. \n " "---------------------------------------------------------------".format(log_time())) # -------------------------------------------------------------------------------- 4) __PATHS from CONFIG # Q: Why is the metadata file configurable? # A: If all inputs are configurable, and the metadata is part of the input, then rightfully so. PATHS['PP database'] = toAbsPath('Inputs and resources', config['paths']['fp_powerplant_database']) PATHS['metadata'] = toAbsPath('Inputs and resources', config['paths']['fp_metadata']) PATHS['pint defn'] = toAbsPath('Inputs and resources', config['data import settings']['pint_unitdefn']) # -------------------------------------------------------------------------------- 5) Check that all dir/file exists donotexist = tuple(key for key, fp in PATHS.items() if not os.path.exists(fp)) if donotexist: strgen = ("\t{}: '{}'".format(key, PATHS[key]) for key in donotexist) raise FileNotFoundError("The ff. paths or files were not found: \n{}\n\nPls. double check that " "config.ini (section 'paths') points to these required paths in the project " "directory, and that the project directory system was not altered.".format( '\n'.join(strgen))) return PATHS, config
d2edfc46736b742cb2707b3c90915889e729f7ff
3,649,240
def data(request): """Returns available albums from the database. Can be optionally filtered by year. This is called from templates/albums/album/index.html when the year input is changed. """ year = request.GET.get('year') if year: try: year = int(year) except (ValueError, TypeError): return HttpResponseBadRequest('invalid year parameter') else: year = None return JsonResponse(list(get_albums(year)), safe=False)
8390bcc6fd2bcc109930cb34b3269b450c12a87c
3,649,241
from typing import Tuple

from scipy.spatial.transform import Rotation


def yaw_to_quaternion3d(yaw: float) -> Tuple[float, float, float, float]:
    """
    Args:
        yaw: rotation about the z-axis

    Returns:
        qx, qy, qz, qw: quaternion coefficients
    """
    qx, qy, qz, qw = Rotation.from_euler('z', yaw).as_quat()
    return qx, qy, qz, qw
263a0b12e0c165f929c5004cdb67b8133f117140
3,649,242
def parse_coap_response_code(response_code): """ Parse the binary code from CoAP response and return the response code as a float. See also https://tools.ietf.org/html/rfc7252#section-5.9 for response code definitions. :rtype float """ response_code_class = response_code // 32 response_code_detail = response_code % 32 # Compose response code return response_code_class + response_code_detail / 100
9a8165f205ec2f6fe8576e18a831498f82834a10
3,649,243
from functools import reduce def modified_partial_sum_product( sum_op, prod_op, factors, eliminate=frozenset(), plate_to_step=dict() ): """ Generalization of the tensor variable elimination algorithm of :func:`funsor.sum_product.partial_sum_product` to handle markov dimensions in addition to plate dimensions. Markov dimensions in transition factors are eliminated efficiently using the parallel-scan algorithm in :func:`funsor.sum_product.sequential_sum_product`. The resulting factors are then combined with the initial factors and final states are eliminated. Therefore, when Markov dimension is eliminated ``factors`` has to contain a pairs of initial factors and transition factors. :param ~funsor.ops.AssociativeOp sum_op: A semiring sum operation. :param ~funsor.ops.AssociativeOp prod_op: A semiring product operation. :param factors: A collection of funsors. :type factors: tuple or list :param frozenset eliminate: A set of free variables to eliminate, including both sum variables and product variable. :param dict plate_to_step: A dict mapping markov dimensions to ``step`` collections that contain ordered sequences of Markov variable names (e.g., ``{"time": frozenset({("x_0", "x_prev", "x_curr")})}``). Plates are passed with an empty ``step``. :return: a list of partially contracted Funsors. :rtype: list """ assert callable(sum_op) assert callable(prod_op) assert isinstance(factors, (tuple, list)) assert all(isinstance(f, Funsor) for f in factors) assert isinstance(eliminate, frozenset) assert isinstance(plate_to_step, dict) # process plate_to_step plate_to_step = plate_to_step.copy() prev_to_init = {} for key, step in plate_to_step.items(): # map prev to init; works for any history > 0 for chain in step: init, prev = chain[: len(chain) // 2], chain[len(chain) // 2 : -1] prev_to_init.update(zip(prev, init)) # convert step to dict type required for MarkovProduct plate_to_step[key] = {chain[1]: chain[2] for chain in step} plates = frozenset(plate_to_step.keys()) sum_vars = eliminate - plates prod_vars = eliminate.intersection(plates) markov_sum_vars = frozenset() for step in plate_to_step.values(): markov_sum_vars |= frozenset(step.keys()) | frozenset(step.values()) markov_sum_vars &= sum_vars markov_prod_vars = frozenset( k for k, v in plate_to_step.items() if v and k in eliminate ) markov_sum_to_prod = defaultdict(set) for markov_prod in markov_prod_vars: for k, v in plate_to_step[markov_prod].items(): markov_sum_to_prod[k].add(markov_prod) markov_sum_to_prod[v].add(markov_prod) var_to_ordinal = {} ordinal_to_factors = defaultdict(list) for f in factors: ordinal = plates.intersection(f.inputs) ordinal_to_factors[ordinal].append(f) for var in sum_vars.intersection(f.inputs): var_to_ordinal[var] = var_to_ordinal.get(var, ordinal) & ordinal ordinal_to_vars = defaultdict(set) for var, ordinal in var_to_ordinal.items(): ordinal_to_vars[ordinal].add(var) results = [] while ordinal_to_factors: leaf = max(ordinal_to_factors, key=len) leaf_factors = ordinal_to_factors.pop(leaf) leaf_reduce_vars = ordinal_to_vars[leaf] for (group_factors, group_vars) in _partition( leaf_factors, leaf_reduce_vars | markov_prod_vars ): # eliminate non markov vars nonmarkov_vars = group_vars - markov_sum_vars - markov_prod_vars f = reduce(prod_op, group_factors).reduce(sum_op, nonmarkov_vars) # eliminate markov vars markov_vars = group_vars.intersection(markov_sum_vars) if markov_vars: markov_prod_var = [markov_sum_to_prod[var] for var in markov_vars] assert all(p == markov_prod_var[0] for p in markov_prod_var) if 
len(markov_prod_var[0]) != 1: raise ValueError("intractable!") time = next(iter(markov_prod_var[0])) for v in sum_vars.intersection(f.inputs): if time in var_to_ordinal[v] and var_to_ordinal[v] < leaf: raise ValueError("intractable!") time_var = Variable(time, f.inputs[time]) group_step = { k: v for (k, v) in plate_to_step[time].items() if v in markov_vars } f = MarkovProduct(sum_op, prod_op, f, time_var, group_step) f = f.reduce(sum_op, frozenset(group_step.values())) f = f(**prev_to_init) remaining_sum_vars = sum_vars.intersection(f.inputs) if not remaining_sum_vars: results.append(f.reduce(prod_op, leaf & prod_vars - markov_prod_vars)) else: new_plates = frozenset().union( *(var_to_ordinal[v] for v in remaining_sum_vars) ) if new_plates == leaf: raise ValueError("intractable!") f = f.reduce(prod_op, leaf - new_plates - markov_prod_vars) ordinal_to_factors[new_plates].append(f) return results
24d5f529d03eeb3a332cc861fdabff3a0d613d37
3,649,244
def load_scicar_cell_lines(test=False): """Download sci-CAR cell lines data from GEO.""" if test: adata = load_scicar_cell_lines(test=False) adata = subset_joint_data(adata) return adata return load_scicar( rna_url, rna_cells_url, rna_genes_url, atac_url, atac_cells_url, atac_genes_url, )
4760b41e2a29125ba9eaf597c555b1b40e338612
3,649,245
def binary_search(sorted_list, item): """ Implements a Binary Search, O(log n). If item is is list, returns amount of steps. If item not in list, returns None. """ steps = 0 start = 0 end = len(sorted_list) while start < end: steps += 1 mid = (start + end) // 2 # print("#", mid) if sorted_list[mid] == item: return steps # If the item is lesser than the list # item == 3 and sorted_list == [1, 2, 3, 4, 5, 6, 8] # the END of my list becomes the middle (4), excluding all items from the middle to the end # end == 4 # next time, when mid = (start + end) // 2 executes, mid == 2 if sorted_list[mid] > item: end = mid # If the item is bigger than the list # item == 8 and sorted_list == [1, 2, 3, 4, 5, 6, 8] # the START of my list will be the middle (4) plus 1, excluding all items from the middle to the begginning # start == 5 # next time, when mid = (start + end) // 2 executes, mid == 8 if sorted_list[mid] < item: start = mid + 1 return None
30b1bba330752455d932b4c6cf1ad4dab5969db3
3,649,246
from typing import List import os def _process_split( pipeline, *, filename_template: naming.ShardedFileTemplate, out_dir: utils.ReadWritePath, file_infos: List[naming.FilenameInfo], ): """Process a single split.""" beam = lazy_imports_lib.lazy_imports.apache_beam # Use unpack syntax on set to implicitly check that all values are the same split_name, = {f.split for f in file_infos} # Check that all the file-info from the given split are consistent # (no missing file) shard_ids = sorted(f.shard_index for f in file_infos) num_shards, = {f.num_shards for f in file_infos} assert shard_ids == list(range(num_shards)), 'Missing shard files.' # Check that the file extension is correct. file_suffix, = {f.filetype_suffix for f in file_infos} file_format = file_adapters.file_format_from_suffix(file_suffix) adapter = file_adapters.ADAPTER_FOR_FORMAT[file_format] data_dir = utils.as_path(filename_template.data_dir) # Build the pipeline to process one split return (pipeline | beam.Create(file_infos) | beam.Map(_process_shard, data_dir=data_dir, adapter=adapter) # Group everything in a single elem (_ShardInfo -> List[_ShardInfo]) | _group_all() # pytype: disable=missing-parameter # pylint: disable=no-value-for-parameter | beam.Map(_merge_shard_info, filename_template=filename_template) | beam.Map(_split_info_to_json_str) | beam.io.WriteToText( # pytype: disable=missing-parameter os.fspath(out_dir / _out_filename(split_name)), num_shards=1, shard_name_template='', ))
cb5134cb5213de817d49ad9239ef96be8c8e750b
3,649,247
def _scale_by(number, should_fail=False): """ A helper function that creates a scaling policy and scales by the given number, if the number is not zero. Otherwise, just triggers convergence. :param int number: The number to scale by. :param bool should_fail: Whether or not the policy execution should fail. :return: A function that can be passed to :func:`_oob_disable_then` as the ``then`` parameter. """ def _then(helper, rcs, group): policy = ScalingPolicy(scale_by=number, scaling_group=group) return (policy.start(rcs, helper.test_case) .addCallback(policy.execute, success_codes=[403] if should_fail else [202])) return _then
046dcaf120d4c04578e9562b23f76f1cb8f98690
3,649,248
import traceback


def selectgender(value):
    """Format the gender code as its display value.

    :param value: M/F
    :return: 男/女 (male/female)
    """
    absent = {"M": u'男', "F": u'女'}
    try:
        if value:
            return absent[value]
        return ""
    except:
        traceback.print_exc()
7b6b0b41b5ea8d3eaab5574881b40f5c00da73cd
3,649,249
def Clifford_twirl_channel_one_qubit(K, rho, sys=1, dim=[2]): """ Twirls the given channel with Kraus operators in K by the one-qubit Clifford group on the given subsystem (specified by sys). """ n = int(np.log2(np.sum([d for d in dim]))) C1 = eye(2**n) C2 = Rx_i(sys, np.pi, n) C3 = Rx_i(sys, np.pi / 2.0, n) C4 = Rx_i(sys, -np.pi / 2.0, n) C5 = Rz_i(sys, np.pi, n) C6 = Rx_i(sys, np.pi, n) * Rz_i(sys, np.pi, n) C7 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi, n) C6 = Rx_i(sys, np.pi, n) * Rz_i(sys, np.pi, n) C8 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi, n) C9 = Rz_i(sys, np.pi / 2.0, n) C10 = Ry_i(sys, np.pi, n) * Rz_i(sys, np.pi / 2.0, n) C11 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C12 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C13 = Rz_i(sys, -np.pi / 2.0, n) C14 = Ry_i(sys, np.pi, n) * Rz_i(sys, -np.pi / 2.0, n) C15 = Ry_i(sys, -np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n) C16 = Ry_i(sys, np.pi / 2.0, n) * Rz_i(sys, -np.pi / 2.0, n) C17 = ( Rz_i(sys, -np.pi / 2.0, n) * Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) ) C18 = ( Rz_i(sys, np.pi / 2.0, n) * Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) ) C19 = Rz_i(sys, np.pi, n) * Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C20 = Rx_i(sys, np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C21 = ( Rz_i(sys, np.pi / 2.0, n) * Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) ) C22 = ( Rz_i(sys, -np.pi / 2.0, n) * Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) ) C23 = Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C24 = Rx_i(sys, np.pi, n) * Rx_i(sys, -np.pi / 2.0, n) * Rz_i(sys, np.pi / 2.0, n) C = [ C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16, C17, C18, C19, C20, C21, C22, C23, C24, ] rho_twirl = 0 for i in range(len(C)): rho_twirl += ( (1.0 / 24.0) * C[i] @ apply_channel(K, dag(C[i]) @ rho @ C[i], sys, dim) @ dag(C[i]) ) return rho_twirl, C
1225c8689641e245d7666c75f9e31d862f1efe56
3,649,250
def unpack_batch(batch, use_cuda=False): """ Unpack a batch from the data loader. """ input_ids = batch[0] input_mask = batch[1] segment_ids = batch[2] boundary_ids = batch[3] pos_ids = batch[4] rel_ids = batch[5] knowledge_feature = batch[6] bio_ids = batch[1] # knowledge_adjoin_matrix = batch[7] # know_segment_ids = batch[6] # know_input_ids = batch[7] # know_input_mask = batch[8] # knowledge_feature = (batch[6], batch[7], batch[8]) return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids
6bc8bc9b3c8a9e2b40ac08e67c9fbcf84914e2eb
3,649,251
def truncate(text: str, length: int = 255, end: str = "...") -> str: """Truncate text. Parameters --------- text : str length : int, default 255 Max text length. end : str, default "..." The characters that come at the end of the text. Returns ------- truncated text : str Examples -------- .. code-block:: html <meta property="og:title" content="^^ truncate(title, 30) ^^">""" return f"{text[:length]}{end}"
f14605542418ca95e4752be7ec2fea189b9454ce
3,649,252
import logging
from os import path
from logging.handlers import RotatingFileHandler


def create_logger(log_dir, log_file, level="info"):
    """
    Function used to create logger object based on log directory and log file name
    """
    handler = RotatingFileHandler(filename=path.join(log_dir, log_file), mode='a',
                                  maxBytes=5000000, backupCount=10)
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(filename)s, %(lineno)d [%(name)s]: %(message)s',
                                  '%d-%b-%y %H:%M:%S')
    handler.setFormatter(formatter)
    logger = logging.getLogger(__name__)
    log_level = _get_log_level(level)
    logger.setLevel(log_level)
    logger.addHandler(handler)
    return logger
128abb796c7a602dee3d835c3f3c39ccf5f07c56
3,649,253
def gaussian_slice(x, sigma, mu): """ return a slice of x in which the gaussian is significant exp(-0.5 * ((x - mu) / sigma) ** 2) < given_threshold """ r = sigma * sp.sqrt(-2.0 * sp.log(small_thr)) x_lo = bisect_left(x, mu - r) x_hi = bisect_right(x, mu + r) return slice(x_lo, x_hi)
25ed1bf4423e8d86baaebec54e4478b58b58365c
3,649,254
def preview(delivery_id):
    """
    Print preview
    :param delivery_id:
    :return:
    """
    delivery_info = get_delivery_row_by_id(delivery_id)
    # Check that the resource exists
    if not delivery_info:
        abort(404)
    # Check whether the resource has been deleted
    if delivery_info.status_delete == STATUS_DEL_OK:
        abort(410)

    delivery_print_date = time_utc_to_local(delivery_info.update_time).strftime('%Y-%m-%d')
    delivery_code = '%s%s' % (g.ENQUIRIES_PREFIX,
                              time_utc_to_local(delivery_info.create_time).strftime('%y%m%d%H%M%S'))

    # Get the customer company information
    customer_info = get_customer_row_by_id(delivery_info.customer_cid)
    # Get the customer contact details
    customer_contact_info = get_customer_contact_row_by_id(delivery_info.customer_contact_id)
    # Get the information of the user who made the delivery
    user_info = get_user_row_by_id(delivery_info.uid)

    delivery_items = get_delivery_items_rows(delivery_id=delivery_id)

    # Document information
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('delivery preview')

    template_name = 'delivery/preview.html'

    return render_template(
        template_name,
        delivery_id=delivery_id,
        delivery_info=delivery_info,
        customer_info=customer_info,
        customer_contact_info=customer_contact_info,
        user_info=user_info,
        delivery_items=delivery_items,
        delivery_print_date=delivery_print_date,
        delivery_code=delivery_code,
        **document_info
    )
d57acf49d7692fe4da02607695ad71fdad1758e5
3,649,255
import requests


def request_with_json(json_payload):
    """ POST the given JSON payload to the interpolation service and return the decoded JSON response. """
    test_response = requests.post(INTERP_URL, json=json_payload)
    test_response_json = test_response.json()
    return test_response_json
5222060788ce321d258fa23309f5894640a70589
3,649,256
import numpy as np
import pandas as pd


def correlation(df, rowvar=False):
    """
    Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``

    Input data is masked to ignore NaNs when calculating correlations. Data is returned as
    a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
    both axes.

    :param df: Pandas DataFrame
    :param rowvar: present in the signature but unused; correlations are always computed column-wise
    :return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
    """

    # Create a correlation matrix for all correlations
    # of the columns (filled with na for all values)
    df = df.copy()
    maskv = np.ma.masked_where(np.isnan(df.values), df.values)
    cdf = np.ma.corrcoef(maskv, rowvar=False)
    cdf = pd.DataFrame(np.array(cdf))
    cdf.columns = df.columns
    cdf.index = df.columns
    cdf = cdf.sort_index(level=0, axis=1)
    cdf = cdf.sort_index(level=0)

    return cdf
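A small usage sketch: the NaN in column "b" is masked rather than poisoning the whole matrix, and the fully observed pair a/c comes out exactly -1.

df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0],
                   "b": [2.0, 4.0, np.nan, 8.0],
                   "c": [4.0, 3.0, 2.0, 1.0]})
cdf = correlation(df)
print(cdf.loc["a", "c"])   # -1.0, a and c are perfectly anti-correlated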
b64ab2f5f08191c9536f6d08b8132b3ecc100698
3,649,257
def cost_zpk_fit(zpk_args, f, x,
                 error_func=kontrol.core.math.log_mse, error_func_kwargs={}):
    """The cost function for fitting a frequency series with zero-pole-gain.

    Parameters
    ----------
    zpk_args: array
        A 1-D list of zeros, poles, and gain.
        Zeros and poles are in unit of Hz.
    f: array
        The frequency axis.
    x: array
        The frequency series data.
    error_func: func(x1: array, x2: array) -> float
        The function that evaluates the error between arrays x1 and x2.
        Defaults to kontrol.core.math.log_mse, which evaluates the
        logarithmic mean square error.
    error_func_kwargs: dict, optional
        Keyword arguments passed to the error function.
        Defaults to {}.

    Returns
    -------
    cost: float
        The cost.
    """
    x_zpk = abs(
        kontrol.frequency_series.conversion.args2zpk(f=f, zpk_args=zpk_args))
    cost = error_func(x, x_zpk, **error_func_kwargs)
    return cost
fb18cfae20a279e0b65a03b37a10c33e6a17c6db
3,649,258
def getTrainPredictions(img, subImgSize, model):
    """Makes a prediction for an image.

    Takes an input of any size, crops it to specified size, makes predictions
    for each cropped window, and stitches output together.

    Parameters
    ----------
    img : np.array (n x m x 3)
        Image to be transformed
    subImgSize : np.array (a x b)
        Input size for model
    model : keras.model
        Keras model used to make predictions

    Returns
    -------
    pred : np.array (n x m)
        Prediction from image
    """
    # get the size of the input image
    l, w, _ = np.shape(img)

    # init array for new image
    pred = np.zeros(shape=(l, w))

    r = l // subImgSize[0]
    c = w // subImgSize[1]
    roffset = 0
    coffset = 0
    if l % subImgSize[0] != 0:
        roffset = 1
    if w % subImgSize[1] != 0:
        coffset = 1

    x1 = 0
    predX1 = 0

    # Crop the image
    for j in range(r + roffset):
        y1 = 0
        predY1 = 0
        x2 = (j + 1) * subImgSize[0]
        if x2 > l:
            x2 = l
            x1 = l - subImgSize[0]
        for k in range(c + coffset):
            # find upper bounds of window
            y2 = (k + 1) * subImgSize[1]
            # if outer dimension is larger than image size, adjust
            if y2 > w:
                y2 = w
                y1 = w - subImgSize[1]

            # crop area of picture
            croppedArea = img[x1:x2, y1:y2, :]

            # make prediction using model
            modelPrediction = model.predict(np.expand_dims(croppedArea, axis=0))

            # update prediction image
            pred[predX1:x2, predY1:y2] = modelPrediction[0, (predX1 - x1):, (predY1 - y1):, 0]

            # update the bounds
            y1 = y2
            predY1 = y1

        # update the lower x bound
        x1 = x2
        predX1 = x1

    return pred
e81ee8d6839fa07753ac379520c60d7b2d5be175
3,649,259
def use_bcbio_variation_recall(algs):
    """Processing uses bcbio-variation-recall. Avoids core requirement if not used.
    """
    for alg in algs:
        jointcaller = alg.get("jointcaller", [])
        if not isinstance(jointcaller, (tuple, list)):
            jointcaller = [jointcaller]
        for caller in jointcaller:
            if caller not in set(["gatk-haplotype-joint", None, False]):
                return True
    return False
c833f9a2dd9523f78cf294a1822b251b6940a1cd
3,649,260
from typing import Mapping


def _sa_model_info(Model: type, types: AttributeType) -> Mapping[str, AttributeInfo]:
    """ Get the full information about the model

    This function gets a full, cachable, information about the model's `types` attributes, once.
    sa_model_info() can then filter it the way it likes, without polluting the cache.
    """
    # Get a list of all available InfoClasses
    info_classes = [
        InfoClass
        for InfoClass in AttributeInfo.all_implementations()
        if InfoClass.extracts() & types  # only enabled types
    ]

    # Apply InfoClasses' extraction to every attribute
    # If there is any weird attribute that is not supported, it is silently ignored.
    return {
        name: InfoClass.extract(attribute)
        for name, attribute in all_sqlalchemy_model_attributes(Model).items()
        for InfoClass in info_classes
        if InfoClass.matches(attribute, types)
    }
8886427a4722bb1fb37664fa7382f61922d89b69
3,649,261
def bll6_models(estimators, cv_search={}, transform_search={}):
    """
    Provides good defaults for transform_search to models()

    Args:
        estimators: list of estimators as accepted by models()
        cv_search: optional cross-validation arguments to override the defaults
        transform_search: optional LeadTransform arguments to override the defaults
    """
    cvd = dict(
        year=range(2011, 2014 + 1),
        month=1,
        day=1,
        train_years=[6],
        train_query=[None],
    )
    cvd.update(cv_search)

    transformd = dict(
        wic_sample_weight=[0],
        aggregations=aggregations.args,
        outcome_expr=['max_bll0 >= 6']
    )
    transformd.update(transform_search)
    return models(estimators, cvd, transformd)
c69d17c1f5c6625ef6382959910b23d44459c158
3,649,262
def bgColor(col):
    """ Return a background color for a given column title """
    # Auto-generated columns
    if col in ColumnList._COLUMNS_GEN:
        return BG_GEN
    # KiCad protected columns
    elif col in ColumnList._COLUMNS_PROTECTED:
        return BG_KICAD
    # Additional user columns
    else:
        return BG_USER
ae6a44c61807f513a679ccad0f4c39622efa768e
3,649,263
def merge_hedge_positions(df, hedge):
    """
    Merge the multiple position records in a table, then optionally net the long and short sides.

    :param df: DataFrame of position records
    :param hedge: whether to net opposite sides after merging
    :return: merged (and optionally netted) DataFrame
    """
    # Temporary workaround: instrument IDs such as 'i1709.' (as returned by the API query)
    # and 'i1709' would otherwise end up in different groups.
    if df.empty:
        return df
    df['Symbol'] = df['InstrumentID']

    # Merge
    df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag', 'Side'])[
        'Position'].sum().to_frame().reset_index()
    # print(df)

    # Net the long and short sides against each other
    if hedge:
        df['Net'] = df['Side'] * df['Position']
        df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag'])['Net'].sum().to_frame().reset_index()
        df['Position'] = abs(df['Net'])
        df['Side'] = df['Net'] / df['Position']
        df = df[df['Position'] != 0]
        df = df[['Symbol', 'InstrumentID', 'HedgeFlag', 'Side', 'Position']]
        # print(df)

    return df
4bcaa8b160186c6c5e6e3382017d0db3ee9d6c6e
3,649,264
import numpy


def BackwardSubTri(U, y):
    """
    usage: x = BackwardSubTri(U, y)

    Row-oriented backward substitution to solve the
    upper-triangular, 'tridiagonal' linear system
          U x = y

    This function does not ensure that U has the correct
    nonzero structure.  It does, however, attempt to catch
    the case where U is singular.

    Inputs:
        U - square n-by-n matrix (assumed upper triangular and 'tridiagonal')
        y - right-hand side vector (n-by-1)

    Outputs:
        x - solution vector (n-by-1)
    """

    # check inputs
    m, n = numpy.shape(U)
    if (m != n):
        raise ValueError("BackwardSubTri error: matrix must be square")
    p = numpy.size(y)
    if (p != n):
        raise ValueError("BackwardSubTri error: right-hand side vector has incorrect dimensions")
    if (numpy.min(numpy.abs(numpy.diag(U))) < 100*numpy.finfo(float).eps):
        raise ValueError("BackwardSubTri error: matrix is [close to] singular")

    # create output vector
    x = y.copy()

    # perform backward-substitution algorithm
    for i in range(n-1, -1, -1):
        if (i < n-1):
            x[i] -= U[i, i+1]*x[i+1]
        x[i] /= U[i, i]

    return x
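A small self-check with a 3x3 upper-triangular tridiagonal system (float inputs so the in-place division works).

U = numpy.array([[2.0, 1.0, 0.0],
                 [0.0, 3.0, 1.0],
                 [0.0, 0.0, 4.0]])
y = numpy.array([4.0, 7.0, 8.0])
x = BackwardSubTri(U, y)          # [7/6, 5/3, 2]
assert numpy.allclose(U @ x, y)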
5b7c2c636eac0912aa26bc8a236f1c870b95c48b
3,649,265
def discrete_model(parents, lookup_table):
    """
    Create CausalAssignmentModel based on a lookup table.

    Lookup_table maps input values to weights of the output values.
    The actual output values are sampled from a discrete distribution
    of integers with probability proportional to the weights.

    Lookup_table has the form:
    Dict[Tuple(input_values): (output_weights)]

    Arguments
    ---------
    parents: list
        variable names of parents

    lookup_table: dict
        lookup table

    Returns
    -------
    model: CausalAssignmentModel
    """
    assert len(parents) > 0

    # create input/output mapping
    inputs, weights = zip(*lookup_table.items())
    output_length = len(weights[0])
    assert all(len(w) == output_length for w in weights)
    outputs = np.arange(output_length)
    ps = [np.array(w) / sum(w) for w in weights]

    def model(**kwargs):
        n_samples = kwargs["n_samples"]
        a = np.vstack([kwargs[p] for p in parents]).T
        b = np.zeros(n_samples) * np.nan
        for m, p in zip(inputs, ps):
            b = np.where(
                (a == m).all(axis=1),
                np.random.choice(outputs, size=n_samples, p=p),
                b)
        if np.isnan(b).any():
            raise ValueError("It looks like an input was provided which doesn't have a lookup.")
        return b

    return CausalAssignmentModel(model, parents)
a0eee81439b5997b91941181b8c7978d7f3581c9
3,649,266
import base64
import json
import tempfile
import traceback


def retrieve(datafile, provider):
    """
    Retrieve a file from the remote provider

    :param datafile:
    :param provider:
    :return: the path to a temporary file containing the data, or None
    """
    r = _connect(provider)

    try:
        data = base64.b64decode(json.loads(r.get(datafile.storage_key))['data'])
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            tmpfile.write(data)
            tmpfilename = tmpfile.name
        return tmpfilename
    except Exception:
        print('Download failed: %s' % traceback.format_exc())
        return None
fca0595df40b1743e5cdb73c8a20b0ddc6a2611f
3,649,267
def edf_parse_message(EDFFILE):
    """Return message info."""
    message = edf_get_event_data(EDFFILE).contents
    time = message.sttime
    message = string_at(byref(message.message[0]), message.message.contents.len + 1)[2:]
    message = message.decode('UTF-8')
    return (time, message)
3d29db28b7d110e9fcdf6e8309c027cb4254c647
3,649,268
from typing import Dict
import logging


def read_abbrevs_and_add_to_db(abbrevs_path: str, db: Connection) -> Dict[str, int]:
    """Add abbreviations from `abbrevs_path` to `idx` and `defns`."""
    with open(abbrevs_path, 'rt') as ab:
        abbrevs = read_abbrevs(ab)
    abbrev_nid = add_abbrevs_to_db(abbrevs, db)
    logging.info('Added %d abbreviations.', len(abbrevs))
    return abbrev_nid
6c937fd15352d8b7e3dcf607bbf2a5b66f105ffb
3,649,269
def is_encrypted(input_file: str) -> bool:
    """Checks if the inputted file is encrypted using PyPDF4 library"""
    with open(input_file, 'rb') as pdf_file:
        pdf_reader = PdfFileReader(pdf_file, strict=False)
        return pdf_reader.isEncrypted
be03d2843f35e21d7881c17f086f33ffbee5e8fa
3,649,270
def get_parameter_by_name(device, name):
    """ Find the given device's parameter that belongs to the given name """
    for i in device.parameters:
        if i.original_name == name:
            return i
    return None
9669262a9bcac8b4c054e07b2c04b780b5f84f87
3,649,271
from typing import Optional
import requests


def LogPrint(email: str, fileName: str, materialType: str, printWeight: float,
             printPurpose: str, msdNumber: Optional[str], paymentOwed: bool) -> bool:
    """Logs a print. Returns if the task was successful.

    :param email: Email of the user exporting the print.
    :param fileName: Name of the file that was exported.
    :param materialType: Type of the material being used.
    :param printWeight: Weight of the print being exported.
    :param printPurpose: Purpose of the print being exported.
    :param msdNumber: MSD Number of the print being exported.
    :param paymentOwed: Whether the payment is owed or not.
    """
    # Get the hashed id and return if there is none.
    hashedId = getUniversityIdHash(email)
    if hashedId is None:
        return False

    # Check if this is a Senior Design print.
    msd = printPurpose == "Senior Design Project (Reimbursed)"

    # Create the payload.
    arguments = {
        "hashedId": hashedId,
        "fileName": fileName,
        "material": materialType,
        "weight": printWeight,
        "purpose": printPurpose,
        "billTo": msdNumber if msd else None,
        "owed": paymentOwed,
    }

    # Send the request and return the result.
    printResult = requests.post(getHost() + "/print/add", json=arguments).json()
    return "status" in printResult.keys() and printResult["status"] == "success"
eb8c629d4eacdf24988cd6d33d4b4733bb90caac
3,649,272
def is_requirement(line):
    """
    Return True if the requirement line is a package requirement;
    that is, it is not blank, a comment, a requirements-file include,
    an editable install, or a VCS (git+) reference.
    """
    # Remove whitespace at the start/end of the line
    line = line.strip()

    # Skip blank lines, comments, includes, editable and VCS installs
    return not (
        line == '' or
        line.startswith('-r') or
        line.startswith('#') or
        line.startswith('-e') or
        line.startswith('git+')
    )
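A typical filtering of requirements-file lines (the file contents below are made up).

lines = ["requests>=2.0", "", "# a comment", "-r base.txt", "-e .", "git+https://example.com/pkg.git"]
print([line for line in lines if is_requirement(line)])   # ['requests>=2.0']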
db30ff6bb2421d2b31939a20e708bf1c923a353e
3,649,273
def read_pose_txt(pose_txt):
    """ Read the pose txt file and return a 4x4 rigid transformation. """
    with open(pose_txt, "r") as f:
        lines = f.readlines()
    pose = np.zeros((4, 4))
    for line_idx, line in enumerate(lines):
        items = line.split(" ")
        for i in range(4):
            pose[line_idx, i] = float(items[i])
    return pose
250cc4c793a3bba948aeac2ca547c6680937a6e7
3,649,274
def get_futures(race_ids=list(range(1, 13000))):
    """Get Futures for all BikeReg race pages with given race_ids."""
    session = FuturesSession(max_workers=8)
    return [session.get(f'https://results.bikereg.com/race/{race_id}')
            for race_id in race_ids if race_id not in BAD_IDS]
1992c7b2fee93eecb75fd6e0c7a625181073609f
3,649,275
def sum_of_proper_divisors(number: int):
    """
    Let d(n) be defined as the sum of proper divisors of n
    (numbers less than n which divide evenly into n).

    :param number: the integer n whose proper divisors are summed
    :return: d(n), the sum of the proper divisors of n
    """
    divisors = []
    for n in range(1, number):
        if number % n == 0:
            divisors.append(n)
    return sum(divisors)
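Quick check using the classic amicable pair 220 and 284.

assert sum_of_proper_divisors(220) == 284
assert sum_of_proper_divisors(284) == 220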
9015dd3809f90d328b0b4a6b51f6fcb145f0241d
3,649,276
def coddington_meridional(p, q, theta):
    """
    return radius of curvature
    """
    f = p * q / (p + q)
    R = 2 * f / np.sin(theta)
    return R
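A quick sanity check, assuming numpy is imported as np (the function body already requires it): with p = q = 1 the focal length is f = 0.5, and at theta = pi/2 the radius is R = 2*f/sin(theta) = 1.0.

import numpy as np
print(coddington_meridional(1.0, 1.0, np.pi / 2))   # 1.0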
ef4964f08af065b2da6cbe5b156e4e976406a879
3,649,277
def read_analysis_file(timestamp=None, filepath=None, data_dict=None, file_id=None,
                       ana_file=None, close_file=True, mode='r'):
    """
    Creates a data_dict from an AnalysisResults file as generated by analysis_v3
    :param timestamp: str with a measurement timestamp
    :param filepath: (str) path to file
    :param data_dict: dict where to store the file entries
    :param file_id: suffix to the usual HDF measurement file found from giving a
        measurement timestamp. Defaults to '_AnalysisResults,' the standard suffix
        created by analysis_v3
    :param ana_file: HDF file instance
    :param close_file: whether to close the HDF file at the end
    :param mode: str specifying the HDF read mode (if ana_file is None)
    :return: the data dictionary
    """
    if data_dict is None:
        data_dict = {}
    try:
        if ana_file is None:
            if filepath is None:
                if file_id is None:
                    file_id = '_AnalysisResults'
                folder = a_tools.get_folder(timestamp)
                filepath = a_tools.measurement_filename(folder, file_id=file_id)
            ana_file = h5py.File(filepath, mode)
        read_from_hdf(data_dict, ana_file)
        if close_file:
            ana_file.close()
    except Exception as e:
        # Only close the file if it was actually opened.
        if close_file and ana_file is not None:
            ana_file.close()
        raise e
    return data_dict
5e0d1797f45f18665f3ae5eaa6bac987fe94f926
3,649,278
def get_player_macro_econ_df(rpl: sc2reader.resources.Replay, pid: int) -> pd.DataFrame:
    """This function organises the records of a player's major macroeconomic
    performance indicators.

    The function uses a player's PlayerStatsEvents contained in a Replay object
    to compose a DataFrame. In the DataFrame, each column points to a particular
    indicator. Each row points to the records of all indicators at a specific
    moment during the game.

    *Arguments*
        - rpl (sc2reader.resources.Replay)
            Replay object generated with sc2reader containing a match's data.
        - pid (int)
            A player's id number distinguishes them from the other players in a
            match. It can be extracted from a Participant object through the pid
            attribute.

    *Returns*
        - pd.DataFrame
            This DataFrame contains all the time series that illustrate the
            changes of each attribute during a match. Each column alludes to an
            attribute, each row to a moment during the match.
    """
    columns_names = [
        'second',
        'minerals_current',
        'vespene_current',
        'minerals_used_active_forces',
        'vespene_used_active_forces',
        'minerals_collection_rate',
        'vespene_collection_rate',
        'workers_active_count',
        'minerals_used_in_progress',
        'vespene_used_in_progress',
        'resources_used_in_progress',
        'minerals_used_current',
        'vespene_used_current',
        'resources_used_current',
        'minerals_lost',
        'vespene_lost',
        'resources_lost',
        'minerals_killed',
        'vespene_killed',
        'resources_killed',
        'food_used',
        'food_made'
    ]

    # Generate a DataFrame with the columns listed above
    pstatse_list = get_pstatse(rpl, pid)
    pstatse_dicts_list = [event.__dict__ for event in pstatse_list]
    pstatse_df = pd.DataFrame(pstatse_dicts_list, columns=columns_names)

    # Complete the DataFrame with the real_time, unspent_rsrc columns and
    # army_value.
    # Also, eliminate possible duplicate last record.
    return complete_pstatse_df(rpl, pstatse_df)
4a0123ce4fe7f704f83c39a8c78e29c9347b1e1a
3,649,279
import re


def get_seconds_from_duration(time_str: str) -> int:
    """
    This function will convert the TM1 time to seconds
    :param time_str: P0DT00H01M43S
    :return: int
    """
    pattern = re.compile(r'\w(\d+)\w\w(\d+)\w(\d+)\w(\d+)\w')
    matches = pattern.search(time_str)
    d, h, m, s = matches.groups()
    seconds = (int(d) * 86400) + (int(h) * 3600) + (int(m) * 60) + int(s)
    return seconds
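Example conversions of TM1 duration strings.

assert get_seconds_from_duration("P0DT00H01M43S") == 103     # 1 min 43 s
assert get_seconds_from_duration("P1DT02H30M00S") == 95400   # 1 day 2 h 30 min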
a8614c0ed6e41c7216ae461ef1fd57319a5995e1
3,649,280
def get_protecteds(object: Object) -> Dictionary:
    """Gets the protected namespaces of an object."""
    return object.__protecteds__
479f6ee0a9334107f67517fd6bb2ad55a915d0ac
3,649,281
def pah2area(_position, angle, height, shape):
    """Calculates area from position, angle, height depending on shape."""
    if shape == "PseudoVoigt":
        fwhm = np.tan(angle) * height
        area = (height * (fwhm * np.sqrt(np.pi / ln2))
                / (1 + np.sqrt(1 / (np.pi * ln2))))
        return area
    elif shape == "DoniachSunjic":
        fwhm = np.tan(angle) * height
        area = height / pure_ds(0, amplitude=1, center=0, fwhm=fwhm, asym=0.5)
        return area
    elif shape == "Voigt":
        fwhm = np.tan(angle) * height
        area = height / voigt(0, amplitude=1, center=0, fwhm=fwhm, fwhm_l=0.5)
        return area
    raise NotImplementedError
79e239de4ee8b356152717f7a9a301f062cc6c71
3,649,282
from typing import Callable
from uuid import uuid4
import os
import logging

import pandas as pd


def caching_query_s3(
    s3_url: str,
    query_fun: Callable,
    force_query=False,
    df_save_fun: Callable = lambda df, loc: df.to_pickle(loc, compression="gzip"),
    df_load_fun: Callable = lambda loc: pd.read_pickle(loc, compression="gzip"),
):
    """
    Retrieve cached data if available, query and cache otherwise.

    Args:
        s3_url (str): Location of the saved data.
        query_fun (Callable): Function to query the data
        force_query (Optional[bool]): Whether to force a query. Defaults to False.
        df_save_fun (Optional[Callable]): Function to save the data.
            Defaults to pickling to a gzip file.
        df_load_fun (Optional[Callable]): Function to load the data.
            Defaults to unpickling from a gzip file.
    """
    # generate a unique one to avoid collisions when running in parallel
    tmp_file_name = str(uuid4())
    if file_exists_in_s3(s3_url) and not force_query:
        get_file_from_s3(s3_url, tmp_file_name)
        df = df_load_fun(tmp_file_name)
        os.remove(tmp_file_name)
        logging.info(f"File {s3_url} exists, loaded it")
    else:
        print(f"Didn't find {s3_url} so calling query_fun")
        df = query_fun()
        df_save_fun(df, tmp_file_name)
        save_file_to_s3(tmp_file_name, s3_url)
        os.remove(tmp_file_name)
        logging.info(f"File {s3_url} didn't exist, created it")
    return df
6704a02265eb14fc45dd63be212090401a93e635
3,649,283
def config(key, values, axis=None):
    """Class decorator to parameterize the Chainer configuration.

    This is a specialized form of `parameterize` decorator to parameterize
    the Chainer configuration. For all `time_*` functions and `setup` function
    in the class, this decorator wraps the function to be called inside the
    context where specified Chainer configuration set.

    This decorator adds parameter axis with the name of the configuration
    by default. You can change the axis name by passing axis parameter.

    You cannot apply `parameterize` decorator to the class already decorated
    by this decorator. If you want to use `parameterize` along with this
    decorator, make `parameterize` the most inner (i.e., the closest to the
    class declaration) decorator.

    Example of usage is as follows:

    >>> @config('autotune', [True, False])
    ... class ConvolutionBenchmark(object):
    ...     def time_benchmark(self):
    ...         ...
    """
    axis = key if axis is None else axis

    def _wrap_class(klass):
        assert isinstance(klass, type)
        if not hasattr(chainer.config, key):
            print(
                '''Notice: Configuration '{}' unknown to this version of '''
                '''Chainer'''.format(key))
        return _inject_config(klass, axis, key, values)

    return _wrap_class
a2ab11ca245647c6a5267b2f1c62a55b9aa1b96b
3,649,284
import sys
import logging


def handle_switches(args, sysroot):
    """Fetch the targeted binary and determine how to attach gdb.

    Args:
        args: Parsed arguments.
        sysroot: Local sysroot path.

    Returns:
        (binary_file, attach_pid, run_cmd).
        Precisely one of attach_pid or run_cmd will be None.
    """

    device = args.device
    binary_file = None
    pid = None
    run_cmd = None

    if args.target_pid:
        # Fetch the binary using the PID later.
        pid = args.target_pid
    elif args.target_name:
        # Fetch the binary using the PID later.
        pid = get_remote_pid(device, args.target_name)
    elif args.run_cmd:
        if not args.run_cmd[0]:
            sys.exit("empty command passed to -r")
        if not args.run_cmd[0].startswith("/"):
            sys.exit("commands passed to -r must use absolute paths")
        run_cmd = args.run_cmd
        binary_file, local = gdbrunner.find_file(device, run_cmd[0], sysroot,
                                                 user=args.user)
    if binary_file is None:
        assert pid is not None
        try:
            binary_file, local = gdbrunner.find_binary(device, pid, sysroot,
                                                       user=args.user)
        except adb.ShellError:
            sys.exit("failed to pull binary for PID {}".format(pid))

    if not local:
        logging.warning("Couldn't find local unstripped executable in {},"
                        " symbols may not be available.".format(sysroot))

    return (binary_file, pid, run_cmd)
04bc894568da275516df94a4a49ef9ec58ef3cf3
3,649,285
def dot2states(dot):
    """Translate a dot-bracket string into a sequence of numerical states"""
    dot = dot.replace(".", "0")  # Unpaired
    dot = dot.replace("(", "1")  # Paired
    dot = dot.replace(")", "1")  # Paired
    return np.array(list(dot), dtype=int)
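Example translation of a dot-bracket string, assuming numpy is imported as np: unpaired bases map to 0, paired bases to 1.

print(dot2states(".((..))"))   # [0 1 1 0 0 1 1]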
655b57749a39d04f62aae20ac16ffb1f31b0bc71
3,649,286
def mel_to_hz(mels, htk=False):
    """Convert mel bin numbers to frequencies

    Examples
    --------
    >>> librosa.mel_to_hz(3)
    200.

    >>> librosa.mel_to_hz([1,2,3,4,5])
    array([  66.667,  133.333,  200.   ,  266.667,  333.333])

    Parameters
    ----------
    mels : np.ndarray [shape=(n,)], float
        mel bins to convert
    htk : bool
        use HTK formula instead of Slaney

    Returns
    -------
    frequencies : np.ndarray [shape=(n,)]
        input mels in Hz

    See Also
    --------
    hz_to_mel
    """

    mels = np.asanyarray(mels)

    if htk:
        return 700.0 * (10.0**(mels / 2595.0) - 1.0)

    # Fill in the linear scale
    f_min = 0.0
    f_sp = 200.0 / 3
    freqs = f_min + f_sp * mels

    # And now the nonlinear scale
    min_log_hz = 1000.0                         # beginning of log region (Hz)
    min_log_mel = (min_log_hz - f_min) / f_sp   # same (Mels)
    logstep = np.log(6.4) / 27.0                # step size for log region

    if mels.ndim:
        # If we have vector data, vectorize
        log_t = (mels >= min_log_mel)
        freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
    elif mels >= min_log_mel:
        # If we have scalar data, check directly
        freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))

    return freqs
93e9d115d9ef0a58420c796737b96f4460f44ceb
3,649,287
import numpy as np


def load_images(images):
    """ Decodes batch of image bytes and returns a 4-D numpy array. """
    batch = []
    for image in images:
        img_np = readImage(image)
        batch.append(img_np)
    batch_images = np.concatenate(batch)
    logger.info('batch_images.shape:%s' % (str(batch_images.shape)))
    return batch_images
ae4f18488cdfa4980f849f2f7110f9963381e6e7
3,649,288
from bob.db.base.utils import null
import sys


def dumplist(args):
    """Dumps lists of files based on your criteria"""

    db = Database()
    objects = db.objects(
        protocol=args.protocol,
        purposes=args.purposes,
        groups=args.groups,
        kinds=args.kinds
    )

    output = sys.stdout
    if args.selftest:
        output = null()

    for obj in objects:
        output.write('%s\n' % obj.make_path(directory=args.directory))

    return 0
cb57aa44ad89a023d33661421de4fc62a5b3c094
3,649,289
def stats_hook():
    """
    decorator to register a stats hook.

    :raises InvalidStatsHookTypeError: invalid stats hook type error.

    :returns: stats hook class.
    :rtype: type
    """

    def decorator(cls):
        """
        decorates the given class and registers an instance
        of it into available stats hooks.

        :param type cls: stats hook class.

        :returns: stats hook class.
        :rtype: type
        """

        instance = cls()
        stat_services.register_hook(instance)

        return cls

    return decorator
de386a9bff39c4060833f11100ec538b6d2b8d68
3,649,290
def safe_infer(node, context=None):
    """Return the inferred value for the given node.

    Return None if inference failed or if there is some ambiguity (more than
    one node has been inferred).
    """
    try:
        inferit = node.infer(context=context)
        value = next(inferit)
    except exceptions.InferenceError:
        return None
    try:
        next(inferit)
        return None  # None if there is ambiguity on the inferred node
    except exceptions.InferenceError:
        return None  # there is some kind of ambiguity
    except StopIteration:
        return value
928c1d2e3c2813cc389085ea6bd3ccd50709effe
3,649,291
import functools


def catch_exception(func):
    """
    Returns:
        object:
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        worker = kwargs['error_catcher']
        try:
            return func(*args, **kwargs)
        except Exception:
            print('stdout:', worker.stdout.read().decode("utf-8"))
            print('stderr:', worker.stderr.read().decode("utf-8"))
            raise
    return wrapper
be579d9b6723e5025b7b70f38c83bcedc30196a5
3,649,292
def RandomCrop(parent, new_shape, name=""):
    """\
    Crop an image layer at a random location with size ``[height, width]``.

    :param parent: parent layer
    :param new_shape: [height, width] size
    :param name: name of the output layer
    :return: CropRandom layer
    """
    return _eddl.RandomCrop(parent, new_shape, name)
6078cf9f6daf73876d3503a1e77523df079c41d1
3,649,293
import subprocess


def run_client(instance):
    """
    Start a client process
    """
    port = [1008, 8989, 9002][instance]
    cpu = ['(3,4)', '(5,6)', '(7,8)'][instance]
    # TODO: the following line is an example of code that is not suitable!
    # should switch to run_udp_app instead of this function
    # ips = [[_server_ips[1], _server_ips[0]],
    ips = [[_server_ips[0], ],
           [_server_ips[0]],
           [_server_ips[0]]][instance]
    mpps = 1000 * 1000
    rate = [-2 * mpps, 2 * mpps, 6 * mpps][instance]
    _ips = ' '.join(ips)
    _cnt_flow = [1, count_flow, count_flow][instance]
    delay = [0, 0, 100]  # cycles per packet
    args = {
        'bin': slow_receiver_exp,
        'cpu': cpu,
        'count_queue': count_queue,
        'sysmod': 'bess' if sysmod == 'bess-bp' else sysmod,
        'mode': 'client',
        'cnt_ips': len(ips),
        'ips': _ips,
        'count_flow': _cnt_flow,
        'duration': duration,
        'source_ip': _client_ip[instance],
        'port': port,
        'delay': delay[instance],
        'bidi': 'false',
    }
    if PORT_TYPE == PMD:
        vdev = ['virtio_user1,path=/tmp/ex_vhost1.sock,queues=' + str(count_queue),
                'virtio_user3,path=/tmp/ex_vhost3.sock,queues=' + str(count_queue), ][instance]
        prefix = 'slow_receiver_exp_client_{}'.format(instance)
        args['vdev'] = vdev
        args['file-prefix'] = prefix
        cmd = ('sudo {bin} --no-pci --lcores="{cpu}" --file-prefix={file-prefix} '
               '--vdev="{vdev}" --socket-mem=128 -- '
               'bidi={bidi} {source_ip} {count_queue} {sysmod} {mode} {cnt_ips} {ips} '
               '{count_flow} {duration} {port} {delay}').format(**args)
    else:
        vdev = ['ex_vhost1', 'ex_vhost3', 'ex_vhost4'][instance]
        prefix = 'bessd-dpdk-prefix'
        args['vdev'] = vdev
        args['file-prefix'] = prefix
        cmd = ('sudo {bin} --no-pci --lcores="{cpu}" --file-prefix={file-prefix} '
               '--proc-type=secondary --socket-mem=128 -- '
               'bidi={bidi} vport={vdev} {source_ip} {count_queue} '
               '{sysmod} {mode} {cnt_ips} {ips} '
               '{count_flow} {duration} {port} {delay}').format(**args)
    if rate >= 0:
        # add rate limit argument
        cmd += ' {}'.format(rate)
    print("=" * 32)
    print(" " * 13 + "client")
    print(cmd)
    print("=" * 32, end='\n\n')
    # Run in background
    if not DIRECT_OUTPUT:
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    else:
        p = subprocess.Popen(cmd, shell=True)
    return p
7f7b91a9c3a503df99736948ad3b37501e0609df
3,649,294
import time


def get_linear_sys(eqns, params):
    """Gets the linear system corresponding to the symbolic equations

    Note that this function only works for models where the left-hand side of
    the equations all contain only linear terms with respect to the given
    model parameters. For these linear cases, this function will return a
    matrix :math:`\\mathbf{A}` and a vector :math:`\\mathbf{v}` such that the
    given equations can be written as

    .. math::

        \\mathbf{A} \\mathbf{x} = \\mathbf{v}

    with :math:`\\mathbf{x}` being the column vector of the values of the
    model symbols. Normally the matrix will have more rows than columns for
    over-determined fitting.

    :param eqns: A sequence of ``Eqn`` objects for the equations of the
        fitting.
    :param params: A sequence of the ``ModelParam`` objects for the parameters
        to be fitted.
    :returns: The matrix :math:`\\mathbf{A}` and the vector :math:`\\mathbf{v}`.
    :rtype: tuple
    :raises ValueError: if the system of equations is not linear.
    """

    # We treat the equations one-by-one, write rows of the matrix and
    # the vector one-by-one.

    n_params = len(params)
    n_eqns = len(eqns)
    mat = np.zeros((n_eqns, n_params), dtype=np.float)
    vec = np.empty((n_eqns, ), dtype=np.float)

    # Extract the symbols for the parameters and assort the result into a
    # dictionary for fast look-up of the location of the symbols.
    symbs = {
        param.symb: idx
        for idx, param in enumerate(params)
    }

    print('\nForming the matrix and vectors for the linear model...')
    start_time = time.process_time()

    for idx, eqn in enumerate(eqns):

        # First get the vector to the reference value of the equation.
        vec[idx] = eqn.ref_val

        # Get the symbolic expression.
        expr = eqn.modelled_val.simplify().expand()

        # Get its terms.
        if isinstance(expr, Add):
            terms = expr.args
        else:
            terms = [expr, ]

        # Loop over the terms to get the coefficients ahead of the symbols.
        for term in terms:

            # Split the term into a symbol and a coefficient.
            symb, coeff = _get_symb_w_coeff(term)

            if symb is None:
                # When we are treating a pure number term, we can move it to
                # the left-hand side of the equation.
                vec[idx] -= coeff
            else:
                # When we are treating a symbol, we need to locate the symbol.
                try:
                    col_idx = symbs[symb]
                except KeyError:
                    raise ValueError(
                        'Unrecognised symbol {!r}'.format(symb)
                    )
                else:
                    mat[idx, col_idx] += coeff

            # Go on to the next term.
            continue

        # Go on to the next equation.
        continue

    print(
        'Finished: {!s}sec.'.format(time.process_time() - start_time)
    )

    # Return the matrix and the vector.
    return mat, vec
e3ef88c695bcbcd6e7ab1dfbe8ee45ad552e3be7
3,649,295
def slightly(membership: npt.ArrayLike) -> npt.ArrayLike:
    """
    Applies the element-wise function fn(u) = u^(1/2).

    :param membership: Membership function to be modified.

    >>> from fuzzy_expert.operators import slightly
    >>> slightly([0, 0.25, 0.5, 0.75, 1])
    array([0.        , 0.16326531, 0.99696182, 1.        , 0.        ])

    """
    plus_membership: npt.ArrayLike = np.power(membership, 1.25)
    not_very_membership: npt.ArrayLike = 1 - np.power(membership, 2)
    membership: npt.ArrayLike = np.where(
        membership < not_very_membership, plus_membership, not_very_membership
    )
    membership: npt.ArrayLike = membership / np.max(membership)
    return np.where(membership <= 0.5, membership ** 2, 1 - 2 * (1 - membership) ** 2)
eb0e71462c5e3959584970e9e9b84a3dff876d54
3,649,296
def int_max(int_a, int_b):
    """ max(a, b) """
    if int_a > int_b:
        return int_a
    else:
        return int_b
5ae0df8ff7bdc5539d127fad4df03b6215d9380f
3,649,297
def extract_depth_map(frame):
    """ Extract front-view lidar camera projection for ground-truth depth maps """
    (range_images, camera_projections, range_image_top_pose) = \
        frame_utils.parse_range_image_and_camera_projection(frame)

    for c in frame.context.camera_calibrations:
        if dataset_pb2.CameraName.Name.Name(c.name) == 'FRONT':
            extrinsic = np.reshape(
                np.array(c.extrinsic.transform, np.float32), [4, 4])

    range_images_cartesian = convert_range_image_to_cartesian(
        frame, range_images, range_image_top_pose)

    cam_projection = (np.array(camera_projections[1][0].data).reshape(64, 2650, 6))[np.newaxis, ...]
    depth = range_image_utils.build_camera_depth_image(
        range_images_cartesian[1][np.newaxis, ...],
        extrinsic[np.newaxis, ...],
        cam_projection,
        [1280, 1920],
        1)

    p = np.where(depth[0] != 0)
    v = np.extract(depth[0] != 0, depth[0])
    grid_w, grid_h = np.mgrid[0:1280, 0:1920]
    depth_map = griddata(p, v, (grid_w, grid_h), method='nearest')

    return depth_map[0:1280:4, 0:1920:4]
2568d8563e256bde6c5df5c3bb34038b57993a1b
3,649,298
from .functions import express


def cross(vect1, vect2):
    """
    Returns cross product of two vectors.

    Examples
    ========

    >>> from sympy.vector import CoordSys3D
    >>> from sympy.vector.vector import cross
    >>> R = CoordSys3D('R')
    >>> v1 = R.i + R.j + R.k
    >>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
    >>> cross(v1, v2)
    (-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k

    """
    if isinstance(vect1, Add):
        return VectorAdd.fromiter(cross(i, vect2) for i in vect1.args)
    if isinstance(vect2, Add):
        return VectorAdd.fromiter(cross(vect1, i) for i in vect2.args)
    if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
        if vect1._sys == vect2._sys:
            n1 = vect1.args[0]
            n2 = vect2.args[0]
            if n1 == n2:
                return Vector.zero
            n3 = ({0, 1, 2}.difference({n1, n2})).pop()
            sign = 1 if ((n1 + 1) % 3 == n2) else -1
            return sign * vect1._sys.base_vectors()[n3]
        try:
            v = express(vect1, vect2._sys)
        except ValueError:
            return Cross(vect1, vect2)
        else:
            return cross(v, vect2)
    if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
        return Vector.zero
    if isinstance(vect1, VectorMul):
        v1, m1 = next(iter(vect1.components.items()))
        return m1 * cross(v1, vect2)
    if isinstance(vect2, VectorMul):
        v2, m2 = next(iter(vect2.components.items()))
        return m2 * cross(vect1, v2)
    return Cross(vect1, vect2)
8857f53a3db4066b2be6cd0fc3443b89a9c97022
3,649,299