Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, range 0 to 3.66M)
import itertools
import re


def _apply_constraints(password_hash, size, is_non_alphanumeric):
    """
    Fiddle with the password a bit after hashing it so that it will get
    through most website filters. We require one upper and lower case, one
    digit, and we look at the user's password to determine if there should
    be at least one alphanumeric or not.
    """
    starting_size = 0 if size < 4 else size - 4
    result = password_hash[:starting_size]
    extras = itertools.chain(
        (ord(ch) for ch in password_hash[starting_size:]),
        itertools.repeat(0))
    extra_chars = (chr(ch) for ch in extras)

    def next_between(start, end):
        interval = ord(end) - ord(start) + 1
        offset = next(extras) % interval
        return chr(ord(start) + offset)

    chars_ranges = (("A", "Z"), ("a", "z"), ("0", "9"))
    for first, last in chars_ranges:
        any_of_chars = re.compile("[{}-{}]".format(first, last))
        if any_of_chars.search(result):
            result += next(extra_chars)
        else:
            result += next_between(first, last)

    non_word = re.compile(r"\W")
    if non_word.search(result) and is_non_alphanumeric:
        result += next(extra_chars)
    else:
        result += "+"
    while non_word.search(result) and not is_non_alphanumeric:
        result = non_word.sub(next_between("A", "Z"), result, 1)

    flip_place = next(extras) % len(result)
    result = result[flip_place:] + result[:flip_place]
    return result.strip("\x00")
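A minimal usage sketch; the digest value and size below are illustrative assumptions, not from the original source:

digest = "8f14e45fceea167a5a36dedd4bea2543"  # e.g. an MD5 hex digest
candidate = _apply_constraints(digest, size=16, is_non_alphanumeric=True)
print(candidate)  # 16 characters covering the upper/lower/digit checks plus a symbol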
8757c3197052fb1606a95dfa417a13ba833cdb43
3,636,700
import numpy as np


def SplitLineRecursive(linepts, i, j, THRESHOLD=5.0, ds_min=50.0):
    """Choose the best point at which to split a line to minimize the total
    reprojection error."""
    max_err = np.max(ProjectionError(
        np.stack((linepts[:, i], linepts[:, j])).T, linepts[:, i:j]))
    if max_err < THRESHOLD:
        ds = np.cumsum(np.sqrt(np.sum(np.diff(linepts[:, i:j])**2, axis=0)))
        if ds[-1] > ds_min:
            k = i + np.argmin((ds - ds[-1] / 2.)**2) + 1
            return k
        else:
            return j

    errors1 = np.zeros(j - (i + 1))
    errors2 = np.zeros(j - (i + 1))
    max_errors1 = np.zeros(j - (i + 1))
    max_errors2 = np.zeros(j - (i + 1))
    for k in range(i + 1, j):
        l1 = np.stack((linepts[:, i], linepts[:, k])).T
        l2 = np.stack((linepts[:, k], linepts[:, j])).T
        errors1[k - i - 1] = np.sum(ProjectionError(l1, linepts[:, i + 1:k])) / (k - i)
        errors2[k - i - 1] = np.sum(ProjectionError(l2, linepts[:, k + 1:j])) / (j - k)
        max_errors1[k - i - 1] = np.max(ProjectionError(l1, linepts[:, i:k]))
        max_errors2[k - i - 1] = np.max(ProjectionError(l2, linepts[:, k:j]))
    k = i + 1 + np.argmin(errors1 + errors2)
    # max_err1 = np.max(max_errors1)
    # max_err2 = np.max(max_errors2)
    return k
fe17a756d588468b04db999f72274d459fec0d65
3,636,701
def create_report() -> FlaskResponse:
    """Creates a new report.

    Note: This is the existing implementation, currently used for the v1
    endpoint.

    Returns:
        FlaskResponse: details of the report just created or a list of
        errors with the corresponding HTTP status code.
    """
    logger.info("Creating a new report")
    try:
        report_name = create_report_job()
        report_details = get_reports_details(report_name)
        return created(reports=report_details)
    except Exception as e:
        msg = f"{ERROR_UNEXPECTED} ({type(e).__name__})"
        logger.error(msg)
        logger.exception(e)
        return internal_server_error(msg)
dbe64f79e05dc67932beac0aa06b6cd6c4f998c5
3,636,702
import argparse


def get_arguments():
    """Parse the command-line arguments."""
    parser = argparse.ArgumentParser(description="", usage='''
______________________________________________________________________

  BiG-MAP map: maps the reads to the predicted MGCs
______________________________________________________________________

Generic command:
    python3 BiG-MAP.map.py {-I1 [mate-1s] -I2 [mate-2s] | -U [samples]}
        -O [outdir] -F [family] [Options*]

Maps the metagenomic/metatranscriptomic reads to the fasta reference
file and outputs RPKM read counts in .csv and BIOM format. Use the
BiG-MAP_process conda environment.

Data inputs: either paired or unpaired
    -I1 Provide the mate 1s of the paired metagenomic and/or
        metatranscriptomic samples here. These samples should be
        provided in fastq-format (.fastq, .fq, .fq.gz). Also, this can
        be a space separated list from the command line.
    -I2 Provide the mate 2s of the paired metagenomic and/or
        metatranscriptomic samples here. These samples should be
        provided in fastq-format (.fastq, .fq, .fq.gz). Also, this can
        be a space separated list from the command line.
    -U  Provide the unpaired metagenomic/metatranscriptomic samples
        here. These samples should be provided in fastq-format
        (.fastq, .fq, .fq.gz). Also, this can be a space separated
        list from the command line.

File inputs: either separated or pickled:
    -F  Directory with all the output files from the family module
    -P  Input files are in pickled format (named:
        BiG-MAP.[name].pickle). The format of the pickled file: fasta
        file, GCF json file, and optionally a bed file and/or a
        BiG-SCAPE GCF dictionary.

Obligatory arguments:
    -O  Name of the output directory where the output files are going
        to be written. Default = current folder (.)

Options:
    -b  Outputs the resulting read counts in biom format (v1.0) as
        well. This will be useful to analyze the results in
        BiG-MAP.analyse. Therefore, it is important to include the
        metadata here as well: this metagenomic data should be in the
        same format as the example metadata
    -f  Input files are in fasta format (.fna, .fa, .fasta):
        True/False. Default = False.
    -s  Bowtie2 setting:
        END-TO-END mode: very-fast, fast, sensitive, very-sensitive
        LOCAL mode: very-fast-local, fast-local, sensitive-local,
        very-sensitive-local. Default = fast
    -a  Output read average values across GCFs instead of summed
        counts: True/False. Default = False.
    -th Number of threads used in the bowtie2 mapping step.
        Default = 6
______________________________________________________________________
''')
    parser.add_argument("-O", "--outdir", help=argparse.SUPPRESS,
                        required=True)
    parser.add_argument("-I1", "--fastq1", nargs='+',
                        help=argparse.SUPPRESS, required=False)
    parser.add_argument("-I2", "--fastq2", nargs='+',
                        help=argparse.SUPPRESS, required=False)
    parser.add_argument("-U", "--U_fastq", nargs='+',
                        help=argparse.SUPPRESS, required=False)
    parser.add_argument("-F", "--family", help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-P", "--pickle_file", help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-b", "--biom_output", help=argparse.SUPPRESS,
                        type=str, required=False)
    parser.add_argument("-f", "--fasta", help=argparse.SUPPRESS,
                        type=str, required=False, default=False)
    parser.add_argument("-a", "--average", help=argparse.SUPPRESS,
                        type=str, required=False, default=False)
    parser.add_argument("-s", "--bowtie2_setting", help=argparse.SUPPRESS,
                        type=str, required=False, default="fast")
    parser.add_argument("-th", "--threads", help=argparse.SUPPRESS,
                        type=int, required=False, default=6)
    return parser, parser.parse_args()
79173e468d640457f7374ebd5aa23d7da9f7684c
3,636,703
def sample_exercise():
    """Create a sample exercise"""
    return ExerciseModel.objects.create(
        name='exercise name',
        duration=10,
        calories=10
    )
c4c9424aa987a8cb2d0fa493c63920b438ae5b73
3,636,704
from math import sqrt


def der_kinetic_integral(a, bfi, bfj):
    """
    The kinetic energy operator does not depend on the atomic position, so
    we only have to consider differentiating the Gaussian functions. There
    are 4 possible cases we have to evaluate.

    Case 1: Neither of the basis functions depends on the position of atom
    A, which gives:
        dT_ij/dXa = 0

    Cases 2 and 3: Only one of the basis functions depends on the position
    of atom A, which gives us either of the following possible integrals
    to evaluate:
        dT_ij/dXa = integral{dr dg_i/dXa T g_j }
        dT_ij/dXa = integral{dr g_i T dg_j/dXa }

    Case 4: Both of the basis functions depend on the position of atom A,
    which gives the following integral to evaluate:
        dT_ij/dXa = integral{dr dg_i/dXa T g_j + g_i T dg_j/dXa }
    """
    dTij_dXa, dTij_dYa, dTij_dZa = 0.0, 0.0, 0.0

    # We use atom ids on the CGBFs to evaluate which of the 4 cases above
    # we have.

    # bfi is centered on atom a
    if bfi.atid == a:
        for upbf in bfj.prims():
            for vpbf in bfi.prims():
                alpha = vpbf.exp()
                l, m, n = vpbf.powers()
                origin = vpbf.origin()
                coefs = upbf.coef() * vpbf.coef()

                # x component
                v = PGBF(alpha, origin, (l + 1, m, n))
                v.normalize()
                terma = sqrt(alpha * (2.0 * l + 1.0)) * coefs * v.kinetic(upbf)
                if l > 0:
                    v.reset_powers(l - 1, m, n)
                    v.normalize()
                    termb = -2 * l * sqrt(alpha / (2.0 * l - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dXa += terma + termb

                # y component
                v.reset_powers(l, m + 1, n)
                v.normalize()
                terma = sqrt(alpha * (2.0 * m + 1.0)) * coefs * v.kinetic(upbf)
                if m > 0:
                    v.reset_powers(l, m - 1, n)
                    v.normalize()
                    termb = -2 * m * sqrt(alpha / (2.0 * m - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dYa += terma + termb

                # z component
                v.reset_powers(l, m, n + 1)
                v.normalize()
                terma = sqrt(alpha * (2.0 * n + 1.0)) * coefs * v.kinetic(upbf)
                if n > 0:
                    v.reset_powers(l, m, n - 1)
                    v.normalize()
                    termb = -2 * n * sqrt(alpha / (2.0 * n - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dZa += terma + termb

    # bfj is centered on atom a
    if bfj.atid == a:
        for upbf in bfi.prims():
            for vpbf in bfj.prims():
                alpha = vpbf.exp()
                l, m, n = vpbf.powers()
                origin = vpbf.origin()
                coefs = upbf.coef() * vpbf.coef()

                # x component
                v = PGBF(alpha, origin, (l + 1, m, n))
                v.normalize()
                terma = sqrt(alpha * (2.0 * l + 1.0)) * coefs * v.kinetic(upbf)
                if l > 0:
                    v.reset_powers(l - 1, m, n)
                    v.normalize()
                    termb = -2 * l * sqrt(alpha / (2.0 * l - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dXa += terma + termb

                # y component
                v.reset_powers(l, m + 1, n)
                v.normalize()
                terma = sqrt(alpha * (2.0 * m + 1.0)) * coefs * v.kinetic(upbf)
                if m > 0:
                    v.reset_powers(l, m - 1, n)
                    v.normalize()
                    termb = -2 * m * sqrt(alpha / (2.0 * m - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dYa += terma + termb

                # z component
                v.reset_powers(l, m, n + 1)
                v.normalize()
                terma = sqrt(alpha * (2.0 * n + 1.0)) * coefs * v.kinetic(upbf)
                if n > 0:
                    v.reset_powers(l, m, n - 1)
                    v.normalize()
                    termb = -2 * n * sqrt(alpha / (2.0 * n - 1.0)) * coefs * v.kinetic(upbf)
                else:
                    termb = 0.0
                dTij_dZa += terma + termb

    return dTij_dXa, dTij_dYa, dTij_dZa
5c84eea3fcd1f44bd41a9c14d0c104d9b3af0390
3,636,705
from itertools import chain

import torch
from torchvision import datasets, transforms


def get_celeba():
    """Get and preprocess the CelebA dataset."""
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor()])

    # Use only 18/40 labels as described in Appendix C.1
    mask = torch.tensor(
        [False, True, False, True, False, True, False, False, True, True,
         False, True, True, True, False, True, False, False, True, False,
         True, False, False, False, True, False, True, False, True, False,
         False, True, False, True, False, False, False, False, True, True])

    train = datasets.CelebA('./data', split='train', transform=transform,
                            download=True)
    valid = datasets.CelebA('./data', split='valid', transform=transform,
                            download=True)
    test = datasets.CelebA('./data', split='test', transform=transform,
                           download=True)

    # Use the 'train' split and 'valid' split together as the training set
    train = [(img, label[mask]) for img, label in chain(train, valid)]
    test = [(img, label[mask]) for img, label in test]
    return train, test
05789f37a9fa0c360ae4d5a2cdcfe5f9a2a4c440
3,636,706
import numpy as np
import pandas as pd


def waa_adjust_baseline(rsl, baseline, wet, waa_max, delta_t, tau):
    """Calculate baseline adjustment due to a wet antenna.

    Parameters
    ----------
    rsl : iterable of float
        Time series of received signal level
    baseline : iterable of float
        Time series of baseline for rsl
    wet : iterable of int or iterable of float
        Time series with wet/dry classification information
    waa_max : float
        Maximum value of wet antenna attenuation
    delta_t : float
        Parameter for the wet antenna attenuation model
    tau : float
        Parameter for the wet antenna attenuation model

    Returns
    -------
    iterable of float
        Adjusted time series of baseline
    """
    if type(rsl) == pd.Series:
        rsl = rsl.values
    if type(baseline) == pd.Series:
        baseline = baseline.values
    if type(wet) == pd.Series:
        wet = wet.values

    rsl = rsl.astype(np.float64)
    baseline = baseline.astype(np.float64)
    wet = wet.astype(np.float64)

    waa = _numba_waa_schleiss(rsl, baseline, waa_max, delta_t, tau, wet)

    # return baseline + waa, waa
    return baseline + waa
80bdec1a9cdd5dcf22008a6efdc08c5a7ae9ec1f
3,636,707
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """Applies linear warmup of the learning rate from 0, then decay to 0."""
    with fluid.default_main_program()._lr_schedule_guard():
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")

        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()

        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)

        return lr
727d303adf144407e45a013ca36b0bac592bc522
3,636,708
def space2():
    """Create a Space with two real dimensions."""
    space = Space()
    space.register(Real("lr", "uniform", 0, 1))
    space.register(Real("weight_decay", "uniform", 0, 1))
    return space
acc6bc1529fdc26c6e3c8140c89c0db3d7703ea7
3,636,709
def calculate_thresh(twindow, pctile, skipna):
    """Calculate the threshold for one grid cell at a time.

    Parameters
    ----------
    twindow: xarray DataArray
        Stacked array timeseries with a new 'z' dimension representing
        a window of width 2*w+1
    pctile: int
        Threshold percentile used to detect events
    skipna: bool
        If True, the percentile and mean functions will use skipna=True.
        Using the skipna option is much slower

    Returns
    -------
    thresh_climYear: xarray DataArray
        Climatological threshold
    """
    thresh_climYear = (twindow
                       .groupby('doy')
                       .quantile(pctile / 100., dim='z', skipna=skipna))
    # calculate the value for 29 Feb from the mean of 28-29 Feb and 1 Mar
    thresh_climYear = thresh_climYear.where(thresh_climYear.doy != 60,
                                            feb29(thresh_climYear))
    thresh_climYear = thresh_climYear.chunk({'doy': -1})
    return thresh_climYear
37228003d1c564205067e6d71a78ab83ffdeaf2f
3,636,710
def edit_car(item_id):
    """
    Edit item
    :param item_id:
    :return mix:
    """
    # get user
    user = get_user_by_id(session['uid'])
    # Get car
    car = get_item_by_id(item_id)
    # Check that the user is the owner
    if int(session['uid']) != int(car.author):
        flash('You don\'t have permission to edit it.', 'error')
        return redirect('/profile', 302)
    # Get token
    token = user.generate_auth_token(3600)
    if request.method == 'POST' and request.form['csrf_token'] == csrf_token:
        _car = dict()
        # cleaning data
        try:
            _car['description'] = clean(request.form['description'])
            _car['title'] = clean(request.form['title'])
            _car['model'] = clean(request.form['model'])
            _car['price'] = clean(request.form['price'])
            _car['brand'] = clean(request.form['brand'])
            _car['author'] = session['uid']
        except TypeError:
            flash('fields can\'t be empty', 'error')
            return render('catalog/new_car.html', brands=brands,
                          csrf=csrf_token)
        # update the car, create a success message and redirect the user
        item = update_item(_car, item_id)
        flash('Record "%s" was successfully updated' % item.title, 'success')
        return redirect('/profile', 302)
    return render('catalog/edit_car.html', brands=brands, car=car.serialize,
                  token=token, user=user.serialize, csrf_token=csrf_token)
498b9b07292cf2de3ac8a929f624d0e93ee793b7
3,636,711
def replace_ensembl_ids(expression_df, gene_id_mapping):
    """
    Replaces ensembl gene ids with hgnc symbols

    Arguments
    ---------
    expression_df: df
        gene expression data matrix (sample x gene)
    gene_id_mapping: df
        Dataframe mapping ensembl ids (used in DE_stats_file) to hgnc
        symbols, used in Crow et. al.

    NOTE:
    -----
    This function is deprecated due to large memory usage: when
    `expression_df` is a large dataframe, manipulating it in memory becomes
    very slow (and sometimes even impossible) due to large memory
    consumption. The same functionality has been refactored into the
    `get_renamed_columns()` and `map_recount2_data()` functions in this
    module. THIS FUNCTION IS KEPT AS A REFERENCE ONLY.
    """
    # Some columns are duplicates, for example:
    #   (ENSG00000223773.7, ENSG00000223773) --> CD99P1
    #   (ENSG00000124334.17, ENSG00000124334) --> IL9R
    # We keep the first occurrence of duplicated ensembl ids
    updated_mapping = gene_id_mapping.loc[
        ~gene_id_mapping.index.duplicated(keep="first")
    ]

    # The same ensembl ids are mapped to different gene symbols twice
    # (CCL3L1, CCL3L3):
    #   ENSG00000187510.7  ENSG00000187510  C12orf74
    #   ENSG00000187510.7  ENSG00000187510  PLEKHG7
    # Manually map them based on what is found on the ensembl site
    manual_mapping = {
        "ENSG00000187510.7": "PLEKHG7",
        "ENSG00000230417.11": "LINC00595",
        "ENSG00000255374.3": "TAS2R45",
        "ENSG00000276085.1": "CCL3L1",
    }

    # Apply manual mappings to `updated_mapping`
    for ensembl_id, gene_symbol in manual_mapping.items():
        updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol

    # Remove paralogs.
    # Some ensembl ids are paralogs (for example, "geneA" and
    # "geneA_PAR_Y"). They map to the same hgnc symbol. Homologous
    # sequences are paralogous if they were separated by a gene duplication
    # event: if a gene in an organism is duplicated to occupy two different
    # positions in the same genome, then the two copies are paralogous.
    updated_expression_df = expression_df.iloc[
        :, ~expression_df.columns.str.contains("PAR_Y")
    ]

    # Replace ensembl ids with gene symbols
    updated_expression_df.columns = updated_expression_df.columns.map(
        updated_mapping["hgnc_symbol"]
    )

    # Remove columns whose mapped ensembl id is an empty string
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns != ""
    ]

    # Remove columns whose mapped ensembl id is `NaN`
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns.notnull()
    ]

    return updated_expression_df
db21341c337481f897da47e482a6667b3e4b9c8e
3,636,712
from time import perf_counter


async def get_latency(ctx: Context) -> dict[str, str]:
    """
    Get the bot's latency and database latency.

    Parameters
    ----------
    ctx : Context
        The context.
    """
    now = perf_counter()
    collection = ctx.bot.db['test']['TESTS']
    if await collection.find_one({'_id': PAYLOAD['_id']}) is None:
        await collection.insert_one(PAYLOAD)
    else:
        await collection.find_one({'_id': PAYLOAD['_id']})

    bot_latency = f'{round(ctx.bot.latency * 1000)}ms'
    # Convert the elapsed time from seconds to milliseconds before rounding,
    # otherwise the reported value is almost always "0ms".
    database_latency = f'{round((perf_counter() - now) * 1000)}ms'

    data: dict[str, str] = {
        'bot': bot_latency,
        'database': database_latency
    }
    return data
f42e8d3456a72b9b9b6520a2c60b777d74924cd3
3,636,713
def ssl_allowed(fn):
    """
    Decorator - marks a route as allowing ssl, but not requiring it. It can
    be served over http and https.

    NOTE: This must go BEFORE the route!
    """
    fn.ssl_allowed = True
    return fn
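A hypothetical Flask-style usage; the decorator is applied before the route (i.e. listed below it) so the route registers the already-marked function:

@app.route('/about')
@ssl_allowed
def about():
    return 'Served over either http or https.'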
d8a22ed69a356189bca69a08516fd0a1187e4866
3,636,714
import re


def _get_visible_photos(browser, known_urls):
    """
    Extracts all *currently visible* photo URLs from a Flickr
    photoset/album page, converts them into "embed code compatible"
    (i.e. sanctioned by Flickr) URLs and returns them.

    Parameters
    ----------
    browser : a selenium webdriver instance
    known_urls : dict(str: dict(str: str))
        a dictionary mapping from embed code compatible image URLs to a
        dictionary holding some metadata ('image_page', 'title' and
        'orientation'). We'll update this dict if we find new images
        after scrolling down the page.

    Returns
    -------
    known_urls : dict(str: dict(str: str))
        a dictionary mapping from embed code compatible image URLs to a
        dictionary holding some metadata ('image_page', 'title' and
        'orientation')
    """
    image_elems = browser.find_elements_by_class_name('awake')
    for elem in image_elems:
        style_attrib = elem.get_attribute('style')
        match = re.match(STYLE_STRING_PATTERN, style_attrib, re.VERBOSE)
        width = int(match.group('width'))
        height = int(match.group('height'))
        orientation = get_orientation(width, height)
        url = match.group('url')

        # URL of the page that only shows one image
        try:
            image_page_elem = elem.find_element_by_class_name('overlay')
            image_page = image_page_elem.get_attribute('href')
        except NoSuchElementException:
            image_page = browser.current_url

        # title of the image
        try:
            title_elem = elem.find_element_by_class_name('interaction-bar')
            title_str = title_elem.get_attribute('title')
            title = re.match('^(?P<title>.*) by.*$', title_str).group('title')
        except NoSuchElementException:
            title = ''

        try:
            embed_url = hotlink_url2embed_url(url)
            if embed_url not in known_urls:
                known_urls[embed_url] = {
                    'image_page': image_page,
                    'title': title,
                    'orientation': orientation}
        except AttributeError:
            raise AttributeError("Warning: can't convert URL: {}".format(url))

    return known_urls
86832fc6d21e987dd815296245ef352174772cd7
3,636,715
import argparse


def get_parser():
    """Return a parser."""
    parser = argparse.ArgumentParser("cli")
    parser.add_argument('registryimage',
                        help="registry/image:tag - tag is optional")
    # Username and password come last to make them optional later
    parser.add_argument('username', help='username')
    parser.add_argument('password', help='password')
    return parser
e7bf6233cce887bf3fff1a1360ddef2134fe3eb6
3,636,716
import numpy as np


def fold_with_enum_index(xtypes, x):
    """See MixedIntegerContext.fold_with_enum_index."""
    x = np.atleast_2d(x)
    xfold = np.zeros((x.shape[0], len(xtypes)))
    unfold_index = 0
    for i, xtyp in enumerate(xtypes):
        if xtyp == FLOAT or xtyp == INT:
            xfold[:, i] = x[:, unfold_index]
            unfold_index += 1
        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            index = np.argmax(x[:, unfold_index:unfold_index + xtyp[1]],
                              axis=1)
            xfold[:, i] = index
            unfold_index += xtyp[1]
        else:
            _raise_value_error(xtyp)
    return xfold
42a2385f591ac3349d9a7c25870adb23eb0a8fe8
3,636,717
import coil.utils
import redis


def unlock(arguments):
    """Unlock the database."""
    u = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    db = redis.StrictRedis.from_url(u)
    db.set('site:lock', 0)
    print("Database unlocked.")
    return 0
859ec2ec159529ab5cb5e05c32703a3164666e68
3,636,718
from typing import cast


def filter_atom_tokens(entity: SerializableEntity) -> bool:
    """
    When locating tokens for equations, only detect atom tokens (i.e.,
    skipping affix tokens like arrows and hats), because affixes will be
    colorized by wrapping them in colorization commands.
    """
    token = cast(SerializableToken, entity)
    return token.type_ == "atom"
42c60615a7e7c87dee40d2326d5e41518644ac88
3,636,719
import io
import os

from babelfish import Language
from subliminal import download_best_subtitles, save_subtitles, subtitle


def handle_subliminal_download(video, video_path, languages_to_retrieve):
    """Download the best subtitles in French and English.

    Args:
        video: Name of the video
        video_path: Absolute path to the video
        languages_to_retrieve: dict of subtitle languages to retrieve

    Returns:
        Two dicts with the path of each subtitle, keyed by the language
        string, e.g. 'eng' for English, 'fra' for French. The first dict
        holds the paths to the vtt subtitles, the second one the paths to
        the srt subtitles.
    """
    webvtt_subtitles_returned = {}
    srt_subtitles_returned = {}
    best_subtitles = download_best_subtitles(
        [video], set(map(Language, languages_to_retrieve)))
    if best_subtitles[video]:
        for retrieved_subtitle in best_subtitles[video]:
            subtitles_are_saved = save_subtitles(
                video, [retrieved_subtitle], encoding='utf8')
            if subtitles_are_saved:
                srt_fullpath = subtitle.get_subtitle_path(
                    video_path, retrieved_subtitle.language)
                srt_subtitles_returned[
                    retrieved_subtitle.language.alpha3] = srt_fullpath
                new_data = remove_nullcharacters(srt_fullpath)
                with io.open(srt_fullpath, 'w', encoding='utf-8') as f:
                    for line in new_data:
                        f.write(line)
                webvtt_fullpath = os.path.splitext(srt_fullpath)[0] + '.vtt'
                if os.path.isfile(webvtt_fullpath):
                    # Add the subtitles path to the returned dict even if
                    # they are already downloaded/converted
                    webvtt_subtitles_returned[
                        retrieved_subtitle.language.alpha3] = webvtt_fullpath
                if os.path.isfile(srt_fullpath):
                    # Add the subtitles path to the returned dict after
                    # converting them to .vtt
                    convert_subtitles_to_webvtt(srt_fullpath, webvtt_fullpath)
                    webvtt_subtitles_returned[
                        retrieved_subtitle.language.alpha3] = webvtt_fullpath
    return webvtt_subtitles_returned, srt_subtitles_returned
358aeb71fe36d3813827758854d85536beb638ea
3,636,720
from functools import partial

import pandas as pd


def xmp_extract(fns, type_map):
    """Extract raw XMP data from the given files.

    :param fns: list of file names to process
    :param type_map: mapping of XMP fields to data types
    """
    logger.info("Extracting raw XMP data.")
    func = partial(xmp_to_vec, type_map=type_map)
    xmp_to_vec(fns[0], type_map=type_map)
    xmp_data = imap_unordered_bar(func, fns, n_proc=2)
    xmp_data = pd.DataFrame(xmp_data)

    # convert the data types
    data_fields, data = convert_types(xmp_data, type_map)
    df = pd.DataFrame(data).transpose()
    df.columns = data_fields
    df['fn'] = fns
    return df
413d179dd5dd8579e12ffb648f05802e1ff7501e
3,636,721
import textwrap


def fisbUnavailable(db):
    """Create string containing any FIS-B Unavailable messages.

    Args:
        db (object): Handle to database connection.

    Returns:
        str: Containing any FIS-B Unavailable information.
    """
    if SHOW_UNAVAILABLE == False:
        return ''
    fisbStr = ''
    for r in db.MSG.find({'type': 'FIS_B_UNAVAILABLE'},
                         {'contents': 1, 'centers': 1}):
        centerList = ','.join(r['centers'])
        centerStr = ' [' + centerList + ']'
        fisbEntry = r['contents'] + centerStr
        fisbStr = fisbStr + textwrap.fill(fisbEntry, 78,
                                          subsequent_indent=' ') + '\n'
    if fisbStr != '':
        fisbStr = '\n' + fisbStr
    return fisbStr
319a1477c0873741d7c67550b9c27d64f2707c73
3,636,722
import json
import os

import bottle


def log_get():
    """Parse the JSON log file and send it to the web server."""
    log(bottle.request)
    logbuf = []
    try:
        if os.path.exists('log.json'):
            with open('log.json') as infile:
                logbuf = json.load(infile)
    except (ValueError, IOError) as e:
        error(e)

    # Prepare the response header
    bottle.response.content_type = 'application/json'
    bottle.response.expires = 0
    bottle.response.set_header('Pragma', 'no-cache')
    bottle.response.set_header('Cache-Control',
                               'no-cache, no-store, must-revalidate')
    # Send the log buffer
    return json.dumps(logbuf)
5a3386b6e91f09ff56066b5004cc1aea1dfd7e34
3,636,723
import glob
import os
import shutil


def rotate_file(filename, copy=False):
    """
    Rotate a file like logrotate. If the given filename already exists,
    rename it to "filename".n, n=1... A filename with larger n is an
    older one.
    """
    # If the file does not exist, do nothing.
    if not os.path.isfile(filename):
        return

    # make a list [[filename, number], ...]
    old_list = []
    dot_files = glob.glob(filename + ".*")
    for f in dot_files:
        suffix = f.replace(filename + ".", "")
        try:
            i = int(suffix)
            if str(i) == suffix:  # ignore suffixes such as 003...
                old_list.append([f, i])
        except ValueError:
            continue
    old_list.sort(key=lambda x: x[1])

    # rotate the files
    for f, i in reversed(old_list):
        os.rename(f, "%s.%d" % (f[:f.rfind(".")], i + 1))
    if copy:
        shutil.copyfile(filename, filename + ".1")
    else:
        os.rename(filename, filename + ".1")
    return filename + ".1"
1d2ddbc5153b8b79e4f8130c82fdf34437f4a4d6
3,636,724
import salt.exceptions


def change_master(host, confirm=False):
    """
    Change to a different master host.

    Arguments:
      - host (str): Hostname of the new master to change to.

    Optional arguments:
      - confirm (bool): Acknowledge the execution of this command.
        Default is 'False'.
    """
    if not confirm:
        raise salt.exceptions.CommandExecutionError(
            "This command will replace your current master host with "
            "'{:s}' - add parameter 'confirm=true' to continue anyway".format(host))

    ret = {}
    ret["master_key_removed"] = __salt__["file.remove"](
        "/etc/salt/pki/minion/minion_master.pub")
    ret["config_changed"] = __salt__["file.replace"](
        "/etc/salt/minion", "^master:.*$", "master: {:s}".format(host))
    ret["restart"] = restart()
    return ret
e24da255ef2c85b18266e3143e31d19d8d4c3136
3,636,725
import six


def decode_text(s):
    """
    Decodes a PDFDocEncoding string to Unicode. Adds py3 compatibility to
    pdfminer's version.
    """
    if type(s) == bytes and s.startswith(b'\xfe\xff'):
        return six.text_type(s[2:], 'utf-16be', 'ignore')
    else:
        ords = (ord(c) if type(c) == str else c for c in s)
        return ''.join(PDFDocEncoding[o] for o in ords)
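A quick check of the BOM-prefixed branch (the PDFDocEncoding table is assumed to be defined elsewhere in the module, so only the UTF-16BE path is exercised here):

assert decode_text(b'\xfe\xff\x00h\x00i') == 'hi'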
9a98160acff455bb77dca6223454a57a0058a418
3,636,726
import numpy as np


def normalize_bound(sig, lb=0, ub=1):
    """
    Normalize a signal between the lower and upper bound.

    Parameters
    ----------
    sig : ndarray
        Original signal to be normalized.
    lb : int, float, optional
        Lower bound.
    ub : int, float, optional
        Upper bound.

    Returns
    -------
    ndarray
        Normalized signal.
    """
    mid = ub - (ub - lb) / 2
    min_v = np.min(sig)
    max_v = np.max(sig)
    mid_v = max_v - (max_v - min_v) / 2
    coef = (ub - lb) / (max_v - min_v)
    return sig * coef - (mid_v * coef) + mid
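For example, rescaling a small signal into [0, 1]:

import numpy as np
sig = np.array([2.0, 4.0, 6.0])
print(normalize_bound(sig, lb=0, ub=1))  # [0.  0.5 1. ]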
a9f609da88d05f76ce4c244eb516405956d79acb
3,636,727
import itertools

import numpy as np


def get_chisqr3d(res3d):
    """Extract the fit3d result chisqr attribute into a 3d volume.

    Args:
        res3d -- 3d numpy array of model.ModelResult; output of fit3d

    Return:
        attr3d -- numpy array of the chi-square statistics of the fit
    """
    # create an empty array
    data_type = type(res3d[0, 0, 0].chisqr)
    shape = res3d.shape
    attr3d = np.zeros(shape, dtype=data_type)

    # fill the array
    for x, y, z in itertools.product(*map(range, shape)):
        attr3d[x, y, z] = res3d[x, y, z].chisqr
    return attr3d
e0f0571237a4b79694e14abe1b0376d864b656fd
3,636,728
def detect_Telephony_SMS_abuse(x):
    """
    @param x : a VMAnalysis instance
    @rtype : a list of formatted strings
    """
    formatted_str = []
    structural_analysis_results = x.tainted_packages.search_methods(
        "Landroid/telephony/SmsManager", "sendTextMessage", ".")
    # structural_analysis_results = x.tainted_packages.search_methods(
    #     "Lcom/geinimi/c/i", "a", ".")
    print(structural_analysis_results)

    # ke added (debug output):
    # print(structural_analysis_results[0])
    # print(show_Path(structural_analysis_results))
    # print(structural_analysis_results[0].get_name())
    # print(structural_analysis_results[0].get_class_name())
    # print(structural_analysis_results[0].get_idx())
    # print(structural_analysis_results[0].get_descriptor())
    # print(len(structural_analysis_results))
    # input()

    for result in range(len(structural_analysis_results)):
        registers = data_flow_analysis(structural_analysis_results, result, x)
        # print(registers)
        # print(result)
        # input(" Y_______Y ")
        # if len(registers) > 3:
        #     target_phone_number = get_register_value(1, registers)
        #     sms_message = get_register_value(3, registers)
        #     local_formatted_str = (
        #         "This application sends an SMS message '%s' to the '%s' "
        #         "phone number" % (sms_message, target_phone_number))
        #     if not (local_formatted_str in formatted_str):
        #         formatted_str.append(local_formatted_str)

    return formatted_str
1d337cc66ea8d536832b2582beabd1985c88a2f2
3,636,729
from typing import Optional, Tuple

import torch


def depth_map_to_point_cloud(
    depth_map: torch.Tensor,
    valid_map: Optional[torch.Tensor],
    focal_x: float,
    focal_y: float,
    center_x: float,
    center_y: float,
    trunc_depth_min: Optional[float],
    trunc_depth_max: Optional[float],
    flip_h: bool = True,
    device: Optional[torch.device] = None,
    _validate_args: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Generate point clouds from `depth_map`.

    The generated point clouds are in camera space. X: camera right,
    Y: camera up, Z: forward (depth). The rank of `depth_map` must be at
    least 4D (b, c, h, w). If not, it is interpreted as (h, w), (c, h, w),
    (b, c, h, w) and (b, ..., h, w) for higher ranks. Note that `valid_map`
    must be an image type like `depth_map`: (h, w), (c, h, w) or
    (b, c, h, w). Note that (b, h, w) is not allowed and will be
    interpreted as (c, h, w) without warnings.

    Args:
        depth_map (torch.Tensor): UNNORMALIZED depth map, which means the
            range of values is [min_depth, max_depth]. torch.float32. The
            rank must be at least 4D (b, c, h, w). If not, it is converted
            automatically.
        valid_map (torch.Tensor, optional): binary mask to indicate which
            pixels are valid or invalid. Invalid pixels are discarded
            during scattering. torch.bool
        focal_x (float): focal length on the x direction.
        focal_y (float): focal length on the y direction.
        center_x (float): center coordinate of the depth map.
        center_y (float): center coordinate of the depth map.
        trunc_depth_min (float): depth below this value is truncated.
            None to disable.
        trunc_depth_max (float): depth above this value is truncated.
            None to disable.
        flip_h (bool, optional): whether to flip the horizontal axis. Note
            that in OpenCV format, the origin (0, 0) of an image is at the
            upper left corner, which should be flipped before converting
            to a point cloud. Defaults to True.
        device (torch.device, optional): torch device. Defaults to None.

    Returns:
        torch.Tensor: point cloud in shape (..., 3)
        torch.Tensor: mask in shape (..., h, w) indicating the valid area
    """
    if _validate_args:
        # Convert to tensors and ensure they are on the same device
        depth_map = utils.validate_tensors(depth_map,
                                           same_device=device or True)
        # Ensure the tensor shape is at least 4D (b, ..., h, w)
        depth_map = utils.to_4D_image(depth_map)  # (b, c, h, w)
        # Ensure dtypes
        depth_map = depth_map.to(dtype=torch.float32)
        if valid_map is not None:
            valid_map = utils.to_tensor(valid_map, device=depth_map.device)
            valid_map = utils.to_4D_image(valid_map)  # (b, c, h, w)
            valid_map = valid_map.to(dtype=torch.bool)

    device = depth_map.device
    x, y = utils.generate_image_coords(
        depth_map.shape,
        dtype=torch.float32,
        device=device
    )  # same shape as depth_map
    z = depth_map  # (..., h, w)
    points = torch.stack((x, y, z), dim=-1)
    point_cloud = image_to_camera_space(
        points=points,
        focal_x=focal_x,
        focal_y=focal_y,
        center_x=center_x,
        center_y=center_y,
        flip_h=flip_h,
        height=depth_map.shape[-2],
        _validate_args=False
    )  # (..., h, w, 3)

    valid_area = torch.ones_like(z, dtype=torch.bool)  # (..., h, w)
    # Truncate invalid values
    if trunc_depth_max is not None:
        valid_area = torch.logical_and(z <= trunc_depth_max, valid_area)
    if trunc_depth_min is not None:
        valid_area = torch.logical_and(z >= trunc_depth_min, valid_area)
    if valid_map is not None:
        valid_area = torch.logical_and(valid_area, valid_map)
    return point_cloud, valid_area
555dc106e8e9075d6f05ba5b221bba3cb8dccb34
3,636,730
import logging


def symbol_definitions(goto, wkdir, srcdir=None):
    """Symbol definitions appearing in the symbol table.

    Source file path names in the symbol table are absolute or relative to
    wkdir. If srcdir is given, return only symbols defined in files under
    srcdir.
    """
    wkdir = srcloct.abspath(wkdir)
    srcdir = srcloct.abspath(srcdir)

    symbols = {}
    for dfn in parse_symbol_table(symbol_table(goto), wkdir):
        sym, src, num = dfn['symbol'], dfn['file'], dfn['line']
        if sym is None or src is None or num is None:
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         sym, src, num)
            continue
        if srcdir and not src.startswith(srcdir):
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         sym, src, num)
            continue
        srcloc = srcloct.make_srcloc(src, None, num, wkdir, srcdir)
        if sym in symbols and srcloc != symbols[sym]:
            logging.warning("Skipping redefinition of symbol name: %s", sym)
            logging.warning("  Old symbol %s: file %s, line %s",
                            sym, symbols[sym]["file"], symbols[sym]["line"])
            logging.warning("  New symbol %s: file %s, line %s",
                            sym, srcloc["file"], srcloc["line"])
            continue
        symbols[sym] = srcloc
    return symbols
4030fa44407146f3339088fb6a34ff2822410b83
3,636,731
import os
import os.path as op

import numpy as np


def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
                       overwrite=False, subjects_dir=None, add_dist=None,
                       verbose=None):
    """Setup a source space with subsampling.

    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None | bool
        Filename to use. If True, a default name will be used. If None,
        the source space will not be saved (only returned).
    spacing : str
        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
        or ``'all'`` for all points.
    surface : str
        The surface to use.
    overwrite : bool
        If True, overwrite the output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    add_dist : bool
        Add distance and patch information to the source space. This takes
        some time so precomputing it is recommended. The default is
        currently False but will change to True in release 0.9.
    verbose : bool, str, int, or None
        If not None, override the default verbose level (see mne.verbose).

    Returns
    -------
    src : list
        The source space for each hemisphere.
    """
    if add_dist is None:
        msg = ("The add_dist parameter to mne.setup_source_space currently "
               "defaults to False, but the default will change to True in "
               "release 0.9. Specify the parameter explicitly to avoid this "
               "warning.")
        logger.warning(msg)

    cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
           % (subject, fname, spacing, surface, overwrite, subjects_dir,
              add_dist, verbose))

    # check to make sure our parameters are good, parse 'spacing'
    space_err = ('"spacing" must be a string with values '
                 '"ico#", "oct#", or "all", and "ico" and "oct" '
                 'numbers must be integers')
    if not isinstance(spacing, string_types) or len(spacing) < 3:
        raise ValueError(space_err)
    if spacing == 'all':
        stype = 'all'
        sval = ''
    elif spacing[:3] == 'ico':
        stype = 'ico'
        sval = spacing[3:]
    elif spacing[:3] == 'oct':
        stype = 'oct'
        sval = spacing[3:]
    else:
        raise ValueError(space_err)
    try:
        if stype in ['ico', 'oct']:
            sval = int(sval)
        elif stype == 'spacing':  # spacing
            sval = float(sval)
    except Exception:
        raise ValueError(space_err)

    subjects_dir = get_subjects_dir(subjects_dir)
    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
             for hemi in ['lh.', 'rh.']]
    bem_dir = op.join(subjects_dir, subject, 'bem')

    for surf, hemi in zip(surfs, ['LH', 'RH']):
        if surf is not None and not op.isfile(surf):
            raise IOError('Could not find the %s surface %s' % (hemi, surf))

    if not (fname is True or fname is None
            or isinstance(fname, string_types)):
        raise ValueError('"fname" must be a string, True, or None')
    if fname is True:
        extra = '%s-%s' % (stype, sval) if sval != '' else stype
        fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))

    if fname is not None and op.isfile(fname) and overwrite is False:
        raise IOError('file "%s" exists, use overwrite=True if you want '
                      'to overwrite the file' % fname)

    logger.info('Setting up the source space with the following parameters:\n')
    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
    logger.info('Subject      = %s' % subject)
    logger.info('Surface      = %s' % surface)
    if stype == 'ico':
        src_type_str = 'ico = %s' % sval
        logger.info('Icosahedron subdivision grade %s\n' % sval)
    elif stype == 'oct':
        src_type_str = 'oct = %s' % sval
        logger.info('Octahedron subdivision grade %s\n' % sval)
    else:
        src_type_str = 'all'
        logger.info('Include all vertices\n')

    # Create the fif file
    if fname is not None:
        logger.info('>>> 1. Creating the source space file %s...' % fname)
    else:
        logger.info('>>> 1. Creating the source space...\n')

    # mne_make_source_space ... actually make the source spaces
    src = []

    # pre-load ico/oct surf (once) for speed, if necessary
    if stype in ['ico', 'oct']:
        # from mne_ico_downsample.c
        if stype == 'ico':
            logger.info('Doing the icosahedral vertex picking...')
            ico_surf = _get_ico_surface(sval)
        else:
            logger.info('Doing the octahedral vertex picking...')
            ico_surf = _tessellate_sphere_surf(sval)
    else:
        ico_surf = None

    for hemi, surf in zip(['lh', 'rh'], surfs):
        logger.info('Loading %s...' % surf)
        s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                                 subjects_dir)
        logger.info('loaded %s %d/%d selected to source space (%s)'
                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
        src.append(s)
        logger.info('')  # newline after both subject types are eggie

    # Fill in source space info
    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI,
                FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for s, s_id in zip(src, hemi_ids):
        # Add missing fields
        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
                      nearest_dist=None, pinfo=None, patch_inds=None,
                      id=s_id,
                      coord_frame=np.array((FIFF.FIFFV_COORD_MRI,),
                                           np.int32)))
        s['rr'] /= 1000.0
        del s['tri_area']
        del s['tri_cent']
        del s['tri_nn']
        del s['neighbor_tri']

    # upconvert to object format from lists
    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))

    if add_dist:
        add_source_space_distances(src, verbose=verbose)

    # write out if requested, then return the data
    if fname is not None:
        write_source_spaces(fname, src)
        logger.info('Wrote %s' % fname)
    logger.info('You are now one step closer to computing the gain matrix')
    return src
203008b69f0bf6fd06b8e069a92f1742d6131c19
3,636,732
import re

from requests import get


def get_my_ip() -> str:
    """Function to get the current IP in the network."""
    url = "http://checkip.dyndns.com/"
    return re.compile(r"Address: (\d+\.\d+\.\d+\.\d+)").search(get(url).text).group(1)
0c52ad85ec29a1dfb65f2699b13e66b038fc31a8
3,636,733
import nestcheck.ns_run_utils
import numpy as np


def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module docstring
        for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be calculated.
    param_ind: int, optional
        Index of the parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    w_relative /= np.sum(w_relative)
    return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
bcb7677eb648ad559e56b853de4f9246da638ff2
3,636,734
import pandas as pd
from yahoo_finance_api2 import share  # likely source of the share module used below


def get_historical_data(Code=None):
    """
    Get the historical data corresponding to a specified stock code.

    Args:
        Code (str): A stock code corresponding to the historical data.

    Returns:
        The historical data corresponding to the stock code, as a pandas
        DataFrame.
    """
    if Code is None:
        raise ValueError("Specify a stock code.")

    share_data = share.Share(Code).get_historical(
        share.PERIOD_TYPE_YEAR, 6,
        share.FREQUENCY_TYPE_DAY, 1
    )
    columns = list(share_data.keys())
    columns[0] = "Date"
    columns[-2] = "Adj. Close"  # TODO: it's not actually Adjusted Close, but Close.
    df = pd.DataFrame(
        list(zip(*share_data.values())),
        columns=columns
    )
    df["Date"] = pd.to_datetime(df["Date"], unit="ms").dt.date
    return df
a951340385868fa84e6b7905715f7a0ade6f05d3
3,636,735
def match_login_url_with(username, default="https://foo.bar/login"):
    """
    Match a given username with the corresponding login URL.

    :param username: username of user. type str
    :returns URL: login URL for user. type str
    """
    return matches(
        {
            "yelluw": "https://yelluw.com/login",
            "Pablo": "https://pablojuan.com/login",
        },
        username,
        default=default
    )
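Assuming matches() is a helper that looks the key up in the dict and falls back to the default, the calls would behave like this:

match_login_url_with("yelluw")   # -> "https://yelluw.com/login"
match_login_url_with("someone")  # -> "https://foo.bar/login" (the default)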
f6351e978b2010818092bfb3340fde1b84635d32
3,636,736
def odict_1to1(from_sp, to_sp):
    """Filtered flat odict with only 1-to-1 orthologs."""
    od = odict(from_sp, to_sp)
    od_rev = odict(to_sp, from_sp)
    return dict([(k, list(v)[0]) for k, v in od.items()
                 if len(v) == 1 and len(od_rev[list(v)[0]]) == 1])
1ad1deca32d883f2bd8637d93b8b1a3578a05a75
3,636,737
from fnmatch import fnmatch

import usb.core


def find_devices(vendor=None, product=None, serial_number=None,
                 custom_match=None, **kwargs):
    """Find connected USB devices matching certain keywords.

    Wildcards can be used for vendor, product and serial_number.

    :param vendor: name or id of the vendor (manufacturer)
    :param product: name or id of the product
    :param serial_number: serial number.
    :param custom_match: callable returning True or False that takes a
        device as only input.
    :param kwargs: other properties to match. See usb.core.find
    :return:
    """
    kwargs = kwargs or {}
    attrs = {}
    if isinstance(vendor, str):
        attrs['manufacturer'] = vendor
    elif vendor is not None:
        kwargs['idVendor'] = vendor
    if isinstance(product, str):
        attrs['product'] = product
    elif product is not None:
        kwargs['idProduct'] = product
    if serial_number:
        attrs['serial_number'] = str(serial_number)

    if attrs:
        def cm(dev):
            if custom_match is not None and not custom_match(dev):
                return False
            info = DeviceInfo.from_device(dev)
            for attr, pattern in attrs.items():
                if not fnmatch(getattr(info, attr).lower(), pattern.lower()):
                    return False
            return True
    else:
        cm = custom_match

    return usb.core.find(find_all=True, custom_match=cm, **kwargs)
7067b27d9d3c27eabe28150e460661682f02045d
3,636,738
def get_static_graph(app_name=None, app_dict=None, *args, **kwargs):
    """Explicitly avoid request and user."""
    return get_graph(app_name=app_name, app_dict=app_dict, request=None)
8628f88e88080b18ead39871f3e3f69ba07c09a6
3,636,739
import numpy as np


def downsample_spectrum(ar_wavelength, ar_flux, ar_ivar, scale):
    """
    :type ar_wavelength: np.ndarray
    :type ar_flux: np.ndarray
    :type ar_ivar: np.ndarray
    :type scale: int
    :return: (np.ndarray, np.ndarray, np.ndarray)
    """
    new_length = ar_wavelength.size // scale
    old_length_clipped = new_length * scale
    ar_wavelength_2d = ar_wavelength[:old_length_clipped].reshape(
        (new_length, scale))
    ar_flux_2d = ar_flux[:old_length_clipped].reshape((new_length, scale))
    ar_ivar_2d = ar_ivar[:old_length_clipped].reshape((new_length, scale))
    ar_weighted_flux_2d = ar_flux_2d * ar_ivar_2d
    ar_wavelength_small = np.nanmean(ar_wavelength_2d, axis=1)
    ar_ivar_small = np.nansum(ar_ivar_2d, axis=1)
    with np.errstate(invalid='ignore'):
        ar_flux_small = np.nansum(ar_weighted_flux_2d, axis=1) / ar_ivar_small
    return ar_wavelength_small, ar_flux_small, ar_ivar_small
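A small worked example (with uniform inverse variance, downsampling by 2 averages the wavelengths, sums the ivar, and takes the ivar-weighted flux mean):

import numpy as np
wl = np.array([1.0, 2.0, 3.0, 4.0])
flux = np.array([10.0, 20.0, 30.0, 40.0])
ivar = np.ones(4)
w, f, iv = downsample_spectrum(wl, flux, ivar, scale=2)
# w -> [1.5, 3.5], f -> [15., 35.], iv -> [2., 2.]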
443a917c02eab7bdfc2c544c9bec431dbe1691ac
3,636,740
def format_date(date):
    """
    Format a date for creation of a Twitter URL and the Facebook API.

    Formats a datetime object to a string in the form of '%Y-%m-%d',
    e.g. '2018-01-21'.

    Parameters
    ----------
    date : datetime
        date to be formatted

    Returns
    -------
    str
        date in string representation
    """
    return date.strftime('%Y-%m-%d')
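For example:

from datetime import datetime
print(format_date(datetime(2018, 1, 21)))  # '2018-01-21'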
d76e81613d2c3b06623cadb30d706c537555ad51
3,636,741
import base64


def basic_token(username, password):
    """Generate the Authorization token for the Resource Orchestrator
    (SO-ub container).

    Args:
        username (str): the SO-ub username
        password (str): the SO-ub password

    Returns:
        str: the Basic token
    """
    if not isinstance(username, str):
        raise TypeError(
            "The given type of username is `{}`. Expected str.".format(
                type(username)))
    if not isinstance(password, str):
        raise TypeError(
            "The given type of password is `{}`. Expected str.".format(
                type(password)))
    credentials = str.encode(username + ":" + password)
    return bytes.decode(base64.b64encode(credentials))
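For example:

print(basic_token("admin", "secret"))  # 'YWRtaW46c2VjcmV0'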
054fccad28d1c18a34d630a664742f77e15ee4fe
3,636,742
import csv


def read_alias(alias_csv_path):
    """Reads alias.csv at the specified path.

    Then returns a dict mapping from alias to monster id.
    """
    with open(alias_csv_path) as alias_csv:
        return {
            alias: int(monster_id)
            for alias, monster_id in csv.reader(alias_csv)}
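A minimal round trip with a hypothetical two-column alias file:

with open('alias.csv', 'w') as f:
    f.write('valk,1260\nmeta,2723\n')
print(read_alias('alias.csv'))  # {'valk': 1260, 'meta': 2723}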
3a3818b81a916b4dd18ca7cab5fbcbe1b4050d03
3,636,743
async def get_pipeline_run_node_steps(request: web.Request, organization,
                                      pipeline, run, node) -> web.Response:
    """get_pipeline_run_node_steps

    Retrieve run node step details for an organization pipeline.

    :param organization: Name of the organization
    :type organization: str
    :param pipeline: Name of the pipeline
    :type pipeline: str
    :param run: Name of the run
    :type run: str
    :param node: Name of the node
    :type node: str
    """
    return web.Response(status=200)
8ad3b987500366d562a5f6f59cc106fe374f50aa
3,636,744
import pandas as pd


def query_available_collections(opts: Options) -> pd.DataFrame:
    """Search for the available collections."""
    # GraphQL query to get the collections
    query = create_collections_query()
    # Call the server
    reply = query_server(opts.web, query)
    collections = json_properties_to_dataframe(reply["collections"])
    print("Available collections:\n", collections)
    return collections
f1ce5738739956a4a9a4258f35cdfadd1a75dffc
3,636,745
from typing import Any, Dict, List, Tuple


def _create_agent_object_list(
    trial_list: List[List[Dict[str, Any]]],
    agent_object_config_list: List[ObjectConfigWithMaterial],
    unit_size: Tuple[float, float]
) -> List[Dict[str, Any]]:
    """Create and return the MCS scene's agent object list using the given
    trial list from the JSON file data."""
    agent_object_list = []

    # Retrieve the agent data from the first frame of the first trial.
    # Assume only one agent and the agent will never change shape/color.
    json_agent = trial_list[0][0]['agent']
    json_coords = json_agent[0]
    json_radius = json_agent[1]
    json_size = [json_radius * 2, json_radius * 2]

    # Create the MCS agent object.
    config_with_material = agent_object_config_list[0]
    agent_object = _create_object(
        'agent_',
        config_with_material.object_type,
        config_with_material.material,
        [config_with_material.center_y, config_with_material.scale_y],
        [config_with_material.scale_x, config_with_material.scale_z],
        json_coords,
        json_size,
        unit_size
    )
    agent_object[tags.SCENE.UNTRAINED_SHAPE] = config_with_material.untrained
    agent_object_list.append(agent_object)

    # Remove the agent's first appearance (we will override it later).
    agent_object['shows'] = []
    agent_object['boundsAtStep'] = []

    # Add data for the agent's movement across the frames to each step.
    step = 0
    for trial in trial_list:
        for frame in trial:
            json_agent = frame['agent']
            json_coords = json_agent[0]
            json_radius = json_agent[1]
            json_size = [json_radius * 2, json_radius * 2]
            # Move the agent to its new position for the step.
            agent_object['shows'].append(_create_show(
                step,
                agent_object['configHeight'],
                agent_object['configSize'],
                json_coords,
                json_size,
                unit_size
            ))
            step += 1
            agent_object['boundsAtStep'].append(
                agent_object['shows'][-1]['boundingBox']
            )
        # Add 1 for the EndHabituation action step at the end of the trial.
        step += 1
        agent_object['boundsAtStep'].append(
            agent_object['shows'][-1]['boundingBox']
        )

    # Remove the scale from each element in 'shows' except for the first,
    # or it will really mess up the simulation.
    for show in agent_object['shows'][1:]:
        del show['scale']

    return agent_object_list
e30b172a7c2dc2c35e180955a65cd5de98a43ec1
3,636,746
async def send_data_controller_details_message_handler(request: web.BaseRequest):
    """Send a data controller details message to the remote agent hosted by
    the Data Controller."""
    context = request.app["request_context"]
    connection_id = request.match_info["connection_id"]

    # Initialise the MyData DID Manager.
    mydata_did_manager: ADAManager = ADAManager(context=context)
    try:
        # Call the function
        await mydata_did_manager.send_data_controller_details_message(
            connection_id)
    except (ConnectionManagerError, BaseModelError, ADAManagerError) as err:
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    except Exception as err:
        raise web.HTTPInternalServerError(reason=str(err)) from err

    return web.json_response({}, status=200)
65437e2f3ea79c9d09d04bfabe1fce6ef02294a4
3,636,747
def test_llhelper(monkeypatch):
    """Show how to get function pointers used in type slots"""
    FT = lltype.FuncType([], lltype.Signed)
    FTPTR = lltype.Ptr(FT)

    def make_wrapper(self, space):
        def wrapper():
            return self.callable(space)
        return wrapper
    monkeypatch.setattr(pypy.module.cpyext.api.ApiFunction, '_make_wrapper',
                        make_wrapper)

    @specialize.memo()
    def get_tp_function(space, typedef):
        @slot_function([], lltype.Signed, error=-1)
        def slot_tp_function(space):
            return typedef.value
        api_func = slot_tp_function.api_func
        return lambda: llhelper(api_func.functype,
                                api_func.get_wrapper(space))

    class Space:
        _cache = {}

        @specialize.memo()
        def fromcache(self, key):
            try:
                return self._cache[key]
            except KeyError:
                result = self._cache[key] = self.build(key)
                return result

        def _freeze_(self):
            return True

    class TypeDef:
        def __init__(self, value):
            self.value = value

        def _freeze_(self):
            return True

    class W_Type:
        def __init__(self, typedef):
            self.instancetypedef = typedef

        def _freeze(self):
            try:
                del self.funcptr
            except AttributeError:
                pass
            return False

    w_type1 = W_Type(TypeDef(123))
    w_type2 = W_Type(TypeDef(456))
    space = Space()

    def run(x):
        if x:
            w_type = w_type1
        else:
            w_type = w_type2
        typedef = w_type.instancetypedef
        w_type.funcptr = get_tp_function(space, typedef)()
        return w_type.funcptr()

    fn = compile(run, [bool])
    assert fn(True) == 123
    assert fn(False) == 456
323ddd524e24eeb70284bf2229e77fe66e557f51
3,636,748
def get_chronicle_http_client(account_info):
    """
    Return an http client that is authorized with the given credentials
    using oauth2client or google-auth.
    """
    try:
        credentials = service_account.Credentials.from_service_account_info(
            account_info, scopes=current_app.config['AUTH_SCOPES']
        )
    except ValueError as e:
        raise AuthorizationError(str(e))
    return _auth.authorized_http(credentials)
df75a33c41891ccdab36ac933c81a09be8ebf4f8
3,636,749
def svn_auth_save_credentials(*args):
    """svn_auth_save_credentials(svn_auth_iterstate_t state, apr_pool_t pool) -> svn_error_t"""
    # apply() was removed in Python 3; call the SWIG wrapper directly.
    return _core.svn_auth_save_credentials(*args)
429958f965bea9b5f4838cf471c91dd6e1d26e77
3,636,750
def get_nodes_str(name, nodes):
    """Helper function to dump nodes as a list of names."""
    nodes_str = " %s nodes = %d\n" % (name, len(nodes))
    nodes_str += " " + ", ".join(map(lambda x: x._name, nodes)) + "\n"
    return nodes_str
cafb9fd0aa202c2172aede97eabbf829dc9a1b53
3,636,751
def compute_X_axis(dataset, frame, dss, ref_freq=None, vspline=None,
                   time=None):
    """
    Computes the appropriate X-axis for the averaged difference spectrum.

    'vobj' is the velocity of the object in the rest velocity of the frame
    specified. Acceptable frames are defined in the
    SAOhdf5.rel_freq_units() docstring. In addition we allow here RADI-OBJ,
    which defines the rest frame of the object.

    @param dataset : information about the target source
    @type dataset : dict
    @param frame : the rest frame and the X-axis type (freq or vel)
    @type frame : str
    @param dss : DSS station
    @type dss : int
    @param ref_freq : frequency in MHz for computing relative frequencies
    @type ref_freq : float
    @param vspline : radial velocity of a moving body as a function of time
    @type vspline : function
    @param time : the time at which the spline is to be evaluated
    @type time : UNIX seconds
    """
    n_chans = dataset.data[1].shape[0]
    if ref_freq:
        f_ref = ref_freq
    else:
        f_ref = dataset.header['linefreq'] / 1e6  # MHz
    v_ref = dataset.header['VELOCITY']
    logger.debug("compute_X_axis: requested frame is %s", frame)
    logger.debug("compute_X_axis: reference frequency is %10.3f", f_ref)
    if frame == "CHAN-OBS" or frame == "FREQ-OBS" or frame == "RELA-OBS":
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref)
        vobj = None
    elif frame == "RADI-OBS":
        vobj = V_LSR(dataset.header, time, dss)
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref)
    elif frame == "RADI-LSR":
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref,
                                   v_frame=V_LSR(dataset.header, time, dss))
        vobj = v_ref
        logger.debug("compute_X_axis: vobj = %.2f", vobj)
    elif frame == "RADI-OBJ":
        # This is the object's rest frame
        if vspline and time:
            vobj = vspline(time)
            x = -(c / 1000) * dataset.rel_freq_units(frame="DELF-OBS") / f_ref - vobj
        else:
            vobj = dataset.header[0]['VELOCITY']
            x = dataset.rel_freq_units(
                frame=frame, ref_freq=f_ref,
                v_frame=V_LSR(dataset.header, time, dss) + vobj)
    else:
        # This is a module-level function, so there is no `self` here.
        logger.warning("compute_X_axis: frame %s is not valid", frame)
        return
    return x, frame, vobj
0d7d3888b053fe2012bcb05f0619bb2e5dcdb86b
3,636,752
import requests


def clean_df(df, selected_columns=default_columns):
    """Take a dataframe with GDELT 2.0 data, retain only the columns useful
    to us, and add the country where the news was written.

    Keyword arguments:
    df -- The dataframe complying to the GDELT 2.0 column format
    selected_columns (optional) -- The set of columns we want to keep
    """
    df = df[selected_columns]
    df = df.dropna(axis=0, how='any')
    mapping = get_mapping(df).set_index('ActionGeo_CountryCode')
    df['Country_Code'] = df['ActionGeo_CountryCode'].apply(
        lambda x: mapping.loc[x]['Country_Code']
        if x in mapping['Country_Code'].index.values else 'None')
    df['Country_Source'] = get_countries_for_dataframe(
        df, 'SOURCEURL', get_all_newspapers_to_country_dict(),
        get_tld_to_country_dict())
    r = requests.get('https://raw.githubusercontent.com/mledoze/countries/'
                     'master/countries.json')
    d = {}
    for c in r.json():
        d[c['cca3']] = c['name']['common']
    df['Country_Name'] = df['Country_Code'].apply(
        lambda x: d[x] if x in d else 'None')
    return df[cleaned_columns]
6ef2b2537c5190541691c4230e6f3164c5c9ae32
3,636,753
import tensorflow as tf


def mse(y_true, y_pred, reduce_mode="mean"):
    """Mean squared error."""
    squared = tf.math.square(y_pred - y_true)
    # functools.reduce cannot reduce a tensor by a mode name, so dispatch
    # on the requested reduction explicitly.
    if reduce_mode == "mean":
        return tf.reduce_mean(squared)
    elif reduce_mode == "sum":
        return tf.reduce_sum(squared)
    return squared
a74f04405d1cbc5d4ff3715286f4b76fa3355a42
3,636,754
import numpy as np


def calculate_assignment_probabilites(assignments, num_clusters):
    """Counts the occurrence of each assignment to get an empirical pdf
    estimate."""
    temp = np.arange(num_clusters)
    hist_b_edges = np.hstack([-np.inf, (temp[:-1] + temp[1:]) / 2, np.inf])
    assignment_counts, _ = np.histogram(assignments, hist_b_edges)
    empirical_density = assignment_counts / np.sum(assignment_counts)
    return empirical_density
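A quick sanity check of the empirical pdf estimate:

import numpy as np
assignments = np.array([0, 0, 1, 2, 2, 2])
print(calculate_assignment_probabilites(assignments, num_clusters=3))
# [0.33333333 0.16666667 0.5]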
fe2d99b108d9baac9876a7cb9af54cb69a04525a
3,636,755
import contextlib
import io
import os
import sys
import termios


def unix_getpass(prompt='Password: ', stream=None):
    """Prompt for a password, with echo turned off.

    Args:
      prompt: Written on stream to ask for the input. Default: 'Password: '
      stream: A writable file object to display the prompt. Defaults to
        the tty. If no tty is available defaults to sys.stderr.

    Returns:
      The seKr3t input.

    Raises:
      EOFError: If our input tty or stdin was closed.
      GetPassWarning: When we were unable to turn echo off on the input.

    Always restores terminal settings before returning.
    """
    passwd = None
    with contextlib.ExitStack() as stack:
        try:
            # Always try reading and writing directly on the tty first.
            fd = os.open('/dev/tty', os.O_RDWR | os.O_NOCTTY)
            tty = io.FileIO(fd, 'w+')
            stack.enter_context(tty)
            input = io.TextIOWrapper(tty)
            stack.enter_context(input)
            if not stream:
                stream = input
        except OSError:
            # If that fails, see if stdin can be controlled.
            stack.close()
            try:
                fd = sys.stdin.fileno()
            except (AttributeError, ValueError):
                fd = None
                passwd = fallback_getpass(prompt, stream)
            input = sys.stdin
            if not stream:
                stream = sys.stderr

        if fd is not None:
            try:
                old = termios.tcgetattr(fd)  # a copy to save
                new = old[:]
                new[3] &= ~termios.ECHO  # 3 == 'lflags'
                tcsetattr_flags = termios.TCSAFLUSH
                if hasattr(termios, 'TCSASOFT'):
                    tcsetattr_flags |= termios.TCSASOFT
                try:
                    termios.tcsetattr(fd, tcsetattr_flags, new)
                    passwd = _raw_input(prompt, stream, input=input)
                finally:
                    termios.tcsetattr(fd, tcsetattr_flags, old)
                    stream.flush()  # issue7208
            except termios.error:
                if passwd is not None:
                    # _raw_input succeeded. The final tcsetattr failed.
                    # Reraise instead of leaving the terminal in an
                    # unknown state.
                    raise
                # We can't control the tty or stdin. Give up and use
                # normal IO. fallback_getpass() raises an appropriate
                # warning.
                if stream is not input:
                    # clean up unused file objects before blocking
                    stack.close()
                passwd = fallback_getpass(prompt, stream)

        stream.write('\n')
        return passwd
08553ad400e7c3379ab87b64a7db5fc4b9518aa5
3,636,756
def getStudiesOptions(request, id): """ Get a list of studies for an investigation id. Input: id, investigation id. """ seekdb = SeekDB(None, None, None) user_seek = seekdb.getSeekLogin(request, False) investigation_id = id studies = seekdb.getStudiesFromID(investigation_id) #print(studies) #study_options = json.dumps(convertDicToOptions(studies)) study_options = convertDicToOptions(studies) #print(study_options) data = {'msg':'okay', 'status': 1, 'study_options':study_options} return HttpResponse(simplejson.dumps(data))
b864a9aae99851f8f904cbc55b896ec6d22300c1
3,636,757
def fibonacci_thrid(n):
    """Compute the n-th Fibonacci number (naive recursive version 3)."""
    return n if n < 2 else fibonacci_thrid(n - 2) + fibonacci_thrid(n - 1)
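# Minimal usage sketch: the naive recursion is O(2^n), so keep n small
print([fibonacci_thrid(n) for n in range(10)])  # -> [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]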
b98251e9bd4ec507933338738c2b65faea8700b2
3,636,758
import socket
import urlparse


def site_url(url):
    """ Determine the server URL. """
    base_url = 'http://%s' % socket.gethostname()
    # 'is not' compares identity, not value; use != for the port check
    if server.port != 80:
        base_url += ':%d' % server.port
    return urlparse.urljoin(base_url, url)
cb759e7e7d0273397106be79c20072eb3d7d1898
3,636,759
from typing import List, Tuple

import numpy as np


def get_coordinates(
    mask: np.ndarray, ths: int = 5, kernel_len: int = 10
) -> Tuple[List, np.ndarray, np.ndarray]:
    """This function extracts the coordinates of the table and of the horizontal
    and vertical lines.

    Args:
        mask (np.ndarray): A binary table image
        ths (int, optional): Threshold used to merge lines whose y coordinates
            (for horizontal lines) or x coordinates (for vertical lines) differ
            by less than this value. Defaults to 5.
        kernel_len (int, optional): The size of the kernel to apply. Defaults to 10.

    Raises:
        ValueError: will be raised if the number of detected lines is not enough
            to rebuild the table

    Returns:
        Tuple[List, np.ndarray, np.ndarray]: Tuple containing the coordinates of
            the table, the vertical and the horizontal lines.
    """
    # get horizontal lines mask image
    horizontal_lines_mask = get_hor_lines_mask(mask, kernel_len)
    # get vertical lines mask image
    vertical_lines_mask = get_ver_lines_mask(mask, kernel_len)
    # get coordinates of horizontal and vertical lines
    hor_lines = get_lines_coordinate(horizontal_lines_mask, axis=0, ths=ths)
    ver_lines = get_lines_coordinate(vertical_lines_mask, axis=1, ths=ths)
    if len(hor_lines.shape) != 2 or len(ver_lines.shape) != 2:
        raise ValueError("Empty line coords array")
    # remove noise edge
    hor_lines, ver_lines = remove_noise(hor_lines, ver_lines, ths)
    # get coordinates of the table
    tab_x1, tab_y1, tab_x2, tab_y2 = get_table_coordinate(hor_lines, ver_lines)

    # make sure every table has all 4 borders
    new_ver_lines = []
    new_hor_lines = []
    for e in ver_lines:
        x1, y1, x2, y2 = e
        # don't add left and right border
        if abs(x1 - tab_x1) >= ths and abs(x2 - tab_x2) >= ths:
            new_ver_lines.append([x1, y1, x2, y2])
    for e in hor_lines:
        x1, y1, x2, y2 = e
        # don't add top and bottom border
        if abs(y1 - tab_y1) >= ths and abs(y2 - tab_y2) >= ths:
            new_hor_lines.append([x1, y1, x2, y2])
    # add top, bottom, left, right border
    new_ver_lines.append([tab_x1, tab_y1, tab_x1, tab_y2])
    new_ver_lines.append([tab_x2, tab_y1, tab_x2, tab_y2])
    new_hor_lines.append([tab_x1, tab_y1, tab_x2, tab_y1])
    new_hor_lines.append([tab_x1, tab_y2, tab_x2, tab_y2])
    # normalize
    final_hor_lines = normalize_v1(new_hor_lines, axis=0, ths=ths)
    final_ver_lines = normalize_v1(new_ver_lines, axis=1, ths=ths)
    final_hor_lines, final_ver_lines = normalize_v2(final_ver_lines, final_hor_lines)
    return [tab_x1, tab_y1, tab_x2, tab_y2], final_ver_lines, final_hor_lines
3c0bbc395df07cb240d82cf0cf78b3623591bd98
3,636,760
import numpy as np


def get_pixels(extrinsic, intrinsic, X):
    """
    Returns the x, y pixels for the given X vector
    :param extrinsic: extrinsic (4*4) matrix obtained from the headset
    :param intrinsic: intrinsic (3*3) matrix obtained from the headset
    :param X: the position vector
    :return: image pixels for the vector
    """
    # World point to homogeneous camera coordinates
    intm = np.dot(extrinsic, np.append(X, 1))
    # Perspective division by depth, then drop the homogeneous coordinate
    intm = (intm / intm[2])[:3]
    # Apply the camera intrinsics and keep the pixel coordinates
    intm = np.dot(intrinsic, intm)[:2]
    return [intm[0], intm[1]]
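# Minimal usage sketch with identity camera matrices (illustrative; demo_* names
# are not part of the original): a point at (1, 2, 4) projects to (0.25, 0.5)
# after perspective division by its depth.
demo_extrinsic = np.eye(4)
demo_intrinsic = np.eye(3)
print(get_pixels(demo_extrinsic, demo_intrinsic, np.array([1.0, 2.0, 4.0])))  # -> [0.25, 0.5]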
700bbbd721e4a1547c593163a41e13a8c20bee0d
3,636,761
def delete_question(media_package, level=0):
    """
    Ask user the question whether they want to delete the distribution artefacts for the next media package or for
    all remaining media packages.

    :param media_package: The media package to ask the question for
    :type media_package: str
    :param level: The level to indent the question to
    :type level: int
    :return: The answer.
    :rtype: FixAnswer
    """
    long_descriptions = ["deleting the distribution artefacts of the next media package",
                         "deleting all (remaining) distribution artefacts",
                         "quitting the script"]
    short_descriptions = ["next", "all", "quit"]
    options = ['n', 'a', 'q']
    question = "Delete distribution artefacts of media package {}?".format(media_package)
    answer = get_configurable_answer(options, short_descriptions, long_descriptions, question, level)
    return answer
b11ea47f01f41c4211b32d1f61fa5255f5a0fb92
3,636,762
import importlib
import pkgutil


def import_submodules(package, recursive=True):
    """ Import all submodules of a package, recursively, including subpackages

    Arguments:
    1. package = (string) name of the package (module) loader of the package
    2. recursive = (bool) True  = load packages and modules from all sub-packages as well.
                   (bool) False = load only the first level of packages and modules,
                                  do not load modules from sub-packages
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    results = {}
    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        full_name = package.__name__ + '.' + name
        results[full_name] = importlib.import_module(full_name)
        if recursive and is_pkg:
            results.update(import_submodules(full_name))
    return results
e299334de43ee9bd9544589698472db978fcae8d
3,636,763
def is_collision_ray_cell(map_obj, cell): """ cell : cell r, c index from left bottom. """ idx = cell[0] + map_obj.mapdim[0] * cell[1] if (cell[0] < 0) or (cell[1] < 0) or (cell[0] >= map_obj.mapdim[0]) or (cell[1] >= map_obj.mapdim[1]): return True #elif (map_obj.map is not None) and map_obj.map[cell[0], cell[1]] == 1: elif (map_obj.map is not None) and map_obj.map_linear[idx] == 1: return True else: return False
6eaf38710843c4c4e82e8411db9f1e1d97fb1710
3,636,764
from datetime import datetime


def time_of_trip(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the month, hour, and day of the week in
    which the trip was made.

    Remember that NYC includes seconds, while Washington and Chicago do not.

    HINT: You should use the datetime module to parse the original date
    strings into a format that is useful for extracting the desired information.
    see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
    """
    dt = None
    if city == "NYC":
        dt = datetime.strptime(datum['starttime'], "%m/%d/%Y %H:%M:%S")
    elif city == "Chicago":
        dt = datetime.strptime(datum['starttime'], "%m/%d/%Y %H:%M")
    elif city == "Washington":
        dt = datetime.strptime(datum['Start date'], "%m/%d/%Y %H:%M")

    # datetime exposes month and hour directly; only the weekday needs strftime
    return (dt.month, dt.hour, dt.strftime("%A"))
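# Minimal usage sketch with an NYC-style record (illustrative timestamp):
demo_datum = {'starttime': '1/1/2016 09:07:35'}
print(time_of_trip(demo_datum, 'NYC'))  # -> (1, 9, 'Friday')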
37824a6f2fe3816ec09fc3f86bb00400cfd43b38
3,636,765
def transform_resource_name(ctx, param, value): """Callback to transform resource_name into title case.""" if value is not None: return value.title() return value
b708c3318b731d652a7acad216093c96bc18fe2e
3,636,766
def extrema (im): """ Return the minimum and maximum of an image. Arguments: im image whose extrema are to be found """ return [im.min(), im.max()]
303d9c50cca91c3e73341d7b40195aceb02aef7a
3,636,767
def _create_statement(name, colnames):
    """Create table if not exists foo (...).

    Note:
        Every type is numeric.
        Table name and column names are all lowercased
    """
    # every col is numeric, this may not be so elegant but simple to handle.
    # If you want to change this, think again.
    # Lowercase the identifiers so the statement matches the docstring.
    schema = ', '.join([col.lower() + ' numeric' for col in colnames])
    return "create table if not exists %s (%s)" % (name.lower(), schema)
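# Minimal usage sketch:
print(_create_statement('Prices', ['Open', 'Close']))
# -> create table if not exists prices (open numeric, close numeric)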
53c7fc9486274645c5dc7dea2257fda3cf496f9e
3,636,768
def createBundle():
    """Create a bundle-type OSC message."""
    b = OSC.OSCMessage()
    b.address = ""
    b.append("#bundle")
    b.append(0)
    b.append(0)
    return b
fee80abd7aa2d71b2e03dbebd65aaca07be7037a
3,636,769
def binary_or(a: int, b: int): """ Take in 2 integers, convert them to binary, and return a binary number that is the result of a binary or operation on the integers provided. >>> binary_or(25, 32) '0b111001' >>> binary_or(37, 50) '0b110111' >>> binary_or(21, 30) '0b11111' >>> binary_or(58, 73) '0b1111011' >>> binary_or(0, 255) '0b11111111' >>> binary_or(0, 256) '0b100000000' >>> binary_or(0, -1) Traceback (most recent call last): ... ValueError: the value of both input must be positive >>> binary_or(0, 1.1) Traceback (most recent call last): ... TypeError: 'float' object cannot be interpreted as an integer >>> binary_or("0", "1") Traceback (most recent call last): ... TypeError: '<' not supported between instances of 'str' and 'int' """ if a < 0 or b < 0: raise ValueError("the value of both input must be positive") a_binary = str(bin(a))[2:] # remove the leading "0b" b_binary = str(bin(b))[2:] max_len = max(len(a_binary), len(b_binary)) return "0b" + "".join( str(int("1" in (char_a, char_b))) for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)) )
514fa4a02b778dfa91c4097bb8916522339cda33
3,636,770
import numpy as np


def tukey(N, alpha):
    r""" generate a tukey window

    The Tukey window, also known as the tapered cosine window, can be regarded
    as a cosine lobe of width \alpha * N / 2 that is convolved with a rectangle
    window of width (1 - \alpha / 2) * N.  At \alpha = 0 it becomes rectangular,
    and at \alpha = 1 it becomes a Hann window.
    """
    # Special cases
    if alpha <= 0:
        return np.ones(N)  # rectangular window
    elif alpha >= 1:
        return np.hanning(N)

    # Normal case
    x = np.linspace(0, 1, N)
    w = np.ones(x.shape)

    # first condition 0 <= x < alpha/2
    first_condition = x < alpha/2
    w[first_condition] = 0.5 * (1 + np.cos(2*np.pi/alpha * (x[first_condition] - alpha/2)))

    # second condition already taken care of

    # third condition 1 - alpha / 2 <= x <= 1
    third_condition = x >= (1 - alpha/2)
    w[third_condition] = 0.5 * (1 + np.cos(2*np.pi/alpha * (x[third_condition] - 1 + alpha/2)))

    return w
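# Minimal usage sketch: the tapered ends fall to 0 while the middle stays flat at 1
print(tukey(9, 0.5))  # -> approximately [0.  0.5 1.  1.  1.  1.  1.  0.5 0. ]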
507b86f0cc98c832d0405f560b1018531d32c172
3,636,771
import socket
import urllib.error
import urllib.request
from urllib.parse import urlencode


def get_response(log, url, **kwargs):
    """
    Get data from server at given url.

    Parameters:
      - log: Standard python log instance
      - url: The url to make a post/get request to.
      - kwargs: Keyword arguments.
          - data: dict of post data. If data != None, get_response makes
            a http POST request, otherwise a http GET.
          - timeout: int, timeout in seconds. Defaults to 120.
    Returns:
      - Text read from url.
    Raises:
      - ServiceError if return code is != 200, httpError or timeout.
    """
    log.debug("Trying url: %s", url)
    data = urlencode(kwargs['data']).encode() if 'data' in kwargs else None
    to = kwargs['timeout'] if 'timeout' in kwargs else URL_TIMEOUT
    if data:
        log.debug("Posting data: " + data.decode('ascii'))
    try:
        with urllib.request.urlopen(url, data, timeout=to) as response:
            code = response.getcode()
            html = response.read().decode('ascii')
    except socket.timeout:
        # urlopen signals a timeout with socket.timeout; the original caught
        # an undefined name (`timeoutError`)
        raise ServiceError("Timeout reading %s" % url)
    except (urllib.error.HTTPError, urllib.error.URLError) as err:
        raise ServiceError("Error reading %s :%s" % (url, err))
    log.debug("Got response (%d) : %s", code, html)
    if code != 200:
        raise ServiceError("Cannot update, response code: %d" % code)
    return html
a22ec7d0aeb36a41ac7708e18932b915d974f1ea
3,636,772
import numpy as np
import sympy as sp


def psi(X, Y, c_i, A, config, pkg='numpy'):
    """Computes the value of magnetic flux at point (X, Y) according to
    coefficients ci.

    Args:
        X (float or numpy.array): x coordinate
        Y (float or numpy.array): y coordinate
        c_i (list): list of floats, the ci coefficients
        A (float): plasma parameter
        config (str): shape of the plasma 'non-null', 'single-null',
            'double-null'.
        pkg (str, optional): if set to 'numpy' (resp. 'sympy'), numpy
            (resp. sympy) objects will be used. Defaults to 'numpy'.

    Raises:
        ValueError: If argument pkg is not in ['numpy', 'np', 'sympy', 'sp']

    Returns:
        float or numpy.array or sympy.Add: value(s) of magnetic flux
    """
    if pkg in ['numpy', 'np']:
        pkg = np
    elif pkg in ['sympy', 'sp']:
        pkg = sp
    else:
        raise ValueError("Unexpected string for argument pkg")
    psi_1 = 1
    psi_2 = X**2
    psi_3 = Y**2 - X**2*pkg.log(X)
    psi_4 = X**4 - 4*X**2*Y**2
    psi_5 = 2*Y**4 - 9*Y**2*X**2 + 3*X**4*pkg.log(X) - 12*X**2*Y**2*pkg.log(X)
    psi_6 = X**6 - 12*X**4*Y**2 + 8*X**2*Y**4
    psi_7 = 8*Y**6 - 140*Y**4*X**2 + 75*Y**2*X**4 - 15*X**6*pkg.log(X) + \
        180*X**4*Y**2*pkg.log(X) - 120*X**2*Y**4*pkg.log(X)

    psis = [psi_1, psi_2, psi_3, psi_4, psi_5, psi_6, psi_7]
    if config == 'single-null':
        psi_8 = Y
        psi_9 = Y*X**2
        psi_10 = Y**3 - 3*Y*X**2*pkg.log(X)
        psi_11 = 3*Y*X**4 - 4*Y**3*X**2
        psi_12 = 8*Y**5 - 45*Y*X**4 - 80*Y**3*X**2*pkg.log(X) + \
            60*Y*X**4*pkg.log(X)
        psis += [psi_8, psi_9, psi_10, psi_11, psi_12]

    val = X**4/8 + A*(1/2*X**2*pkg.log(X) - X**4/8) + \
        sum([c_i[i]*psis[i] for i in range(len(c_i))])
    return val
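# Minimal usage sketch (illustrative coefficients, not a fitted equilibrium):
# at X = 1, Y = 0 all log and Y terms vanish, so only psi_1, psi_2, psi_4, psi_6
# contribute and the value can be checked by hand.
demo_c = [0.1] * 7
print(psi(1.0, 0.0, demo_c, A=0.5, config='non-null'))  # -> 0.4625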
af110abfe37a82a0fcf89a31d1c8eae87bf280b8
3,636,773
import logging
from time import ctime

import requests


def get(baseurl, params=None, headers=None, private_keys_to_ignore=["key", "secret"],
        permanent_cache_file=PERMANENT_CACHE_FNAME, temp_cache_file=TEMP_CACHE_FNAME):
    """
    Return a Response object (defined in this file) for the given URL.
    Look in temp_cache first, then permanent_cache.
    If not found, fetch data from the internet.
    """
    logger = logging.getLogger('requests_with_caching')
    if params is None:
        params = {}
    for k in params:
        params[k] = str(params[k])
    if headers is None:
        headers = {}
    if "user-agent" not in headers:
        # avoid captcha
        headers["user-agent"] = "Lynx/2.9.0dev.5 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/3.6.13"
    cache_key = make_cache_key(baseurl, params, private_keys_to_ignore)
    full_url = requests.Request("GET", baseurl, params=params, headers=headers).prepare().url
    logger.info(ctime() + ": fetching " + full_url)

    if not DISABLE_CACHING:
        # Load the permanent and page-specific caches from files
        permanent_cache = _read_from_file(permanent_cache_file)
        temp_cache = _read_from_file(temp_cache_file)
        if cache_key in temp_cache:
            logger.debug("found in temp_cache")
            # make a Response object containing text from the cache, and the full_url that would have been fetched
            return Response(temp_cache[cache_key], full_url)
        elif cache_key in permanent_cache:
            logger.debug("found in permanent_cache")
            # make a Response object containing text from the cache, and the full_url that would have been fetched
            return Response(permanent_cache[cache_key], full_url)

    logger.debug("new; adding to cache")
    # actually request it
    resp = requests.get(baseurl, params=params, headers=headers)
    # save it
    if resp.status_code == requests.codes.ok:
        add_to_cache(temp_cache_file, cache_key, resp.text)
    else:
        logger.debug(f"not adding due to error code {resp.status_code}")
    return resp
245073522b9b29f0b402774d0144d5b6aa20f9bf
3,636,774
def tempConvert(temp, unit): """ Convert Fahrenheit to Celsius """ if unit == 'F': celsius = (temp-32) * 5/9 return celsius else: return temp
224c7b5bd72ff5d209bfaf2b10d94cc24ac8681d
3,636,775
def _find_best_twitter_key(type, reset, remaining, limit, proxies, auth):
    """
    This function switches to another pair of Twitter API keys, if they are available, to avoid pausing.
    * WANT TO SWAP KEYS HERE B/C PAUSE IS MORE THAN 3 MINUTES

    :param type: Type of API call: "timeline", "friends", "followers", "search_tweets", "search_users",
                 "retweets", "rls", or "users"
    :param reset: The remaining window before the limit resets in UTC epoch seconds
    :param remaining: The number of requests left for the 15 minute window
    :param limit: The rate limit ceiling for that given request
    :param proxies: Proxy dictionary, ex. {'http': 'http://%s:%s' % (HOST, PORT), 'https': 'http://%s:%s' % (HOST, PORT)}
    :param auth: Twitter application authentication, see the get_authorization method
    :return best_key_auth: Authorization object using the best keys
    :return isNewAuth: Boolean value representing whether a new authorization has been produced
    """
    rls_types = _rls_type_list()
    assert (type in rls_types), "Specify an RLS type as: {}".format("', '".join(rls_types))
    # Count JSON files in key directory
    key_dir = os.path.join(os.path.dirname(pyTweet.__file__), 'twitter_api_keys')
    key_jsons = _get_key_list()
    isNewAuth = False
    # Check if there are enough keys to continue with this function
    assert (len(key_jsons) > 0), "You have no Twitter API key files saved in {}. \nRefer to the documentation to " \
                                 "create key files, or move your key files to that location.".format(key_dir)
    if len(key_jsons) == 1:
        print "\tThere are no other API keys to use...returning current API key."
        pause = abs(int(time.time()) - reset) + 5
        print "\tThere are no alternative keys. Pause for {} seconds.".format(pause)
        time.sleep(pause)
        return (auth, isNewAuth)
    # Define best auth and key
    best_key_auth = auth
    best_key = {}
    best_key[type] = {'RESET': reset, 'LIMIT': limit, 'REMAINING': remaining}
    for k in key_jsons:
        try:
            key = load_twitter_api_key_set(key_file=k)
        except (ValueError, AttributeError):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the " \
                  "documentation on creating an API key".format(k)
            continue
        if ('API_KEY' not in key.keys()) or ('API_SECRET' not in key.keys()) or ('ACCESS_TOKEN' not in key.keys()) or ('ACCESS_TOKEN_SECRET' not in key.keys()):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the documentation " \
                  "on creating an API key".format(k)
            continue
        # Be sure that this is not the same key we started the function with
        if auth['KEY_FILE'] == k:
            continue
        if (auth['API_KEY'] == key['API_KEY']) and (auth['API_SECRET'] == key['API_SECRET']) and (auth['ACCESS_TOKEN'] == key['ACCESS_TOKEN']) and (auth['ACCESS_TOKEN_SECRET'] == key['ACCESS_TOKEN_SECRET']):
            continue
        # Check the RLS of RLS for key
        key_auth = get_authorization(key)
        _, _, _ = _get_rate_limit_status(type=type, proxies=proxies, auth=key_auth)
        key = load_twitter_api_key_set(key_file=k)
        # Skip key if it doesn't have appropriate fields
        if ('RESET' not in key[type].keys()) or ('REMAINING' not in key[type].keys()) or ('LIMIT' not in key[type].keys()):
            continue
        # Check keys!
        if key[type]['REMAINING'] == key[type]['LIMIT']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
        if key[type]['REMAINING'] < 1:
            continue
        if key[type]['REMAINING'] > best_key[type]['REMAINING']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
    if isNewAuth:
        print "\nSwitch to Twitter key {} after using {}".format(best_key_auth['KEY_FILE'], auth['KEY_FILE'])
    else:
        pause = abs(int(time.time()) - best_key[type]['RESET']) + 5
        # use float division so the minute count is not truncated under Python 2
        print "\nUnable to find a better Twitter key, they all appear to be exhausted for the {} call. \nPause for {} " \
              "minutes".format(type, np.ceil(pause / 60.0))
        time.sleep(pause)
    return (best_key_auth, isNewAuth)
777cd7bed54f635c06ad3cef1bb65a6a6075dcc9
3,636,776
import fnmatch


def allowed_file(filename, allowed_exts):
    """
    The validator for blueimp that limits which file extensions are allowed.

    Args:
        filename (str): a filepath
        allowed_exts (str): set of allowed file extensions

    Returns:
        bool: True if extension is an allowed file type, False otherwise
    """
    allowed_extensions = ["*." + str(e) for e in list(allowed_exts)]
    for ext in allowed_extensions:
        if fnmatch.fnmatch(filename.lower(), ext):
            return True
    return False
af23f6017ffa76e5402800a77cf794a2c1bce330
3,636,777
import string

import nltk


def tokenize(document):
    """
    Given a document (represented as a string), return a list of all of the
    words in that document, in order.

    Process document by converting all words to lowercase, and removing any
    punctuation or English stopwords.
    """
    words = document.lower()
    words = nltk.word_tokenize(words)
    # strip punctuation characters from each token
    x = []
    for w in words:
        for i in string.punctuation:
            if i in w:
                w = w.replace(i, "")
        x.append(w)
    words = x
    words = [w for w in words if w != ""]
    words = [w for w in words if w not in nltk.corpus.stopwords.words("english")]
    # note: the final list is returned in reverse alphabetical order
    words = sorted(words, reverse=True)
    return words
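# Minimal usage sketch (assumes the NLTK 'punkt' and 'stopwords' data are
# available, e.g. via nltk.download('punkt') and nltk.download('stopwords')):
print(tokenize("The quick brown fox jumps over the lazy dog."))
# -> ['quick', 'lazy', 'jumps', 'fox', 'dog', 'brown']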
76e2eedc08ccc8bec28830ab6b0d8a70d4a67b14
3,636,778
import os


def return_file_size(file_path):
    """This is taking the final size of the pre-processed file, and this number
    will be used in the rendering process.
    """
    size = os.path.getsize(file_path)
    return size
dbda1f7cbdbff81588636a52d72be3ab0a0269e9
3,636,779
def check_auth(username, password): """This function is called to check if a username / password combination is valid. """ database = db_connect() cursor = database.cursor() cursor.execute("SELECT password, active FROM users WHERE api_user=%s;", (username,)) results = cursor.fetchone() cursor.close() database.close() if results is None: return False if results[0] != password or results[1] != 1: return False return True
d621ecefba65793b8f8230eb9e211e3d23d2c270
3,636,780
from typing import Union


def _handle_axis(axis: Union[str, int]) -> int:
    """Handles axis arguments including "columns" and "index" strings."""
    if axis not in {0, 1, 'columns', 'index'}:
        raise ValueError(
            "axis value error: not in {0, 1, 'columns', 'index'}"
        )
    # Map to int if str
    if isinstance(axis, str):
        axis_mapper = {'index': 0, 'columns': 1}
        axis = axis_mapper.get(axis)
    return axis
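# Minimal usage sketch:
print(_handle_axis('columns'))  # -> 1
print(_handle_axis(0))          # -> 0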
4ebc4fe2ccf9124e326d21b14a7dc4d9aae52f12
3,636,781
import logging
from typing import Any

import motor.motor_asyncio
from aiohttp import web
from aiohttp_middlewares import cors_middleware, error_middleware

# The view classes (Ping, Ready, EventsView, ...) and the DB_*/LOGGING_LEVEL
# constants are provided by the surrounding application package.


async def create_app() -> web.Application:
    """Create a web application."""
    app = web.Application(
        middlewares=[
            cors_middleware(allow_all=True),
            error_middleware(),  # default error handler for whole application
        ]
    )

    # Set up logging
    logging.basicConfig(level=LOGGING_LEVEL)
    logging.getLogger("chardet.charsetprober").setLevel(LOGGING_LEVEL)

    # Set up routes:
    app.add_routes(
        [
            web.view("/ping", Ping),
            web.view("/ready", Ready),
            web.view("/competition-formats", CompetitionFormatsView),
            web.view("/competition-formats/{id}", CompetitionFormatView),
            web.view("/events", EventsView),
            web.view("/events/{eventId}", EventView),
            web.view(
                "/events/{eventId}/generate-raceclasses", EventGenerateRaceclassesView
            ),
            web.view("/events/{eventId}/format", EventFormatView),
            web.view("/events/{eventId}/raceclasses", RaceclassesView),
            web.view("/events/{eventId}/raceclasses/{raceclassId}", RaceclassView),
            web.view("/events/{eventId}/contestants", ContestantsView),
            web.view(
                "/events/{eventId}/contestants/assign-bibs", ContestantsAssignBibsView
            ),
            web.view("/events/{eventId}/contestants/{contestantId}", ContestantView),
        ]
    )

    async def mongo_context(app: Any) -> Any:
        # Set up database connection:
        logging.debug(f"Connecting to db at {DB_HOST}:{DB_PORT}")
        mongo = motor.motor_asyncio.AsyncIOMotorClient(
            host=DB_HOST, port=DB_PORT, username=DB_USER, password=DB_PASSWORD
        )
        # Index with the configured database name; `mongo.DB_NAME` would select
        # a database literally called "DB_NAME"
        db = mongo[DB_NAME]
        app["db"] = db

        yield

        mongo.close()

    app.cleanup_ctx.append(mongo_context)
    return app
ecfa5e010fcd2d1c59bfad67c1468bc6bc5d7fb0
3,636,782
import os


def section_setup(section, zip_directory, corpus_directory):
    """Make folders for individual SE site (section)'s unzipped files and processed data,
    and generate expected path to 7z file on disk"""
    # makes folder for unzipped files for a site
    section_directory = os.path.join(zip_directory, section)
    make_directory(section_directory)

    # generate path to release zip file (saved at root zip directory)
    file_name = section + ".7z"
    full_file_name = os.path.join(zip_directory, file_name)

    # Generate folder for processed data
    corpus_section_directory = os.path.join(corpus_directory, section)
    make_directory(corpus_section_directory)

    return full_file_name, section_directory, corpus_section_directory
fb18e168a22f90f6c6ae829ec3a2359ae66fe7e1
3,636,783
import os


def get_file_size(file):
    """Get file size.

    Args:
        file (str): Input file.

    Returns:
        int: Return size of the file in bytes.
    """
    return os.stat(file).st_size
2838f88bd36445b5fe5bda5e4637080116f24cd9
3,636,784
def is_icmp_dest_unreach(icmp_data): """is ICMP_DEST_UNREACH?""" return icmp_data["TYPE"] == ICMP_DEST_UNREACH
07213bed90a9e17c0236883b2739f54b8d5ccf09
3,636,785
from sqlalchemy import text


def exists_user_notifications(session, user_id):
    """Helper method to check if notifications for a user exist."""
    # Bind user_id as a parameter instead of formatting it into the SQL string,
    # which left the original query open to SQL injection.
    res = session.execute(
        text("""SELECT EXISTS(
                    SELECT 1
                    FROM public.notification
                    WHERE user_id=:user_id) AS user"""),
        {"user_id": user_id}).fetchone()
    return res.user
a22c171359a95fc3723edbb967df865046692969
3,636,786
import torch


def export_onnx(model, config, device, onnx_model_path, verbose):
    """ Export GPT-2 model with past state to ONNX model """
    num_layer = config.n_layer
    dummy_inputs = get_dummy_inputs(batch_size=1,
                                    past_sequence_length=1,
                                    sequence_length=1,
                                    num_attention_heads=config.num_attention_heads,
                                    hidden_size=config.hidden_size,
                                    num_layer=num_layer,
                                    vocab_size=config.vocab_size,
                                    device=device,
                                    float16=False)
    dummy_input_ids, dummy_position_ids, dummy_attention_mask, dummy_past = dummy_inputs

    input_list = [dummy_input_ids, dummy_position_ids, dummy_attention_mask] + dummy_past

    with torch.no_grad():
        outputs = model(*input_list)

    past_names = [f'past_{i}' for i in range(num_layer)]
    present_names = [f'present_{i}' for i in range(num_layer)]

    # GPT2Model outputs last_state; GPT2LMHeadModel outputs logits (prediction_scores)
    assert outputs[0].shape[2] == config.vocab_size or outputs[0].shape[2] == config.hidden_size
    output_names = ["logits" if outputs[0].shape[2] == config.vocab_size else "last_state"] + present_names

    # Shape of input tensors:
    #    input_ids: (batch_size, seq_len)
    #    past_{i}: (2, batch_size, num_heads, past_seq_len, hidden_size/num_heads)
    #    attention_mask: (batch_size, past_seq_len + seq_len)
    # Shape of output tensors:
    #    last_state: (batch_size, seq_len, hidden_size)
    #       or logits: (batch_size, seq_len, vocab_size)
    #    present_{i}: (2, batch_size, num_heads, past_seq_len + seq_len, hidden_size/num_heads)
    dynamic_axes = {'input_ids': {0: 'batch_size', 1: 'seq_len'},
                    output_names[0]: {0: 'batch_size', 1: 'seq_len'}}
    for name in past_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'past_seq_len'}
    for name in present_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'total_seq_len'}
    dynamic_axes['attention_mask'] = {0: 'batch_size', 1: 'total_seq_len'}
    dynamic_axes['position_ids'] = {0: 'batch_size', 1: 'seq_len'}

    logger.info(
        f"Shapes: input_ids={dummy_input_ids.shape} past={dummy_past[0].shape} output={outputs[0].shape} present={outputs[1][0].shape}"
    )

    torch.onnx.export(model,
                      args=tuple(input_list),
                      f=onnx_model_path,
                      input_names=['input_ids', 'position_ids', 'attention_mask'] + past_names,
                      output_names=output_names,
                      example_outputs=outputs,
                      dynamic_axes=dynamic_axes,
                      opset_version=11,
                      do_constant_folding=True,
                      verbose=verbose)
    return onnx_model_path
13b4152750efecef5275a154768d8a891fe95829
3,636,787
from pandas import DataFrame


def set_peak_elo(df: DataFrame, playersElo) -> DataFrame:
    """Add 2 columns PeakElo and PeakEloSince to a dataframe containing Date, P1Id and P2Id fields

    Args:
        df (DataFrame): the dataframe from where we read row by row (match by match)
        playersElo ([type]): dict <id>:[eloratings_history]

    Returns:
        DataFrame: the input dataframe with 2 additional columns PeakElo, PeakEloSince
    """
    (
        df.loc[:, ["PeakElo1"]],
        df.loc[:, ["PeakEloSince1"]],
        df.loc[:, ["PeakElo2"]],
        df.loc[:, ["PeakEloSince2"]],
    ) = zip(
        *df.apply(
            lambda row: set_peak_elo_match(row, playersElo),
            axis=1,
        )
    )
    # save a dataframe with all matches and Elo rating of each player for the matches
    df.to_csv("./results/dfWithElos9m_peak.csv")
    return df
a55c4d098686a1b75b03eac0c46345bc7c248593
3,636,788
import numpy as np
from numpy.linalg import norm, svd


def calc_plane_vector(atom_pos):
    """ Method to calculate best-fitted (unit) plane vector given a set of points using SVD

    ARGS:
        atom_pos (ndarray) :: ndarray storing atomic positions

    returns:
        ndarray
    """
    # Zero-centering centroid of atoms before SVD
    atom_pos_0 = atom_pos.T - np.mean(atom_pos.T, axis=1, keepdims=True)
    u, v, sh = svd(atom_pos_0, full_matrices=True)
    # Obtain unit plane vector and ensure it points upwards (z>0)
    unit_n = u[:, -1] / norm(u[:, -1])
    return unit_n * np.sign(unit_n[-1])
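# Minimal usage sketch (illustrative points, rows are atomic positions):
# four points lying in the z = 0 plane give a normal of ~[0, 0, 1]
demo_pts = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [1.0, 1.0, 0.0]])
print(calc_plane_vector(demo_pts))  # -> [0. 0. 1.]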
5576152adcd1406a94e79eb29d3030214424c2b7
3,636,789
import requests


def search_reddit(search, subreddit='', t='week', limit='100', sort='new', restrict_sr='1'):
    """
    search - string object, representing your search query
    subreddit - string object, representing the subreddit
    t - string object, one of (hour, day, week, month, year, all)
    limit - string object, limits the number of posts returned
    sort - string object, one of 'hot', 'old', 'top' or 'new'
    restrict_sr - string object, '0' or '1', specifies if restriction to the subreddit is applied
    """
    print(f"Retrieving reddit posts for {search=} and {subreddit=}")
    headers = get_reddit_token('nie_irek_ubuntu')
    reddit_url = "https://oauth.reddit.com/r/" + subreddit + "/search"
    res = requests.get(reddit_url,
                       headers=headers,
                       params={'q': search,
                               'sort': sort,
                               'restrict_sr': restrict_sr,
                               'limit': limit,
                               't': t})
    return res
d26383f464b16bf8cefa5e089b975c6ad2c5f19f
3,636,790
def _expected_type_expression(typedef: Typedef) -> str: """ Determine the type expression supplied to ``from_obj`` function corresponding to the type definition. :param typedef: type definition in Python representation :return: Python code representing the type definition """ # pylint: disable=too-many-return-statements if isinstance(typedef, Booldef): return 'bool' elif isinstance(typedef, Intdef): return 'int' elif isinstance(typedef, Floatdef): return 'float' elif isinstance(typedef, Strdef): return 'str' elif isinstance(typedef, Bytesdef): return 'bytes' elif isinstance(typedef, Listdef): if typedef.items is None: raise ValueError('Unexpected None items in typedef: {!r}'.format(typedef.identifier)) return 'list, {}'.format(_expected_type_expression(typedef=typedef.items)) elif isinstance(typedef, Dictdef): if typedef.values is None: raise ValueError('Unexpected None values in typedef: {!r}'.format(typedef.identifier)) return 'dict, {}'.format(_expected_type_expression(typedef=typedef.values)) elif isinstance(typedef, Classdef): return _class_name(typedef.identifier) else: raise NotImplementedError('Translating the typedef to an expected type is not supported: {}'.format(typedef))
98ce70280c5054083b9234f1ef47b759b852720a
3,636,791
import numpy as np


def adjoint(m):
    """Compute the Hermitian adjoint (conjugate transpose)."""
    return np.transpose(np.conj(m))
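# Minimal usage sketch (illustrative matrix):
demo_m = np.array([[1 + 2j, 3j],
                   [0, 4 - 1j]])
print(adjoint(demo_m))  # -> the conjugate transpose of demo_m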
f7dea92a990473f88547574846aa1be8dc4bfee1
3,636,792
import os


def get_graphic_template_variables(path, graphic_number):
    """
    Generates the template variables for each graphic
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)

    ## Get Spreadsheet Path
    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        print '%s/graphic_config.py does not exist.' % slug
        return

    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return

    ## Generate Links From Slug
    spreadsheet_id = graphic_config.COPY_GOOGLE_DOC_KEY
    app_id = slug

    ## Update Spreadsheet
    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)

    ## Get Sheet Data
    copy = copytext.Copy(filename=copy_path)
    sheet = copy['labels']

    note = {
        "spreadsheet_id": spreadsheet_id,
        "app_id": app_id,
        "graphic_number": graphic_number + 1,
        "sheet": sheet,
    }

    return note
e755ec96cdcf0f9bddeb2d5134db97cdc9777dfd
3,636,793
from sklearn.svm import LinearSVC, SVC


def svc(self, model):
    """ Obtain the model and the search space of the SVC classifier. """
    svc_sp = {}
    if model == "linear":
        svc = LinearSVC(dual=False, class_weight='balanced')
    else:
        svc = SVC(cache_size=1000, class_weight='balanced')
        # kernel-related parameters only apply to the non-linear SVC;
        # LinearSVC does not accept kernel, degree or gamma
        svc_sp['kernel'] = ['linear', 'poly', 'rbf']
        svc_sp['degree'] = [2, 3, 4]
        svc_sp['gamma'] = ["auto", "scale"]
    svc_sp['C'] = [0.001, 0.01, 0.1, 1.0]
    return svc, svc_sp
a58bafa7bdf3ff71120afd528b479893dada14e4
3,636,794
import re


def _FormatDataTransferIdentifiers(client, transfer_identifier):
    """Formats a transfer config or run identifier.

    Transfer configuration/run commands should be able to support different
    formats of how the user could input the project information. This function
    will take the user input and create a uniform transfer config or transfer
    run reference that can be used for various commands.

    This function will also set the client's project id to the specified
    project id.

    Returns:
      The formatted transfer config or run.
    """
    formatted_identifier = transfer_identifier
    match = re.search(r'projects/([^/]+)', transfer_identifier)
    if not match:
        formatted_identifier = ('projects/' + client.GetProjectReference().projectId +
                                '/' + transfer_identifier)
    else:
        client.project_id = match.group(1)

    return formatted_identifier
951a3576a1a53f9dd141e718c31c8b0314a550d7
3,636,795
def getReactionUrl(reaction, family=None, estimator=None, resonance=True): """ Get the URL (for kinetics data) of a reaction. Returns '' if the reaction contains functional Groups or LogicNodes instead of real Species or Molecules. """ kwargs = dict() for index, reactant in enumerate(reaction.reactants): if isinstance(reactant, Entry): reactant = reactant.item if isinstance(reactant, Group) or isinstance(reactant, LogicNode): return '' mol = reactant if isinstance(reactant, Molecule) else reactant.molecule[0] kwargs['reactant{0:d}'.format(index+1)] = moleculeToAdjlist(mol) for index, product in enumerate(reaction.products): mol = product if isinstance(product, Molecule) else product.molecule[0] kwargs['product{0:d}'.format(index+1)] = moleculeToAdjlist(mol) kwargs['resonance'] = resonance if family: if estimator: kwargs['family'] = family kwargs['estimator'] = estimator.replace(' ', '_') reaction_url = reverse('database:kinetics-group', kwargs=kwargs) else: reaction_url = '' else: reaction_url = reverse('database:kinetics-data', kwargs=kwargs) return reaction_url
6ce5ca833bf4871d98314f8bf64a1ce024d4f41d
3,636,796
def hexLat2W(nrows=5, ncols=5):
    """
    Create a W object for a hexagonal lattice.

    Parameters
    ----------
    nrows : int
            number of rows
    ncols : int
            number of columns

    Returns
    -------
    w : W
        instance of spatial weights class W

    Notes
    -----
    Observations are row ordered: first k observations are in row 0, next k in
    row 1, and so on.

    Construction is based on shifting every other column of a regular lattice
    down 1/2 of a cell.

    Examples
    --------
    >>> import pysal as ps
    >>> w = ps.lat2W()
    >>> w.neighbors[1]
    [0, 6, 2]
    >>> w.neighbors[21]
    [16, 20, 22]
    >>> wh = ps.hexLat2W()
    >>> wh.neighbors[1]
    [0, 6, 2, 5, 7]
    >>> wh.neighbors[21]
    [16, 20, 22]
    >>>
    """
    if nrows == 1 or ncols == 1:
        print "Hexagon lattice requires at least 2 rows and columns"
        print "Returning a linear contiguity structure"
        return lat2W(nrows, ncols)

    n = nrows * ncols
    rid = [i // ncols for i in xrange(n)]
    cid = [i % ncols for i in xrange(n)]
    r1 = nrows - 1
    c1 = ncols - 1
    w = lat2W(nrows, ncols).neighbors
    for i in xrange(n):
        odd = cid[i] % 2
        if odd:
            if rid[i] < r1:  # odd col index above last row
                # new sw neighbor
                if cid[i] > 0:
                    j = i + ncols - 1
                    w[i] = w.get(i, []) + [j]
                # new se neighbor
                if cid[i] < c1:
                    j = i + ncols + 1
                    w[i] = w.get(i, []) + [j]
        else:  # even col
            # nw
            jnw = [i - ncols - 1]
            # ne
            jne = [i - ncols + 1]
            if rid[i] > 0:
                # (a stray no-op `w[i]` statement was removed here)
                if cid[i] == 0:
                    w[i] = w.get(i, []) + jne
                elif cid[i] == c1:
                    w[i] = w.get(i, []) + jnw
                else:
                    w[i] = w.get(i, []) + jne
                    w[i] = w.get(i, []) + jnw
    return pysal.weights.W(w)
7e33f66d40d87b71d0b06d73cf83a33752aecfdd
3,636,797
import numpy as np


def Modelo(Mags, Phi, Me, alpha):
    """
    Model to fit (a Schechter-type luminosity function)

    Parameters
    ----------
    Mags : list
        Observed magnitudes
    Phi, Me, alpha : .float, .float, .float
        Model parameters

    Returns
    --------
    F : list
        Function values
    """
    M = Mags  # alias for readability
    F = []    # will hold the function values
    ij = 0
    while ij < len(M):
        # The function is split into factors to keep the definition short:
        # F = f1*f2*f3
        f1 = 0.4*np.log(10)*Phi
        f2 = 10**(-0.4*(M[ij]-Me)*(alpha+1))
        f3 = np.exp(-10**(-0.4*(M[ij]-Me)))
        F.append(f1*f2*f3)
        ij = ij + 1
    return F
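# Minimal usage sketch (illustrative parameter values, not a fit result):
demo_mags = [-22.0, -20.0, -18.0]
print(Modelo(demo_mags, Phi=1e-3, Me=-20.5, alpha=-1.2))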
0e547058032bc682c6d0c5bffa5f00aaa1318989
3,636,798
from math import gcd


def totient(n):
    """ Calculates Euler's totient """
    # math.gcd replaces fractions.gcd, which was removed in Python 3.9
    count = 0
    for i in range(1, n):
        if gcd(n, i) == 1:
            count = count + 1
    return count
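# Minimal usage sketch:
print([totient(n) for n in (6, 9, 10, 12)])  # -> [2, 6, 4, 4]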
11107816582285e712d9b2f5e98649ad345f9bf0
3,636,799