content: stringlengths (35 – 762k)
sha1: stringlengths (40 – 40)
id: int64 (0 – 3.66M)
def check_results(results):
    """Examines a list of individual check results and returns an overall
    result for all checks combined.
    """
    if CheckResults.SCALE_UP in results:
        return CheckResults.SCALE_UP
    if all(r == CheckResults.SCALE_DOWN for r in results):
        return CheckResults.SCALE_DOWN
    return CheckResults.DONT_SCALE
a7a18caca42c9a6f555110418b96b5cc6b9d203c
21,700
import os


def disk_usage(path):
    """returns disk usage for a path"""
    total = os.path.getsize(path)
    if os.path.isdir(path):
        for filename in os.listdir(path):
            child_path = os.path.join(path, filename)
            total += disk_usage(child_path)
    print(f"{total:<10} {path}")
    return total
0c4901f94d562d7def81afcef4ffa27fe48c106c
21,701
def edit_route(link_id):
    """edit link"""
    link = dynamo.tables[TABLE_NAME].get_item(Key={'id': link_id})['Item']
    form = LinkForm(link=link['link'], tags=','.join(link['tags']))
    if form.validate_on_submit():
        link, tags = form.parsed_data()
        dynamo.tables[TABLE_NAME].update_item(
            Key={'id': link_id},
            UpdateExpression='set link = :link, tags = :tags',
            ExpressionAttributeValues={':link': link, ':tags': tags},
            ReturnValues='UPDATED_NEW')
        return redirect(url_for('app.index_route'))
    return render_template('addedit.html', form=form)
b20f22345d3de2b474e04821bdfbee5391f1d493
21,702
def build_empty_pq():
    """Build empty pq."""
    return PriorityQ()
a18dc1ac16ceb2475f47e1f55e0617957c1e0cad
21,703
def add_flags(flags):
    """Add KZC flags"""
    def f(test, way):
        test.args += flags
    return f
58c6db2bb46c321ce3e3592ac8be2ee6d1feecb6
21,704
import os
import subprocess


def RunProcess(cmd, stdinput=None, env=None, cwd=None, sudo=False,
               sudo_password=None):
    """Executes cmd using subprocess.

    Args:
        cmd: An array of strings as the command to run
        stdinput: An optional string as stdin
        env: An optional dictionary as the environment
        cwd: An optional string as the current working directory
        sudo: An optional boolean on whether to do the command via sudo
        sudo_password: An optional string of the password to use for sudo

    Returns:
        A tuple of two strings and an integer: (stdout, stderr, returncode).

    Raises:
        DSException: if both stdinput and sudo_password are specified
    """
    if sudo:
        sudo_cmd = ['sudo']
        if sudo_password and not stdinput:
            # Set sudo to get the password from stdin
            sudo_cmd = sudo_cmd + ['-S']
            stdinput = sudo_password + '\n'
        elif sudo_password and stdinput:
            raise DSException('stdinput and sudo_password '
                              'are mutually exclusive')
        else:
            sudo_cmd = sudo_cmd + [
                '-p', "%u's password is required for admin access: "]
        cmd = sudo_cmd + cmd

    # Copy so the caller's environment is not mutated, and guard against
    # env=None, which would break dict.update()
    environment = os.environ.copy()
    if env:
        environment.update(env)
    task = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            env=environment,
                            cwd=cwd)
    (stdout, stderr) = task.communicate(input=stdinput)
    return (stdout, stderr, task.returncode)
6b1f599b6fe601ea8e3ce85757f76ca7b4f707b2
21,705
def public_jsonp_service(view):
    """More explicitly named to call attention to the extra little p"""
    return _json_service_wrapper(JSONPResponse, view)
76588ade3d537a102dc6ca3bf540bc32da928e30
21,706
import pandas as pd


def manage_data(xls_file: str) -> list:
    """
    Convert manually annotated xls data into the format to be processed.
    :param xls_file: path of the target file
    :return: list of converted dicts
    """
    f = pd.read_excel(xls_file, index=False)
    cnt = 0
    result = []
    while cnt < len(f) - 1:
        if f.text[cnt] == f.text[cnt + 1]:
            temp_dic = {'text': f.text[cnt], 'spo_list': []}
            while cnt < len(f) - 1 and f.text[cnt] == f.text[cnt + 1]:
                temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
                cnt += 1
            temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
            cnt += 1
            result.append(temp_dic)
        else:
            temp_dic = {'text': f.text[cnt], 'spo_list': [f.iloc[cnt, 1:].to_dict()]}
            result.append(temp_dic)
            cnt += 1
    return result
3198013b713f50b650bc5b3542905d1e860a6871
21,707
def get_arrival_times(inter_times):
    """Convert interevent times to arrival times."""
    return inter_times.cumsum()
7197fc6315d3eaca118ca419f23aed7c0d7cd064
21,708
def generate_custom_background(size, background_color, nb_blobs=3000,
                               kernel_boundaries=(50, 100)):
    """Generate a customized background to fill the shapes

    Parameters:
      background_color: average color of the background image
      nb_blobs: number of circles to draw
      kernel_boundaries: interval of the possible sizes of the kernel
    """
    img = np.zeros(size, dtype=np.uint8)
    img = img + get_random_color(background_color)
    blobs = np.concatenate([random_state.randint(0, size[1], size=(nb_blobs, 1)),
                            random_state.randint(0, size[0], size=(nb_blobs, 1))],
                           axis=1)
    for i in range(nb_blobs):
        col = get_random_color(background_color)
        cv.circle(img, (blobs[i][0], blobs[i][1]),
                  random_state.randint(20), col, -1)
    kernel_size = random_state.randint(kernel_boundaries[0], kernel_boundaries[1])
    cv.blur(img, (kernel_size, kernel_size), img)
    return img
782883d29cf67dbb33662fbb22b457783320101d
21,709
def rotate_z(domain, nrot=4):
    """Take a BoxCollection and return the equivalent CylinderCollection by
    rotating about the second axis; thus, transform coordinates of points
    like (x, z) --> (x, 0, z)."""
    return rotate(domain, d=1, nrot=nrot)
ca1b197d758a18b86675a14be952065055dea05f
21,710
def filter_roi(roi_data, nb_nonzero_thr):
    """Filter slices from dataset using ROI data.

    This function filters slices (roi_data) where the number of non-zero
    voxels within the ROI slice (e.g. centerline, SC segmentation) is less
    than or equal to a given threshold (nb_nonzero_thr).

    Args:
        roi_data (nd.array): ROI slice.
        nb_nonzero_thr (int): Threshold.

    Returns:
        bool: True if the slice needs to be filtered, False otherwise.
    """
    # Discard slices with fewer nonzero voxels than nb_nonzero_thr
    return not np.any(roi_data) or np.count_nonzero(roi_data) <= nb_nonzero_thr
9e325f77436e152377bee84d7e82d3f80424f288
21,711
def from_numpy(shape, dt):
    """
    Upcast a (shape, dtype) tuple if possible.

    >>> from_numpy((5, 5), dtype('int32'))
    dshape('5, 5, int32')
    """
    dtype = np.dtype(dt)

    if dtype.kind == 'S':
        measure = String(dtype.itemsize, 'A')
    elif dtype.kind == 'U':
        measure = String(dtype.itemsize / 4, 'U8')
    elif dtype.fields:
        rec = [(a, CType.from_dtype(b[0])) for a, b in dtype.fields.items()]
        measure = Record(rec)
    else:
        measure = CType.from_dtype(dtype)

    if shape == ():
        return measure
    else:
        # list(...) is needed on Python 3, where map() returns an iterator
        # that cannot be concatenated with a list
        return DataShape(parameters=(list(map(Fixed, shape)) + [measure]))
249701a885dc01d13fe356ce4117300e79d803a5
21,712
import optparse


def ParseArgs():
    """Parse the command line options."""
    option_parser = optparse.OptionParser()
    option_parser.add_option(
        '--from', dest='sender', metavar='EMAIL',
        help='The sender\'s email address')
    option_parser.add_option(
        '--to', action='append', metavar='EMAIL', dest='recipients',
        default=[], help='The recipient\'s address (repeatable)')
    option_parser.add_option(
        '--subject', metavar='TEXT|@FILE', help='The subject of the email')
    option_parser.add_option(
        '--message', metavar='TEXT|@FILE', help='The body of the message')
    option_parser.add_option(
        '--attach', metavar='FILE', action='append', dest='attachments',
        default=[], help='The path of a file to attach')
    option_parser.add_option(
        '--ignore-missing', action='store_true', default=False,
        help='No errors on attempts to attach non-existing files')
    option_parser.add_option('--server', help='The SMTP server to use')
    option_parser.add_option('--password', help='The password to use')
    options, _args = option_parser.parse_args()
    if not options.sender:
        option_parser.error('--from is required')
    if not options.recipients:
        option_parser.error('At least one --to is required')
    if not options.subject:
        option_parser.error('--subject is required')
    if not options.message:
        option_parser.error('--message is required')
    if not options.server:
        option_parser.error('--server is required')
    options.subject = ResolveParameter(options.subject)
    options.message = ResolveParameter(options.message)
    return options
eb1ee1c5fb66f76882aef0787e2c4716146526f4
21,713
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
    """
    PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(
        classes=classes,
        blocks=110,
        alpha=84,
        bottleneck=False,
        model_name="pyramidnet110_a84_cifar100",
        **kwargs)
f005c26e80e87536b5032685f27944560e5d8fc7
21,714
def contains_vendored_imports(python_path):
    """
    Returns True if ``python_path`` seems to contain vendored imports from botocore.
    """
    # We're using a very rough heuristic here: if the source code contains
    # strings that look like a vendored import, we'll flag.
    #
    # Because Python is dynamic, there are lots of ways you could be
    # importing the vendored modules that wouldn't be caught this way, but:
    #
    #   1. Doing it in a complete foolproof way is incredibly complicated, and
    #      I don't care that much.
    #   2. If you're writing your Lambda code in a deliberately obfuscated way,
    #      you have bigger problems than vendor deprecations.
    #
    # In practice, Python imports are usually near the top of the file, so we
    # read it line-by-line.  This means if we find an import, we can skip
    # reading the rest of the file.
    with open(python_path, "rb") as python_src:
        for line in python_src:
            if (
                b"import botocore.vendored" in line
                or b"from botocore.vendored import " in line
            ):
                return True
    return False
90ed6939d7f43cac29eb66c3e27e911b9cc62532
21,715
def filter_uniq(item):
    """Web app, feed template, creates unique item id"""
    detail = item['item']
    args = (item['code'], item['path'], str(detail['from']), str(detail['to']))
    return ':'.join(args)
914fa4e3fcdf6bc7e6a30b46c8f33eecd08adcf1
21,716
import joblib
import time
import logging
import warnings
import pickle


def load_pickle(filename, verbose=2, use_joblib=False):
    """Note: joblib can be potentially VERY slow."""
    with open(filename, 'rb') as file:
        if verbose >= 2:
            start = time.time()
            logging.info(f'Loading PICKLE from {filename}...')
        if use_joblib:
            warnings.warn('Joblib is slower in newer versions of Python.')
            obj = joblib.load(file)
        else:
            try:
                obj = pickle.load(file)
            except EOFError as e:
                logging.error(f'Load FAILED for {filename}.')
                raise e
        if verbose >= 2:
            # built-in round() replaces the original np.round(), which relied
            # on an unimported numpy
            logging.info(f'Load done in {round(time.time() - start, 4)} seconds.')
    return obj
680c4b72e47efeb58ec1bd93e4899a3ae6b99709
21,717
from typing import Match


async def make_match(*args, register=False, **kwargs) -> Match:
    """Create a Match object.

    There should be no need to call this directly; use matchutil.make_match
    instead, since this needs to interact with the database.

    Parameters
    ----------
    racer_1_id: int
        The DB user ID of the first racer.
    racer_2_id: int
        The DB user ID of the second racer.
    max_races: int
        The maximum number of races this match can be. (If is_best_of is True,
        then the match is a best of max_races; otherwise, the match is just
        repeating max_races.)
    match_id: int
        The DB unique ID of this match.
    suggested_time: datetime.datetime
        The time the match is suggested for. If no tzinfo, UTC is assumed.
    r1_confirmed: bool
        Whether the first racer has confirmed the match time.
    r2_confirmed: bool
        Whether the second racer has confirmed the match time.
    r1_unconfirmed: bool
        Whether the first racer wishes to unconfirm the match time.
    r2_unconfirmed: bool
        Whether the second racer wishes to unconfirm the match time.
    match_info: MatchInfo
        The types of races to be run in this match.
    cawmentator_id: int
        The DB unique ID of the cawmentator for this match.
    sheet_id: int
        The sheetID of the worksheet the match was created from, if any.
    register: bool
        Whether to register the match in the database.

    Returns
    ---------
    Match
        The created match.
    """
    if 'match_id' in kwargs and kwargs['match_id'] in match_library:
        return match_library[kwargs['match_id']]

    match = Match(*args, commit_fn=matchdb.write_match, **kwargs)
    await match.initialize()
    if register:
        await match.commit()
    match_library[match.match_id] = match
    return match
67346038696558f19b08a65cf45e88646b1186e4
21,718
def is_anonymous(context: TreeContext) -> bool:
    """Returns ``True`` if the current node is anonymous."""
    # return context[-1].anonymous
    tn = context[-1].tag_name
    return not tn or tn[0] == ':'
0169a93cada0d371b3b4628ff3fabbbef6ef60f2
21,719
def expand_to_beam_size(tensor, beam_size):
    """Tiles a given tensor by beam_size."""
    tensor = tf.expand_dims(tensor, axis=1)
    tile_dims = [1] * tensor.shape.ndims
    tile_dims[1] = beam_size
    return tf.tile(tensor, tile_dims)
e38adceceeecdab737f89f246125674ac87e4702
21,720
import os

import h5py
import ipdb
import numpy as np
import pandas as pd


def get_vignettes(
        h5_file,
        bed_file,
        tmp_dir=".",
        importance_filter=True,
        rsid_to_genes=None,
        ensembl_to_hgnc=None):
    """get the example indices at the locations of interest"""
    # prefix (scanmotifs)
    dirname = h5_file.split("/")[-2]
    print(dirname)

    # get a bed file from the h5 file and overlap
    with h5py.File(h5_file, "r") as hf:
        metadata = hf["example_metadata"][:, 0]
    metadata_bed_file = "{}/{}.metadata.tmp.bed.gz".format(tmp_dir, dirname)
    array_to_bed(metadata, metadata_bed_file, name_key="all", merge=False)

    # overlap and read back in
    overlap_file = "{}.overlap.bed.gz".format(metadata_bed_file.split(".bed")[0])
    overlap_cmd = (
        "zcat {} | "
        "awk -F '\t' 'BEGIN{{OFS=\"\t\"}}{{ $2=$2+20; $3=$3-20; print }}' | "  # offset
        "bedtools intersect -wo -a stdin -b {} | "
        "gzip -c > {}").format(metadata_bed_file, bed_file, overlap_file)
    print(overlap_cmd)
    os.system(overlap_cmd)
    overlap_data = pd.read_csv(overlap_file, sep="\t", header=None)
    print(overlap_data.shape)

    # for each, go back in and find the index, then check importance scores
    total = 0
    for overlap_i in range(overlap_data.shape[0]):
        metadata_i = overlap_data[3][overlap_i]
        h5_i = np.where(metadata == metadata_i)[0][0]
        rsid_i = overlap_data[9][overlap_i]
        variant_pos = overlap_data[7][overlap_i] - overlap_data[1][overlap_i]
        variant_pos -= 1  # offset by 1
        if variant_pos < 0:
            continue

        # filter here, if using rsid to genes
        if (rsid_to_genes is not None) and (len(rsid_to_genes.keys()) > 0):
            try:
                gene_id = rsid_to_genes[rsid_i]
            except KeyError:
                continue
        else:
            gene_id = "UNKNOWN"

        # and get hgnc
        hgnc_id = ensembl_to_hgnc.get(gene_id, "UNKNOWN")

        with h5py.File(h5_file, "r") as hf:
            # for key in sorted(hf.keys()): print(key, hf[key].shape)
            if False:
                variant_impt = hf["sequence-weighted.active"][h5_i, :, variant_pos, :]
                # variant_impt = hf["sequence-weighted"][h5_i,:,420:580][:,variant_pos]
            else:
                start_pos = max(variant_pos - 1, 0)
                stop_pos = min(variant_pos + 1, hf["sequence-weighted.active"].shape[2])
                variant_impt = hf["sequence-weighted.active"][h5_i, :, start_pos:stop_pos, :]
            variant_val = hf["sequence.active"][h5_i, :, variant_pos, :]
            variant_ref_bp = idx_to_letter[np.argmax(variant_val)]

        # variant scoring
        try:
            variant_impt_max = np.max(np.abs(variant_impt))
        except Exception:
            ipdb.set_trace()

        if variant_impt_max > 0:
            # plot out
            if True:
                with h5py.File(h5_file, "r") as hf:
                    # get the full sequences
                    orig_importances = hf["sequence-weighted"][h5_i][:, 420:580]
                    match_importances = hf["sequence-weighted.active"][h5_i]
                importances = scale_scores(orig_importances, match_importances)
                print(gene_id)
                metadata_string = metadata_i.split("features=")[-1].replace(":", "_")
                # TODO also add in chrom region
                plot_file = "{}/{}.{}-{}.{}.{}.{}.{}.{}.plot.pdf".format(
                    tmp_dir, dirname, gene_id, hgnc_id, h5_i, rsid_i,
                    variant_pos, variant_ref_bp, metadata_string)
                print(plot_file)
                plot_weights_group(importances, plot_file)
            total += 1

    print(total)
    return None
396d2a5b908c99fd74427f74babc50323820fa36
21,721
def delete_cluster(access_token, project_id, cluster_id):
    """Delete a cluster."""
    url = f"{BCS_CC_API_PRE_URL}/projects/{project_id}/clusters/{cluster_id}/"
    params = {"access_token": access_token}
    return http_delete(url, params=params)
f48d7f8a6278e528792601938817d883751d7a41
21,722
def modifMail(depute, mail=MAIL):
    """
    modifMail returns a personalized email string for each deputy of the
    chosen commissions.

    Inputs:
        depute: an array with the data of one deputy.
        mail: the path of a txt file filled in according to the chosen format.

    Returns the personalized message text, with the placeholder tags replaced
    by the deputy's data.
    """
    # Open the mail template file
    f = open(mail, 'r')
    messageMail = f.read()

    # Dictionary mapping the personalized fields to the deputy's data:
    donnesDeputes = {}
    donnesDeputes['@nom@'] = depute[NOM].replace('É', 'E')  # the 'É' breaks the whole program...
    donnesDeputes['@fonction@'] = depute[FONCTION].lower()
    donnesDeputes['@commission@'] = depute[COMMISSION_PERMANENTE].lower()
    if depute[SEXE] == 'F':
        donnesDeputes['@politesse@'] = 'madame'
    elif depute[SEXE] == 'H':
        donnesDeputes['@politesse@'] = 'monsieur'

    # Replace the tags with the deputy's data
    texteMail = messageMail
    for tag in donnesDeputes.keys():
        texteMail = texteMail.replace(tag, donnesDeputes[tag])

    # Handle the gendered words
    message = texteMail.split("#")
    while len(message) > 1:
        mot = message[1].split('/')
        if depute[SEXE] == 'F':
            message[0] += mot[1]
        elif depute[SEXE] == 'H':
            message[0] += mot[0]
        message.pop(1)
        message[0] += message[1]
        message.pop(1)
    return message[0]
0ade624e356bd0cc94fc3312ee1746a8771cb6a4
21,723
def atoi(s, base=None):  # real signature unknown; restored from __doc__
    """
    atoi(s [,base]) -> int

    Return the integer represented by the string s in the given base,
    which defaults to 10.  The string s must consist of one or more
    digits, possibly preceded by a sign.  If base is 0, it is chosen
    from the leading characters of s, 0 for octal, 0x or 0X for
    hexadecimal.  If base is 16, a preceding 0x or 0X is accepted.
    """
    return 0
420c9a68c1fe829a665eaba830df757114a81b47
21,724
def gzip_requested(accept_encoding_header):
    """
    Check to see if the client can accept gzipped output, and whether or
    not it is even the preferred method. If `identity` is higher, then no
    gzipping should occur.
    """
    encodings = parse_encoding_header(accept_encoding_header)

    # Do the actual comparisons
    if 'gzip' in encodings:
        return encodings['gzip'] >= encodings['identity']
    elif '*' in encodings:
        return encodings['*'] >= encodings['identity']
    else:
        return False
a07ca3d77095467791fc97d1a525ee878715e929
21,725
import logging


def create_logger(name="dummy", level=logging.DEBUG, record_format=None):
    """Create a logger according to the given settings"""
    if record_format is None:
        record_format = "%(asctime)s\t%(levelname)s\t%(message)s"

    logger = logging.getLogger("modbus_tk")
    logger.setLevel(level)
    formatter = logging.Formatter(record_format)
    if name == "udp":
        log_handler = LogitHandler(("127.0.0.1", 1975))
    elif name == "console":
        log_handler = ConsoleHandler()
    elif name == "dummy":
        log_handler = DummyHandler()
    else:
        raise Exception("Unknown handler %s" % name)
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    return logger
d7662fa035d7f096820c54b6e3b8403a77ce5769
21,726
def precip_units(units):
    """Return a standardized name for precip units."""
    kgm2s = ['kg/m2/s', '(kg/m^2)/s', 'kg/m^2/s', 'kg m^-2 s^-1',
             'kg/(m^2 s)', 'kg m-2 s-1']
    mmday = ['mm/day', 'mm day^-1']
    if units.lower() in kgm2s:
        return 'kg m^-2 s^-1'
    elif units.lower() in mmday:
        return 'mm day^-1'
    else:
        raise ValueError('Unknown units ' + units)
e5f94c3dd41b68d2e7b6b7aa1905fd5508a12fab
21,727
from typing import List, Union

import pandas as pd


def enumerate_quantities(
    df: pd.DataFrame,
    cols: Union[List[str], None] = None,
    qty_col: str = "quantity",
) -> pd.DataFrame:
    """Creates a new dataframe that expands each (x, count) pair into count copies of x."""
    if not cols:
        raise ValueError("parameter cols must be an iterable of strings")
    new_cols: List = [
        sum(df.apply(lambda x: [x[col]] * x[qty_col], axis=1).tolist(), [])
        for col in cols
    ]
    new_df = pd.DataFrame(new_cols, index=cols).T
    return new_df
0defc1575ead9b70f658be5ed6795b22c3b39ac7
21,728
def calcul_acc(labels, preds):
    """
    A private function for calculating accuracy.

    Args:
        labels (Object): actual labels
        preds (Object): predicted labels

    Returns:
        float: the fraction of predictions that match the labels
    """
    return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels)
3dc22c8707c181dda50e2a37f2cd822b2a31590d
21,729
def makeMolFromAtomsAndBonds(atoms, bonds, spin=None):
    """
    Create a new Molecule object from a sequence of atoms and bonds.
    """
    mol = Molecule(pybel.ob.OBMol())
    OBMol = mol.OBMol

    for atomicnum in atoms:
        a = pybel.ob.OBAtom()
        a.SetAtomicNum(atomicnum)
        OBMol.AddAtom(a)
    for bond in bonds:
        if len(bond) != 3:
            raise Exception('Bond must be specified by two indices and a bond order')
        OBMol.AddBond(bond[0] + 1, bond[1] + 1, bond[2])

    mol.assignSpinMultiplicity()
    if spin is not None:
        OBMol.SetTotalSpinMultiplicity(spin)
    OBMol.SetHydrogensAdded()

    return mol
570dafe641bbade0d070942ea8e708d7e454e011
21,730
import sys


def alpha_040(enddate, index='all'):
    """
    Inputs:
        enddate: required, the date for which to compute the factor
        index: optional, the stock index; defaults to all stocks ('all')
    Outputs:
        Series: index is the constituent stock codes, values are the
        corresponding factor values
    Formula:
        ((-1 * rank(stddev(high, 10))) * correlation(high, volume, 10))
    """
    enddate = to_date_str(enddate)
    func_name = sys._getframe().f_code.co_name
    return JQDataClient.instance().get_alpha_101(**locals())
3c9ac15f617a44699ce568fda2a175b11350c43b
21,731
def get_preprocessor(examples, tokenize_fn, pad_ids):
    """
    Input:
        examples: [List[str]] input texts
        tokenize_fn: [function] encodes text into IDs
    Output:
        tf input features
    """
    def generator():
        for example in examples:
            tokens = tokenize_fn(example)
            yield pad_ids + tokens

    return generator
0b2fb2217e04183fee027faedd163a8f8a048e9a
21,732
def is_propositional_effect(eff: BaseEffect):
    """An effect is propositional if it is either an add or a delete effect."""
    return isinstance(eff, (AddEffect, DelEffect))
be440b2192dd6b89fcaff5756e774e7543f408cf
21,733
def read_user(msg):
    """Read user input.

    :param msg: A message to prompt
    :type msg: ``str``
    :return: ``True`` if the user gives 'y', otherwise ``False``.
    :rtype: ``bool``
    """
    user_input = input("{msg} y/n?: ".format(msg=msg))
    return user_input == 'y'
662e95002130a6511e6e9a5d6ea85805f6b8f0f5
21,734
import numpy as np
import scipy.linalg


def frechet_distance(real, fake, eps=1e-6):
    """Frechet distance. Lower score is better."""
    # eps is a small constant added to the diagonal of the covariance
    # estimates for numerical stability; the original referenced an
    # undefined `eps`, so it is exposed here as a parameter.
    n = real.shape[0]
    mu1, sigma1 = np.mean(real, axis=0), np.cov(real.reshape(n, -1), rowvar=False)
    mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake.reshape(n, -1), rowvar=False)
    diff = mu1 - mu2

    covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = (
            "fid calculation produces singular product; "
            "adding %s to diagonal of cov estimates"
        ) % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real

    assert np.isfinite(covmean).all() and not np.iscomplexobj(covmean)
    tr_covmean = np.trace(covmean)

    frechet_dist = diff.dot(diff)
    frechet_dist += np.trace(sigma1) + np.trace(sigma2)
    frechet_dist -= 2 * tr_covmean
    return frechet_dist
55ed2a4f21b8987925083c925e7df6de7b305c06
21,735
import requests


def get_tenants(zuul_url):
    """Fetch list of tenant names."""
    is_whitelabel = requests.get(
        "%s/info" % zuul_url).json().get('tenant', None) is not None
    if is_whitelabel:
        raise RuntimeError("Need multitenant api")
    return [
        tenant["name"]
        for tenant in requests.get("%s/tenants" % zuul_url).json()
    ]
97944d2de2a8dfc2dd50dbea46a135a184e7aa37
21,736
def ant():
    """Configuration for MuJoCo's ant task."""
    locals().update(default())
    # Environment
    env = 'Ant-v2'
    max_length = 1000
    steps = 2e7  # 20M
    return locals()
1b99dba9f38b735056055c564e1143c5eb77401a
21,737
def docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,
               volumes=None, remove_container=True, **kwargs):
    """
    This task runs a docker container. For details on how to use this task, see the
    :ref:`docker-run` guide.

    :param task: The bound task reference.
    :type task: :py:class:`girder_worker.task.Task`
    :param image: The docker image identifier.
    :type image: str
    :param pull_image: Whether to explicitly pull the image prior to running the container.
    :type pull_image: bool
    :param entrypoint: Alternative entrypoint to use when running the container.
    :type entrypoint: str
    :param container_args: Arguments to pass to the container.
    :type container_args: list
    :param volumes: Volumes to expose to the container.
    :type volumes: dict
    :param remove_container: Whether to delete the container after the task is done.
    :type remove_container: bool
    :return: Fulfilled result hooks.
    :rtype: list
    """
    return _docker_run(
        task, image, pull_image, entrypoint, container_args, volumes,
        remove_container, **kwargs)
6ddc61d47c7b78bf532195a8cddd37f3c730b675
21,738
def get_accuracy(pred, target):
    """Gets accuracy either by single prediction against target or by
    comparing their codes."""
    if len(pred.size()) > 1:
        pred = pred.max(1)[1]
        # pred, target = pred.flatten(), target.flatten()
    accuracy = round(float((pred == target).sum()) / float(pred.numel()) * 100, 3)
    return accuracy
f30e57602e4a06b0a0e3cd131bf992cf8f9b514e
21,739
import numpy


def ifourier_transform(F, dt, n):
    """
    See Also
    --------
    fourier_transform
    """
    irfft = numpy.fft.irfft
    shift = numpy.fft.fftshift
    return (1.0 / dt) * shift(irfft(F, n=n))
d068cdbbe95f58d4210d2e799dfaee878fb9bf98
21,740
def preprocess_labels(labels, encoder=None, categorical=True):
    """Encode labels with values between 0 and n_classes - 1."""
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder
3d92ce70f6ae7f713b27f5a31e92f0aab919584b
21,741
def import_minimal_log(path, parameters=None, variant=DEFAULT_VARIANT_LOG):
    """
    Import a Parquet file (as a minimal log with only the essential columns)

    Parameters
    -------------
    path
        Path of the file to import
    parameters
        Parameters of the algorithm, possible values:
            Parameters.COLUMNS -> columns to import from the Parquet file
    variant
        Variant of the algorithm, possible values:
            - Variants.PYARROW

    Returns
    -------------
    df
        Pandas dataframe
    """
    if parameters is None:
        parameters = {}
    parameters[COLUMNS] = [constants.CASE_CONCEPT_NAME, xes.DEFAULT_NAME_KEY,
                           xes.DEFAULT_TIMESTAMP_KEY]
    return exec_utils.get_variant(variant).import_log(path, parameters=parameters)
530f60799318b90c90d08d427965041e4bda6dba
21,742
def get_input_assign(input_signal, input_value):
    """Get input assignation statement"""
    input_assign = ReferenceAssign(
        input_signal,
        Constant(input_value, precision=input_signal.get_precision())
    )
    return input_assign
9b3e372423d323af3a718ab909c26f2ba42bfea6
21,743
def prune_repos(region: str = None, registry_prefix: str = None, repo: str = None,
                current_tag: str = None, all_tags: str = None):
    """
    Delete obsolete images from an ECR repo, keeping the current and recent tags.

    :param region:
    :param registry_prefix:
    :param repo:
    :param current_tag:
    :param all_tags:
    :return:
    """
    # Get the tags from the all_tags JSON
    all_tags_list = get_tags_from_all_tags(all_tags)
    # Add the current_tag to the recent (local) tags. Just to be safe
    recent_tags = all_tags_list + [current_tag]
    # Get the tags for the repo from ECR
    ecr_tags = get_tags_from_ecr(region, repo)
    # Get all the tags in the registry that are *not* the ones we want
    bad_tags = [tag for tag in ecr_tags if tag not in recent_tags]
    # Delete the obsolete images
    for tag in bad_tags:
        output = get_stdout(
            '''{AWS} ecr batch-delete-image --region {region} --repository-name {repo} --image-ids imageTag={tag}'''
            .format(AWS=AWS, region=region, repo=repo, tag=tag))
    return True
d5d21230f4440e4909a9ff0288a794471b5fb016
21,744
def convert_size_bytes_to_gb(size_in_bytes):
    """:rtype: float"""
    return float(size_in_bytes) / GB
7d3946dc431aa6a531fa11ef8e5391279f8b553a
21,745
def merge_swab(survey_df, swab_df):
    """
    Process for matching and merging survey and swab result data.
    Should be executed after merge with blood test result data.
    """
    survey_antibody_swab_df, none_record_df = execute_merge_specific_swabs(
        survey_df=survey_df,
        labs_df=swab_df,
        barcode_column_name="swab_sample_barcode",
        visit_date_column_name="visit_datetime",
        received_date_column_name="pcr_result_recorded_datetime",
        void_value="Void",
    )

    survey_antibody_swab_df = survey_antibody_swab_df.drop(
        "abs_offset_diff_vs_visit_hr_swab",
        "count_barcode_swab",
        "count_barcode_voyager",
        "diff_vs_visit_hr_swab",
        "pcr_flag",
        "time_order_flag",
        "time_difference_flag",
    )

    df_all_iqvia, df_lab_residuals, df_failed_records = merge_process_filtering(
        df=survey_antibody_swab_df,
        none_record_df=none_record_df,
        merge_type="swab",
        barcode_column_name="swab_sample_barcode",
        lab_columns_list=[column for column in swab_df.columns if column != "swab_sample_barcode"],
    )
    return df_all_iqvia, df_lab_residuals, df_failed_records
38253b473c45a967dc1aeccdd61e94566014a347
21,746
def confirm_space(environ, start_response):
    """
    Confirm a space exists. If it does, respond with 204. If not, raise 404.
    """
    store = environ['tiddlyweb.store']
    space_name = environ['wsgiorg.routing_args'][1]['space_name']
    try:
        space = Space(space_name)
        store.get(Recipe(space.public_recipe()))
        store.get(Recipe(space.private_recipe()))
    except NoRecipeError:
        raise HTTP404('%s does not exist' % space_name)
    start_response('204 No Content', [])
    return ['']
aff453f96bb85895115dff9796387bc223151c81
21,747
def find_ppp_device_status(address=None, username=None):
    """Find device status node based on address and/or username.

    This is currently only used by the web UI. For the web UI this is the best
    guess for identifying the device related to a forced web forward; which
    allows the web UI to default username for user login, for instance.
    """
    def _f1(d):
        return (address is not None) and (d.getS(ns.pppAddress, rdf.IPv4Address) == address)

    def _f2(d):
        return (username is not None) and (d.getS(ns.username, rdf.String) == username)

    # There may be multiple matching devices in corner cases, e.g. two devices
    # in RDF with the same IP address.  License monitor reconcile process should
    # eliminate these discrepancies eventually but here we may still encounter
    # them from time to time.
    #
    # If there are multiple matching entries, we take the newest one and assume
    # that is the desired one.  If the entries have a different username, this
    # may match to the wrong user.  This is not critical: the web UI does not
    # allow the user to make any user-related changes until the user has logged
    # in (providing his password).  This function only provides the default
    # value for login username.
    #
    # So: return device with latest startTime (newest connection), or first in
    # list if no startTime is found.  [filter_ppp_device_statuses_single does this.]
    return filter_ppp_device_statuses_single([_f1, _f2])
71d44185a5df8f72b1281102faef66ea4f8a1de1
21,748
def get_L_dash_prm_bath_OS_90(house_insulation_type, floor_bath_insulation):
    """Length (m) of the perimeter of the bathroom earthen-floor section facing
    outside air, in the direction 90° clockwise from the main opening direction.

    Args:
        house_insulation_type(str): '床断熱住戸' (floor-insulated dwelling) or
            '基礎断熱住戸' (foundation-insulated dwelling)
        floor_bath_insulation(str): '床断熱住戸', '基礎断熱住戸', or
            '浴室の床及び基礎が外気等に面していない' (the bathroom floor and
            foundation do not face outside air etc.)

    Returns:
        float: length (m) of the perimeter of the bathroom earthen-floor
        section facing outside air, in the direction 90° clockwise from the
        main opening direction
    """
    return get_table_3(38, house_insulation_type, floor_bath_insulation)
13362e5f035865b38ea6562aa7a836ce95298590
21,749
import os


def init():
    """Authorize twitter app using tweepy library"""
    # ensure environment variables are set
    if not os.environ.get("consumer_key"):
        raise RuntimeError("consumer_key not set")
    if not os.environ.get("consumer_secret"):
        raise RuntimeError("consumer_secret not set")

    auth = tweepy.OAuthHandler(os.environ.get("consumer_key"),
                               os.environ.get("consumer_secret"))
    auth.set_access_token(os.environ.get("access_token"),
                          os.environ.get("access_token_secret"))
    return tweepy.API(auth)
fc53f0fe47a689d7a90a43cb1400d69f909c5ded
21,750
import json


def generate_books(request, form):
    """
    Returns a list of books.
    """
    list_of_books = Book.generate_existing_books(form.cleaned_data['part'])
    return HttpResponse(json.dumps(list_of_books), content_type='application/json')
d75deab68c4cb4cdc9f14e4a313ffd060ab01004
21,751
def window_reverse_4d(windows, window_size, H_q, W_q, H_s, W_s):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, window_size, window_size, C)
        window_size (int): size of window
        H_q (int): Height of query image
        W_q (int): Width of query image
        H_s (int): Height of support image
        W_s (int): Width of support image

    Returns:
        x: (B, H_q, W_q, H_s, W_s, C)
    """
    kwargs = {
        'H_q': H_q // window_size,
        'W_q': W_q // window_size,
        'H_s': H_s // window_size,
        'W_s': W_s // window_size
    }
    x = rearrange(
        windows,
        '(B H_q W_q H_s W_s) W_1 W_2 W_3 W_4 C -> B (H_q W_1) (W_q W_2) (H_s W_3) (W_s W_4) C',
        **kwargs)
    return x
8ef2743ec15c140807a9c269680f8bd3810703a3
21,752
import argparse


def parse_arguments():
    """
    Function to parse command line arguments from the user

    Returns
    -------
    opts : dict
        command line arguments from the user
    """
    info = 'Divides pdb info files for parallelization'
    parser = argparse.ArgumentParser(description=info)

    # program arguments
    parser.add_argument('-f', '--in-file',
                        type=str, required=True,
                        help='PDB info file to divide')
    parser.add_argument('-n', '--num-splits',
                        default=1000, type=int,
                        help='Number of splits to perform (Default: 1000)')
    parser.add_argument('-m', '--mut-file',
                        type=str, required=True,
                        help='File containing mutation information')
    parser.add_argument('--split-dir',
                        default="../data/split_pdbs/", type=str,
                        help='Output directory for split PDB info files')

    args = parser.parse_args()
    opts = vars(args)
    return opts
8bdc260c1dcb779c7b30927651e26c05a9c0d5f5
21,753
def numeric(symbols, negative, value):
    """Implement the algorithm for `type: numeric`."""
    if value == 0:
        return symbols[0]
    is_negative = value < 0
    if is_negative:
        value = abs(value)
        prefix, suffix = negative
        reversed_parts = [suffix]
    else:
        reversed_parts = []
    length = len(symbols)
    value = abs(value)
    while value != 0:
        reversed_parts.append(symbols[value % length])
        value //= length
    if is_negative:
        reversed_parts.append(prefix)
    return ''.join(reversed(reversed_parts))
4eb41904f1ead6e6f8f6d6a5a7855d917a0029b7
21,754
from typing import Dict


def scale_value_dict(dct: Dict[str, float], problem: InnerProblem):
    """Scale a value dictionary."""
    scaled_dct = {}
    for key, val in dct.items():
        x = problem.get_for_id(key)
        scaled_dct[key] = scale_value(val, x.scale)
    return scaled_dct
f7ad0cf51129d7abfb85fdba8d64f1c69bba2bad
21,755
import json


def get_and_log_environment():
    """Grab and log environment to use when executing command lines.

    The shell environment is saved into a file at an appropriate place in the
    Dockerfile.

    Returns:
        environ (dict) the shell environment variables
    """
    environment_file = FWV0 / "gear_environ.json"
    log.debug("Grabbing environment from %s", environment_file)

    with open(environment_file, "r") as f:
        environ = json.load(f)

    # Add environment to log if debugging
    kv = ""
    for k, v in environ.items():
        kv += k + "=" + v + " "
    log.debug("Environment: " + kv)

    return environ
2b91378184a442a3a21c8b94a4667dc9ab90290a
21,756
def green(string: str) -> str:
    """Add green colour codes to string

    Args:
        string (str): Input string

    Returns:
        str: Green string
    """
    return "\033[92m" + string + "\033[0m"
b6bdefe3e467e88c044b9289ea26a59ccf564f1a
21,757
def from_6x6_to_21x1(T):
    """Convert a symmetric 6x6 second order tensor to a 21x1 first order tensor."""
    C2 = np.sqrt(2)
    V = np.array([[T[0, 0], T[1, 1], T[2, 2],
                   C2 * T[1, 2], C2 * T[0, 2], C2 * T[0, 1],
                   C2 * T[0, 3], C2 * T[0, 4], C2 * T[0, 5],
                   C2 * T[1, 3], C2 * T[1, 4], C2 * T[1, 5],
                   C2 * T[2, 3], C2 * T[2, 4], C2 * T[2, 5],
                   T[3, 3], T[4, 4], T[5, 5],
                   C2 * T[3, 4], C2 * T[4, 5], C2 * T[5, 3]]]).T
    return V
177d766ee251dfb52396f88b4e77d101956afe79
21,758
def post_add_skit_reply():
    """Adds a skit reply if authored by the current user."""
    email = is_authed(request)
    if email and csrf_check(request):
        # same as args, form data is also immutable
        request.form = dict(request.form)
        request.form['email'] = email
        p_resp = proxy(RUBY, request)
        return create_response(p_resp)
    return BADUSER
17b64bba949bb2df57cbdf796c0b895387672018
21,759
import datetime


def register(request):
    """Register new account."""
    token_int = int(datetime.datetime.strftime(
        datetime.datetime.now(), '%Y%m%d%H%M%S%f'))
    token = short_url.encode_url(token_int)

    if (not request.playstore_url and not request.appstore_url
            and not request.winstore_url and not request.default_url):
        return False, 'Insufficient information to register.', None

    account = models.Accounts(
        playstore_url=request.playstore_url,
        appstore_url=request.appstore_url,
        winstore_url=request.winstore_url,
        default_url=request.default_url,
        title=request.title,
        banner=request.banner,
        description=request.description,
        token=token
    )
    account.put()
    return True, None, token
649c413011ec76bdb2244bbbfe7f4810230d3202
21,760
def stringify_parsed_email(parsed):
    """Convert a parsed email tuple into a single email string"""
    if len(parsed) == 2:
        return f"{parsed[0]} <{parsed[1]}>"
    return parsed[0]
6552987fe6a06fdbb6bd49e5d17d5aadaae3c832
21,761
import math


def standard_simplex_vol(sz: int):
    """Returns the volume of the sz-dimensional standard simplex"""
    result = cm_matrix_det_ns(np.identity(sz, dtype=DTYPE))
    if result == math.inf:
        raise ValueError(f'Cannot compute volume of standard {sz}-simplex')
    return result
1b0d806312ee722f3251e1099e604a18d4e762a7
21,762
def all_saveable_objects(scope=None):
    """
    Copied private function in TF source.
    This is what tf.train.Saver saves if var_list=None is passed.
    """
    return (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) +
            tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS, scope))
4c0b8ec0dd65160a113d4e6151a1a5b6d8454926
21,763
def base_to_str(base):
    """Converts 0,1,2,3 to A,C,G,T"""
    if 0 == base:
        return 'A'
    if 1 == base:
        return 'C'
    if 2 == base:
        return 'G'
    if 3 == base:
        return 'T'
    raise RuntimeError('Bad base: %d' % base)
f1c98b7c24fae91c1f809abe47929d724c886168
21,764
import argparse


def parse_args(args):
    """
    Parse arguments from the CLI and return them parsed for later use.

    :param list args: arguments passed from the sys command line or directly
    :return dict: parsed arguments
    """
    parser = argparse.ArgumentParser()

    required = parser.add_argument_group()
    required.add_argument("-d", "--date", help="Date of Flight", required=True)
    # name "origin" used because of the conflicting name "from" in tests etc.;
    # cannot use parser.from
    required.add_argument("-fr", "--from", help="IATA code of Departure",
                          dest="origin", required=True)
    required.add_argument("-t", "--to", help="IATA code of Destination",
                          required=True)

    days_in_destination = parser.add_mutually_exclusive_group()
    days_in_destination.add_argument("-o", "--one-way", action="store_const",
                                     help="One-way ticket",
                                     dest="days_in_destination", const="oneway")
    days_in_destination.add_argument("-r", "--return", action="store",
                                     help="Round ticket followed by number of days in destination",
                                     dest="days_in_destination")
    days_in_destination.set_defaults(days_in_destination='oneway')

    sort = parser.add_mutually_exclusive_group()
    sort.add_argument("-c", "--cheapest", action="store_const",
                      help="Book cheapest flight", dest="sort", const="price")
    sort.add_argument("-fa", "--fastest", action="store_const",
                      help="Book fastest flight", dest="sort", const="duration")
    sort.set_defaults(sort='price')

    parser.add_argument("-b", "--bags", help="Number of checked-in bags",
                        default='0')
    parser.add_argument("-v", "--verbose", help="Sets verbose output",
                        action='store_true')

    return parser.parse_args(args)
e3783dc173696b5758d8fc90bc810841e043be0a
21,765
def attr(accessing_obj, accessed_obj, *args, **kwargs):
    """
    Usage:
      attr(attrname)
      attr(attrname, value)
      attr(attrname, value, compare=type)

    where compare's type is one of (eq, gt, lt, ge, le, ne) and signifies how
    the value should be compared with one on accessing_obj (so compare=gt
    means the accessing_obj must have a value greater than the one given).

    Searches attributes *and* properties stored on the checking object.

    The first form works like a flag - if the attribute/property exists on
    the object, the value is checked for True/False. The second form also
    requires that the value of the attribute/property matches. Note that all
    retrieved values will be converted to strings before doing the comparison.
    """
    # deal with arguments
    if not args:
        return False
    attrname = args[0].strip()
    value = None
    if len(args) > 1:
        value = args[1].strip()
    compare = 'eq'
    if kwargs:
        compare = kwargs.get('compare', 'eq')

    def valcompare(val1, val2, typ='eq'):
        "compare based on type"
        try:
            return CF_MAPPING.get(typ, 'default')(val1, val2)
        except Exception:
            # this might happen if we try to compare two things that cannot
            # be compared
            return False

    # first, look for normal properties on the object trying to gain access
    if hasattr(accessing_obj, attrname):
        if value:
            return valcompare(str(getattr(accessing_obj, attrname)), value, compare)
        return bool(getattr(accessing_obj, attrname))  # returns False on a falsy value etc.
    # check attributes, if they exist
    if (hasattr(accessing_obj, 'has_attribute')
            and accessing_obj.has_attribute(attrname)):
        if value:
            return (hasattr(accessing_obj, 'get_attribute')
                    and valcompare(accessing_obj.get_attribute(attrname), value, compare))
        return bool(accessing_obj.get_attribute(attrname))  # fails on False/None values
    return False
8b3944ee8ef64938314766cc21e893ccbf48d9e1
21,766
def group_connected(polygon_map, mask=None):
    """Group all connected nodes."""
    # Wrap :c:`group_connected()` from ``polygon_map.c``.
    polygon_map = mask_polygon_map(polygon_map, mask)
    queue = Queue(len(polygon_map) + 1)
    group_ids = np.full(len(polygon_map), -1, np.intp, order="C")
    groups_count: int
    groups_count = slug.dll.group_connected(ptr(polygon_map),
                                            ptr(polygon_map.ctypes.shape),
                                            ptr(group_ids),
                                            queue._raw._ptr)
    return group_ids, groups_count
2239feab1ef914156e01ab430f0c561609de0b18
21,767
def dictmask(data, mask, missing_keep=False):
    """dictmask masks dictionary data based on mask"""
    if not isinstance(data, dict):
        raise ValueError("First argument with data should be dictionary")
    if not isinstance(mask, dict):
        raise ValueError("Second argument with mask should be dictionary")
    if not isinstance(missing_keep, bool):
        raise ValueError("Argument missing_keep should be bool type")
    res = {}
    for k, v in data.items():
        if k not in mask:
            if missing_keep is True:
                res[k] = v
            continue
        if mask[k] is None or mask[k] is False:
            continue
        if mask[k] is True or data[k] is None:
            res[k] = v
            continue
        if isinstance(data[k], dict) and isinstance(mask[k], dict):
            res[k] = dictmask(data[k], mask[k])
            continue
        if isinstance(data[k], list) and isinstance(mask[k], list):
            if len(mask[k]) != 1:
                raise ValueError("Mask inside list should have only one item")
            res2 = []
            for i in range(len(data[k])):
                res2.append(dictmask(data[k][i], mask[k][0], missing_keep))
            res[k] = res2
        else:
            raise ValueError(
                f"Cannot proceed key {k} with values of different types:"
                f"{type(data[k])}, {type(mask[k])}"
            )
    return res
d18f6effb4367628ba85095024189d0f6694dd52
21,768
import subprocess
import time
from pathlib import Path


def connect(file=None, port=8100, counter_max=5000):
    """Open libreoffice and enable connection with Calc.

    Args:
        file (str or pathlib.Path, optional): Filepath. If None, a new Calc
            instance will be opened.
        port (int, optional): port for connection.
        counter_max (int, optional): Max number of tentatives to establish a
            connection.

    Returns:
        Calc object.

    Examples:
        Open new instance of Calc:

        >>> calcObject = calc.connect()

        Adds one sheet ('Sheet2') at position 1:

        >>> calcObject.insert_sheets_new_by_name('Sheet2', 1)

        Add multiple sheets ('Sheet3' and 'Sheet4') at position 2:

        >>> calcObject.insert_multisheets_new_by_name(['Sheet3', 'Sheet4'], 2)

        >>> # Get number of sheets
        >>> print(calcObject.get_sheets_count())
        4

        >>> # Remove sheets
        >>> calcObject.remove_sheets_by_name('Sheet3')

        >>> # get sheet data
        >>> sheet1 = calcObject.get_sheet_by_name('Sheet1')
        >>> sheet2 = calcObject.get_sheet_by_index(0)
    """
    # open libreoffice
    libreoffice = subprocess.Popen(
        [f"soffice --nodefault --accept='socket,host=localhost,port={port};urp;'"],
        shell=True, close_fds=True)

    # connect to libreoffice
    connected = False
    counter = 0
    while not connected:
        time.sleep(0.5)
        try:
            context = connect(Socket('localhost', f'{port}'))
            connected = True
        except:
            counter += 1
            if counter == counter_max:
                raise ConnectionError('Cannot establish connection, maybe try '
                                      'increasing counter_max value.')

    if file is None:
        return Calc(context)
    else:
        file = Path(file)
        return Calc(context, convert_path_to_url(str(file)))
a82386b0eff1c8665f056d705a8a65913beecdf4
21,769
from dallinger.config import default_keys
from dallinger.config import Configuration


def stub_config():
    """Builds a standardized Configuration object and returns it, but does
    not load it as the active configuration returned by
    dallinger.config.get_config()
    """
    defaults = {
        u'ad_group': u'Test ad group',
        u'approve_requirement': 95,
        u'assign_qualifications': True,
        u'auto_recruit': True,
        u'aws_access_key_id': u'fake aws key',
        u'aws_secret_access_key': u'fake aws secret',
        u'aws_region': u'us-east-1',
        u'base_payment': 0.01,
        u'base_port': 5000,
        u'browser_exclude_rule': u'MSIE, mobile, tablet',
        u'clock_on': True,
        u'contact_email_on_error': u'error_contact@test.com',
        u'dallinger_email_address': u'test@example.com',
        u'database_size': u'standard-0',
        u'redis_size': u'premium-0',
        u'database_url': u'postgresql://postgres@localhost/dallinger',
        u'description': u'fake HIT description',
        u'duration': 1.0,
        u'dyno_type': u'free',
        u'heroku_auth_token': u'heroku secret',
        u'heroku_team': u'',
        u'host': u'0.0.0.0',
        u'id': u'some experiment uid',
        u'keywords': u'kw1, kw2, kw3',
        u'lifetime': 1,
        u'logfile': u'-',
        u'loglevel': 0,
        u'mode': u'debug',
        u'notification_url': u'https://url-of-notification-route',
        u'num_dynos_web': 1,
        u'num_dynos_worker': 1,
        u'organization_name': u'Monsters University',
        u'sentry': True,
        u'smtp_host': u'smtp.fakehost.com:587',
        u'smtp_username': u'fake email username',
        u'smtp_password': u'fake email password',
        u'threads': u'1',
        u'title': u'fake experiment title',
        u'us_only': True,
        u'webdriver_type': u'phantomjs',
        u'whimsical': True,
    }

    config = Configuration()
    for key in default_keys:
        config.register(*key)
    config.extend(defaults.copy())
    config.ready = True
    return config
57820943883411f7e1081d1ff39ac5677d91f41d
21,770
def _prepare_policy_input(observations, vocab_size, observation_space, action_space):
    """Prepares policy input based on a sequence of observations."""
    if vocab_size is not None:
        (batch_size, n_timesteps) = observations.shape[:2]
        serialization_kwargs = init_serialization(
            vocab_size, observation_space, action_space, n_timesteps
        )
        actions = np.zeros(
            (batch_size, n_timesteps - 1) + action_space.shape,
            dtype=action_space.dtype,
        )
        reward_mask = np.ones((batch_size, n_timesteps - 1), dtype=np.int32)
        (policy_input, _) = serialization_utils.serialize_observations_and_actions(
            observations=observations,
            actions=actions,
            mask=reward_mask,
            **serialization_kwargs
        )
        return policy_input
    else:
        return observations
9799357e00453a1259551c3af1b5bf5b58603186
21,771
def RGB2raw(R, G, B):
    """Convert RGB channels to Raw image."""
    h, w = R.shape
    raw = np.empty(shape=(2 * h, 2 * w), dtype=R.dtype)
    raw[::2, ::2] = R
    raw[1::2, 1::2] = B
    raw[1::2, 0::2] = G
    raw[0::2, 1::2] = G
    return raw
7adb2ccef65c85c7e5d1ac223f397ef2f90dd9d3
21,772
def get_algs_from_ciphersuite_name(ciphersuite_name):
    """
    Return the tuple made of the Key Exchange Algorithm class, the Cipher
    class, the HMAC class and the Hash class, plus a TLS 1.3 flag, through
    the parsing of the ciphersuite name.
    """
    tls1_3 = False
    if ciphersuite_name.startswith("TLS"):
        s = ciphersuite_name[4:]

        if s.endswith("CCM") or s.endswith("CCM_8"):
            kx_name, s = s.split("_WITH_")
            kx_alg = _tls_kx_algs.get(kx_name)
            hash_alg = _tls_hash_algs.get("SHA256")
            cipher_alg = _tls_cipher_algs.get(s)
            hmac_alg = None
        else:
            if "WITH" in s:
                kx_name, s = s.split("_WITH_")
                kx_alg = _tls_kx_algs.get(kx_name)
            else:
                tls1_3 = True
                kx_alg = _tls_kx_algs.get("TLS13")

            hash_name = s.split('_')[-1]
            hash_alg = _tls_hash_algs.get(hash_name)

            cipher_name = s[:-(len(hash_name) + 1)]
            if tls1_3:
                cipher_name += "_TLS13"
            cipher_alg = _tls_cipher_algs.get(cipher_name)

            hmac_alg = None
            if cipher_alg is not None and cipher_alg.type != "aead":
                hmac_name = "HMAC-%s" % hash_name
                hmac_alg = _tls_hmac_algs.get(hmac_name)

    elif ciphersuite_name.startswith("SSL"):
        s = ciphersuite_name[7:]
        kx_alg = _tls_kx_algs.get("SSLv2")
        cipher_name, hash_name = s.split("_WITH_")
        cipher_alg = _tls_cipher_algs.get(cipher_name.rstrip("_EXPORT40"))
        kx_alg.export = cipher_name.endswith("_EXPORT40")
        hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
        hash_alg = _tls_hash_algs.get(hash_name)

    return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3
cc2ab3fcae87feeb7877bad091446fb2d20be6b0
21,773
def centroid(window):
    """Centroid interpolation for sub pixel shift"""
    ip = lambda x: (x[2] - x[0]) / (x[0] + x[1] + x[2])
    return ip(window[:, 1]), ip(window[1])
e1cf0398261637f682c74340f99566d19e342b66
21,774
def _format_warning(message, category, filename, lineno, line=None):
    """
    Replacement for warnings.formatwarning that disables the echoing of
    the 'line' parameter.
    """
    return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message)
8267150c5890759d2f2190ccf4b7436ea8f55204
21,775
from typing import List


def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float:
    """Computes `Precision@k` from the given predictions and targets sets."""
    predictions_set = set(predictions[:k])
    targets_set = set(targets)
    result = len(targets_set & predictions_set) / float(len(predictions_set))
    return result
4c6e566db7c488416139545f5f845ff80b7af434
21,776
def wordify_open(p, word_chars):
    """Prepend the word start markers."""
    return r"(?<![{0}]){1}".format(word_chars, p)
8b267aaca897d6435a84f22064f644727ca6e83c
21,777
def Mt_times_M(M):
    """Compute M^t @ M

    Args:
        M : (batched) matrix M

    Returns:
        tf.Tensor: solution of M^t @ M
    """
    if isinstance(M, tf.Tensor):
        linop = tf.linalg.LinearOperatorFullMatrix(M)
        return linop.matmul(M, adjoint=True)
    elif isinstance(M, (tf.linalg.LinearOperatorFullMatrix,
                        tf.linalg.LinearOperatorLowerTriangular)):
        return M.matmul(M.to_dense(), adjoint=True)
    elif is_diagonal_linop(M):
        return diagonal_M_times_Mt(M)
    else:
        raise TypeError("cannot compute M_times_Mt, invalid type")
cfb8023711186821faf0ff8bfa1277d6585d40de
21,778
import typing


def make_values(ints: typing.Iterable[int]):
    """Make datasets."""
    return [
        ('int', ints),
        ('namedtuple', [IntNamedTuple(i) for i in ints]),
        ('class', [IntObject(i) for i in ints]),
    ]
700bbd4a43bff38a0154bbc595c1cd10cc2ec9d9
21,779
import collections


def attach_trans_dict(model, objs):
    """Put all translations from all non-deferred translated fields from objs
    into a translations dict on each instance."""
    # Get the ids of all the translations we need to fetch.
    try:
        deferred_fields = objs[0].get_deferred_fields()
    except IndexError:
        return
    fields = [
        field
        for field in model._meta.translated_fields
        if field.attname not in deferred_fields
    ]
    ids = [
        getattr(obj, field.attname)
        for field in fields
        for obj in objs
        if getattr(obj, field.attname, None) is not None
    ]
    if ids:
        # Get translations in a dict, ids will be the keys. It's important to
        # consume the result of sorted_groupby, which is an iterator.
        qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
    else:
        qs = []
    all_translations = {
        field_id: sorted(list(translations), key=lambda t: t.locale)
        for field_id, translations in sorted_groupby(qs, lambda t: t.id)
    }

    def get_locale_and_string(translation, new_class):
        """Convert the translation to new_class (making PurifiedTranslations
        and LinkifiedTranslations work) and return locale / string tuple."""
        converted_translation = new_class()
        converted_translation.__dict__ = translation.__dict__
        return (converted_translation.locale.lower(), str(converted_translation))

    # Build and attach translations for each field on each object.
    for obj in objs:
        if not obj:
            continue
        obj.translations = collections.defaultdict(list)
        for field in fields:
            t_id = getattr(obj, field.attname, None)
            field_translations = all_translations.get(t_id, None)
            if not t_id or field_translations is None:
                continue
            obj.translations[t_id] = [
                get_locale_and_string(t, field.remote_field.model)
                for t in field_translations
            ]
933e87b050eac0dfbead141c0c3c56a2add9751f
21,780
def list_known_protobufs():
    """Returns the list of known protobuf model IDs"""
    return [k for k in proto_data_structure]
a4b80f948792a4d2a965eac507a118719fa106f5
21,781
def hash_value(*args):
    """
    hash_value(NodeConstHandle t) -> std::size_t
    hash_value(BufferConstHandle t) -> std::size_t
    hash_value(FileConstHandle t) -> std::size_t
    """
    return _RMF.hash_value(*args)
c85277fec7f329eeba26d053a43205bf1eda0662
21,782
import re


def read_examples(input_file):
    """Read a list of `InputExample`s from an input file."""
    examples = []
    unique_id = 0
    # with tf.gfile.GFile(input_file, "r") as reader:
    with open(input_file, "r") as reader:
        while True:
            line = tokenization.convert_to_unicode(reader.readline())
            if not line:
                break
            line = line.strip()
            text_a = None
            text_b = None
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
            if m is None:
                text_a = line
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples
82380f9409bd91e16fc4324fc345d335fa9e96dc
21,783
from datasets import load_dataset


def get_finance_sentiment_dataset(split: str = 'sentences_allagree') -> list:
    """
    Load financial dataset from HF:
    https://huggingface.co/datasets/financial_phrasebank

    Note that there's no train/validation/test split: the dataset is available
    in four possible configurations depending on the percentage of agreement
    of annotators. By default, load just sentences for which all annotators
    agree.
    """
    dataset = load_dataset("financial_phrasebank", split)
    return dataset['train']
9f83d4c501ed16e5e617c32090787d1377ec70fd
21,784
def list_databases():
    """
    List tick databases and associated aggregate databases.

    Returns
    -------
    dict
        dict of {tick_db: [agg_dbs]}
    """
    response = houston.get("/realtime/databases")
    houston.raise_for_status_with_json(response)
    return response.json()
d2ba438a0496f5863ad1c16cb8e694b54276d01e
21,785
def nice_range(bounds):
    """
    Given a range, return an enclosing range accurate to two digits.
    """
    step = bounds[1] - bounds[0]
    if step > 0:
        d = 10 ** (floor(log10(step)) - 1)
        return floor(bounds[0] / d) * d, ceil(bounds[1] / d) * d
    else:
        return bounds
66f538649b8f1c55d301b2a0e293a4968b3665d9
21,786
def connect_to_portal(config):
    """
    The portal/metadata schema is completely optional.
    """
    if config.portal_schema:
        return aws.connect_to_db(**config.rds_config, schema=config.portal_schema)
2aa76ae2ad8d9ea16ea7ba227627f02d3c044d70
21,787
import json


def get_words_for_source():
    """Gets JSON to populate words for source"""
    source_label = request.args.get("source")
    source = create_self_summary_words(source_label)
    return json.dumps(source)
2a2fc02a6f77cd109f3e0fded026676109ae014c
21,788
def raster(event_times_list):
    """
    Creates a raster plot.

    Parameters
    ----------
    event_times_list : iterable
        a list of event time iterables

    Returns
    -------
    ax : an axis containing the raster plot
    """
    color = 'k'  # color of vlines (hardcoded, not a parameter)
    ax = plt.gca()
    for ith, trial in enumerate(event_times_list):
        plt.vlines(trial, ith + .5, ith + 1.5, color=color)
    plt.ylim(.5, len(event_times_list) + .5)
    return ax
3c5b485bdc3992602a7c7bb227329b2e74c611d9
21,789
def FindOneDocument(queryDocument, database='NLP', collection="Annotations",
                    host='localhost', port='27017'):
    """
    This method returns the first document in the backing store that matches
    the criteria specified in queryDocument.

    :param queryDocument: [dict] A pymongo document used to query the MongoDB instance.
    :param database: [string] The name of the MongoDB database instance that
        holds "collection". Defaults to NLP.
    :param collection: [string] The name of the collection that stores the
        document instances. Defaults to "Annotations".
    :param host: [string] The host IP address, defaults to localhost.
    :param port: [string] The port on which the MongoDB server is listening.
        Defaults to 27017.
    :return: [object | None] A single Document object if the query matches
        any documents, otherwise None.
    """
    client = MongoClient('mongodb://%s:%s/' % (host, port))
    collection = client[database][collection]
    mongoDoc = collection.find_one(queryDocument)
    client.close()
    return constructAnnotationDocument(mongoDoc)
fb5f683f4451144ae3cbe374162bef36918130ba
21,790
def user_info(context, **kwargs):
    """
    Displays information about the currently authorized user, or links to
    login and registration.

    Usage example::

        {% user_info %}

    :param context: the template context
    :param kwargs: html attributes of the wrapping tag
    :return:
    """
    request = context['request']
    return {'user': request.user, 'data': kwargs}
20321056fd5fdf8f51e79fb66d335272e85ada0d
21,791
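For this to work as a Django template tag it must be registered in a templatetags module; a plausible wiring sketch, where the template path is an assumption and takes_context=True is what makes the `context` argument available:

from django import template

register = template.Library()

# Hypothetical registration; the template path is an assumption.
@register.inclusion_tag("includes/user_info.html", takes_context=True)
def user_info(context, **kwargs):
    request = context["request"]
    return {"user": request.user, "data": kwargs}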
from functools import reduce
import datetime


def transactions(request):
    """Transaction list"""
    tt = TimerThing('transactions')

    # Get profile
    profile = request.user.profile

    characters = Character.objects.filter(
        apikeys__user=request.user,
        apikeys__valid=True,
        apikeys__key_type__in=[APIKey.ACCOUNT_TYPE, APIKey.CHARACTER_TYPE]
    ).distinct()
    character_ids = [c.id for c in characters]

    corporation_ids = Corporation.get_ids_with_access(request.user, APIKey.CORP_ASSET_LIST_MASK)
    corporations = Corporation.objects.filter(
        pk__in=corporation_ids
    )

    tt.add_time('init')

    # Get a QuerySet of transactions by this user
    transaction_ids = Transaction.objects.filter(
        (
            Q(character__in=character_ids)
            &
            Q(corp_wallet__isnull=True)
        )
        |
        Q(corp_wallet__corporation__in=corporation_ids)
    )
    transaction_ids = transaction_ids.order_by('-date')

    # Get a QuerySet of transactions IDs by this user
    # characters = list(Character.objects.filter(apikeys__user=request.user.id).values_list('id', flat=True))
    # transaction_ids = Transaction.objects.filter(character_id__in=characters)
    # transaction_ids = transaction_ids.order_by('-date')

    # Get only the ids, at this point joining the rest is unnecessary
    transaction_ids = transaction_ids.values_list('pk', flat=True)

    tt.add_time('transaction ids')

    # Parse and apply filters
    filters = parse_filters(request, FILTER_EXPECTED)

    if 'char' in filters:
        qs = []
        for fc, fv in filters['char']:
            if fc == 'eq':
                qs.append(Q(character=fv))
            elif fc == 'ne':
                qs.append(~Q(character=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'corp' in filters:
        qs = []
        for fc, fv in filters['corp']:
            if fc == 'eq':
                qs.append(Q(corp_wallet__corporation=fv))
            elif fc == 'ne':
                qs.append(~Q(corp_wallet__corporation=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    # Client is a special case that requires some extra queries
    if 'client' in filters:
        qs = []
        for fc, fv in filters['client']:
            if fc == 'eq':
                qs.append(Q(name=fv))
            elif fc == 'ne':
                qs.append(~Q(name=fv))
            elif fc == 'in':
                qs.append(Q(name__icontains=fv))
        qs_reduced = reduce(q_reduce_or, qs)

        char_ids = list(Character.objects.filter(qs_reduced).values_list('id', flat=True))
        corp_ids = list(Corporation.objects.filter(qs_reduced).values_list('id', flat=True))

        transaction_ids = transaction_ids.filter(
            Q(other_char_id__in=char_ids)
            |
            Q(other_corp_id__in=corp_ids)
        )

    if 'date' in filters:
        qs = []
        for fc, fv in filters['date']:
            if fc == 'eq':
                try:
                    start = datetime.datetime.strptime(fv, '%Y-%m-%d')
                    end = datetime.datetime.strptime('%s 23:59:59' % (fv), '%Y-%m-%d %H:%M:%S')
                    qs.append(Q(date__range=(start, end)))
                except ValueError:
                    pass
            elif fc == 'bt':
                parts = fv.split(',')
                if len(parts) == 2:
                    try:
                        start = datetime.datetime.strptime(parts[0], '%Y-%m-%d')
                        end = datetime.datetime.strptime('%s 23:59:59' % (parts[1]), '%Y-%m-%d %H:%M:%S')
                        if start < end:
                            qs.append(Q(date__range=(start, end)))
                    except ValueError:
                        pass
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'item' in filters:
        qs = []
        for fc, fv in filters['item']:
            if fc == 'eq':
                qs.append(Q(item__name=fv))
            elif fc == 'ne':
                qs.append(~Q(item__name=fv))
            elif fc == 'in':
                qs.append(Q(item__name__icontains=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'total' in filters:
        qs = []
        for fc, fv in filters['total']:
            if fc == 'eq':
                if fv < 0:
                    qs.append(Q(buy_transaction=True, total_price=abs(fv)))
                else:
                    qs.append(Q(buy_transaction=False, total_price=fv))
            elif fc == 'ne':
                qs.append(~Q(total_price=fv))
            elif fc == 'gt':
                if fv > 0:
                    qs.append(Q(buy_transaction=False, total_price__gt=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gt=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lt=abs(fv))
                    )
            elif fc == 'gte':
                if fv >= 0:
                    qs.append(Q(buy_transaction=False, total_price__gte=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gte=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lte=abs(fv))
                    )
            elif fc == 'lt':
                if fv > 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lt=fv)
                        |
                        Q(buy_transaction=True, total_price__gt=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gt=abs(fv)))
            elif fc == 'lte':
                if fv >= 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lte=fv)
                        |
                        Q(buy_transaction=True, total_price__gte=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gte=abs(fv)))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    tt.add_time('filters')

    # Create a new paginator
    paginator = Paginator(transaction_ids, profile.entries_per_page)

    # If page request is out of range, deliver last page of results
    try:
        paginated = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        # Page is not an integer, use first page
        paginated = paginator.page(1)
    except EmptyPage:
        # Page is out of range, deliver last page
        paginated = paginator.page(paginator.num_pages)

    tt.add_time('paginator')

    # Do page number things
    hp = paginated.has_previous()
    hn = paginated.has_next()

    prev = []
    next = []
    if hp:
        # prev and next, use 1 of each
        if hn:
            prev.append(paginated.previous_page_number())
            next.append(paginated.next_page_number())
        # no next, add up to 2 previous links
        else:
            for i in range(paginated.number - 1, 0, -1)[:2]:
                prev.insert(0, i)
    else:
        # no prev, add up to 2 next links
        for i in range(paginated.number + 1, paginator.num_pages)[:2]:
            next.append(i)

    # Build the transaction queryset now to avoid nasty subqueries
    transactions = Transaction.objects.filter(pk__in=paginated)
    transactions = transactions.select_related('corp_wallet__corporation', 'item', 'station', 'character', 'other_char', 'other_corp')
    transactions = transactions.order_by('-date')
    transactions = list(transactions)

    tt.add_time('transactions')

    # Build filter links, urgh
    for transaction in transactions:
        transaction.z_client_filter = build_filter(filters, 'client', 'eq', transaction.other_char or transaction.other_corp)
        transaction.z_item_filter = build_filter(filters, 'item', 'eq', transaction.item.name)

    tt.add_time('build links')

    # Ready template things
    values = {
        'chars': characters,
        'corps': corporations,
    }

    tt.add_time('template bits')

    # Render template
    out = render_page(
        'thing/transactions.html',
        {
            'json_data': _json_data(characters, corporations, filters),
            'transactions': transactions,
            'show_item_icons': request.user.profile.show_item_icons,
            'paginated': paginated,
            'next': next,
            'prev': prev,
            'values': values,
        },
        request,
        character_ids,
        corporation_ids,
    )

    tt.add_time('template')
    if settings.DEBUG:
        tt.finished()

    return out
46585859baa2a665f30d6abd48455c9abefa1c1e
21,792
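The view folds Q objects together with a `q_reduce_or` helper imported from elsewhere in the app; given how it is used with `reduce`, a plausible definition is the OR-combinator below (an assumption, not the app's verified source):

from django.db.models import Q

def q_reduce_or(a, b):
    """Combine two Q objects with OR, for use with functools.reduce."""
    return a | b

# e.g. reduce(q_reduce_or, [Q(character=1), Q(character=2)])
#      is equivalent to Q(character=1) | Q(character=2)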
def to_bytes(val):
    """Takes a text message and returns it encoded as bytes, with literal
    '\\r'/'\\n' escape sequences converted to real control characters.
    The NoResponse sentinel is passed through unchanged.
    """
    if val is NoResponse:
        return val
    val = val.replace('\\r', '\r').replace('\\n', '\n')
    return val.encode()
9f5a45d9c69a18eec22c85c6691f8b3d46742af4
21,793
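A quick check of the escape handling. The stand-in sentinel below must live in the same namespace as `to_bytes` for the identity check inside the function to hold:

# Stand-in for the module's NoResponse sentinel.
NoResponse = object()

print(to_bytes(r"OK\r\n"))                 # b'OK\r\n' -- literal escapes become control bytes
print(to_bytes(NoResponse) is NoResponse)  # True -- sentinel passes through untouched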
import tensorflow as tf


def _create_fake_data_fn(train_length=_DATA_LENGTH, valid_length=50000, num_batches=40):
    """ Creates fake dataset

    Data is returned in NCHW since this tends to be faster on GPUs
    """
    logger = _get_logger()
    logger.info("Creating fake data")

    data_array = _create_data(_BATCHSIZE, num_batches, (_HEIGHT, _WIDTH), _CHANNELS)
    labels_array = _create_labels(_BATCHSIZE, num_batches, 1000)

    def fake_data_generator():
        for i in range(num_batches):
            yield (data_array[i * _BATCHSIZE:(i + 1) * _BATCHSIZE],
                   labels_array[i * _BATCHSIZE:(i + 1) * _BATCHSIZE])

    # from_generator is a static method; calling it on the class avoids
    # instantiating the abstract Dataset type.
    train_data = tf.data.Dataset.from_generator(
        fake_data_generator,
        output_types=(tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
            tf.TensorShape([None]),
        ),
    )
    train_data = train_data.shuffle(40 * _BATCHSIZE).repeat().prefetch(_BUFFER)

    validation_data = tf.data.Dataset.from_generator(
        fake_data_generator,
        output_types=(tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
            tf.TensorShape([None]),
        ),
    )
    validation_data = validation_data.prefetch(_BUFFER)

    def _train_input_fn():
        return train_data.make_one_shot_iterator().get_next()

    def _validation_input_fn():
        return validation_data.make_one_shot_iterator().get_next()

    _train_input_fn.length = train_length
    _validation_input_fn.length = valid_length
    _train_input_fn.classes = 1000
    _validation_input_fn.classes = 1000

    return _train_input_fn, _validation_input_fn
33533cc4b1d43aaeba48db8470c93cbc058ad3dc
21,794
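A hedged TF 1.x smoke test of the returned input_fn; it assumes the module's _BATCHSIZE/_HEIGHT/_WIDTH/_CHANNELS/_BUFFER constants and the _create_data/_create_labels helpers exist, and uses the graph-mode Session API the snippet targets:

import tensorflow as tf

train_input_fn, _ = _create_fake_data_fn(num_batches=2)
features, labels = train_input_fn()
with tf.Session() as sess:
    x, y = sess.run([features, labels])
    print(x.shape, y.shape)  # (batch, C, H, W) and (batch,)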
import cv2
import numpy as np


def watershed(src):
    """
    Performs a marker-based image segmentation using the watershed algorithm.

    :param src: 8-bit 1-channel image.
    :return: 32-bit single-channel image (map) of markers.
    """
    # cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
    gray = src.copy()
    img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    # h, w = gray.shape[:2]
    # block_size = (min(h, w) // 4 + 1) * 2 + 1
    # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
    _ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = opening & gray
    # cv2.imshow('dist_transform', dist_transform)
    # _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
    _ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)

    # Finding unknown region
    # sure_bg = np.uint8(sure_bg)
    sure_fg = np.uint8(sure_fg)
    # cv2.imshow('sure_fg', sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    _ret, marker_map = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    marker_map = marker_map + 1

    # Now, mark the region of unknown with zero
    marker_map[unknown == 255] = 0

    marker_map = cv2.watershed(img, marker_map)

    return marker_map
6915b6a924e64d12340e02b28085290685dddc9b
21,795
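A hedged usage sketch: segment a grayscale image and count regions. "cells.png" is a hypothetical input path, and the label convention follows cv2.watershed (-1 boundaries, 1 background, >= 2 regions):

import cv2

gray = cv2.imread("cells.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input path
assert gray is not None, "image not found"
markers = watershed(gray)
print(f"{markers.max() - 1} segmented regions")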
def sub(x, y):
    """Returns the difference of compositions.

    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition that will be subtracted from.
    y : NumPy array, shape (n,) or (k,n)
        The composition to be subtracted.

    Returns
    -------
    z : NumPy array, shape (n,) or (k,n)
        The result of y subtracted from x.
    """
    z = perturbation(x, power(y, -1.0))  # 1.0 and not 1 forces coercion
    return z
c9e1fb31abb22a6efb9903c6e9f7cdc06cc110d0
21,796
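`perturbation` and `power` are the Aitchison operations on the simplex; the module's own helpers are not shown, so below are plausible standard definitions (closure renormalizes to sum 1) that make `sub` runnable and show that subtracting y from x + y recovers x:

import numpy as np

def closure(x):
    """Normalize so components sum to 1 (project onto the simplex)."""
    x = np.atleast_2d(x)
    return np.squeeze(x / x.sum(axis=-1, keepdims=True))

def perturbation(x, y):
    """Aitchison addition: componentwise product, re-closed."""
    return closure(np.asarray(x) * np.asarray(y))

def power(x, a):
    """Aitchison scalar multiplication: componentwise power, re-closed."""
    return closure(np.asarray(x) ** a)

x = closure([0.2, 0.3, 0.5])
y = closure([0.1, 0.6, 0.3])
print(sub(perturbation(x, y), y))  # recovers x, up to floating point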
import numpy as np


def _broadcast_concatenate(arrays, axis):
    """Concatenate arrays along an axis with broadcasting."""
    arrays = _broadcast_arrays(arrays, axis)
    res = np.concatenate(arrays, axis=axis)
    return res
5032ec0a90dde25d74906dbf661248086c785485
21,797
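`_broadcast_arrays` is a private helper not shown here; a plausible reconstruction using np.broadcast_shapes (NumPy >= 1.20) that broadcasts every axis except the concatenation axis, plus a shape check:

import numpy as np

def _broadcast_arrays(arrays, axis):
    """Broadcast all arrays to a common shape in every axis except `axis`
    (a plausible sketch of the private helper used above)."""
    shapes = [list(a.shape) for a in arrays]
    for s in shapes:
        s[axis] = 1
    common = np.broadcast_shapes(*[tuple(s) for s in shapes])
    out = []
    for a in arrays:
        target = list(common)
        target[axis] = a.shape[axis]
        out.append(np.broadcast_to(a, target))
    return out

a = np.ones((1, 3, 2))
b = np.zeros((4, 3, 5))
print(_broadcast_concatenate([a, b], axis=-1).shape)  # (4, 3, 7)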
import numpy as np


def get_final_bmi(data_dic, agex_low, agex_high, mrnsForFilter=[], filter=True):
    """
    Function to get the distinct bmi percentile readings for predictions.
    Returns outcome percentiles and labels

    #### PARAMETERS ####
    data_dic: dictionary of patient data
    agex_low: low age range for outcome prediction
    agex_high: high age range for outcome prediction
    mrnsForFilter: list of mrns to get outcomes for
    filter: default==True; if True returns mrn filtered data only,
        otherwise returns all data with either a 0 or ''
    """
    outcome = np.zeros(len(data_dic.keys()), dtype=float)
    outcome_pct = np.zeros(len(data_dic.keys()), dtype=float)
    outcome_labels = [''] * len(data_dic.keys())
    indices = np.zeros(len(data_dic.keys()))
    for (ix, k) in enumerate(data_dic):
        if (len(mrnsForFilter) > 0) & (str(data_dic[k]['mrn']) not in mrnsForFilter):
            continue
        bmi, pct, label = get_final_bmi_single(data_dic[k], agex_low, agex_high)
        if pct == 0 and label == '':
            continue
        outcome[ix] = bmi
        outcome_pct[ix] = pct
        outcome_labels[ix] = label
        indices[ix] = 1
    if filter:
        indices = (indices == 1)
        return outcome[indices], outcome_pct[indices], np.array(outcome_labels)[indices]
    else:
        return outcome, outcome_pct, np.array(outcome_labels)
e9793adf7470a695bd730f66817b735451df71a2
21,798
import sqlite3


def add_group_sub(uid: int, group_id: int) -> bool:  # add a subscription record
    """
    Insert a group record into an existing per-user table; do nothing if
    the group is already present.

    :param uid: numeric uid that uniquely identifies the user
    :param group_id: id of the group subscribing to this user
    """
    connection = sqlite3.connect(DB_PATH)
    cursor = connection.cursor()
    success = True
    group_exist = cursor.execute(
        f'select count(*) from _{uid} where group_id={group_id};').fetchone()[0]
    if not group_exist:
        cursor.execute(f'insert into _{uid} values({group_id}, 1);')  # translation on by default
        connection.commit()
    else:
        success = False
        logger.warning(f'Group {group_id} already exists in table _{uid}')
    cursor.close()
    connection.close()
    return success
de71f03aa56bf4ae963877281e7e70876f5e72ff
21,799
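An end-to-end sketch against a throwaway database, assuming everything runs in one namespace where DB_PATH and logger are the stand-ins below; the (group_id, translate) column layout is inferred from the insert statement above:

import logging
import sqlite3

DB_PATH = "subs_demo.sqlite3"            # stand-in for the module's constant
logger = logging.getLogger(__name__)     # stand-in for the module's logger

uid = 123456
conn = sqlite3.connect(DB_PATH)
conn.execute(f"create table if not exists _{uid} (group_id integer, translate integer);")
conn.commit()
conn.close()

print(add_group_sub(uid, 987654))  # True  -- first subscription
print(add_group_sub(uid, 987654))  # False -- duplicate, warning logged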