Columns: content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
def mysin(x):
    """ My sine. Note that this is only accurate for small x. """
    return x
9589b1a3106b13ff230e614dcfc1477b422c9879
11,091
import logging


def get_module_logger(module_name, level=None):
    """
    Get a logger for a specific module.

    :param module_name: str
        Logic module name.
    :param level: int
        Log level; defaults to logging.INFO.
    :return: Logger
        Logger object.
    """
    if level is None:
        level = logging.INFO

    module_name = "qlib.{}".format(module_name)
    # Get logger.
    module_logger = logging.getLogger(module_name)
    module_logger.setLevel(level)
    return module_logger
0fd2c8be1463da464700964c2d5ad895a40f4a9c
11,092
import glob


def getAllOfAFile(file_dir, ext):
    """
    Returns a list of all the files in the directory with the specified file extension.

    :param file_dir: Directory to search
    :param ext: The file extension (IE: ".py")
    """
    return glob.glob(file_dir + "/*" + ext)
876a4f4b30653bd08454db9ee425d56fe408623d
11,093
def set_weight_decay(model, weight_decay,
                     skip_list={"decoder.attention.v", "rnn", "lstm", "gru", "embedding"}):
    """
    Skip biases, BatchNorm parameters, rnns, and the attention projection layer v.
    """
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if len(param.shape) == 1 or any(skip_name in name for skip_name in skip_list):
            no_decay.append(param)
        else:
            decay.append(param)
    return [{'params': no_decay, 'weight_decay': 0.},
            {'params': decay, 'weight_decay': weight_decay}]
7e2702d544497b03fffda4558f4aa5fcd2dd4f43
11,094
def extract_sample_info(sample_str):
    """Extract kit, sample, and technical replicate from sample_str.

    Inputs -
        sample_str - string from sample name
    Returns -
        tuple (kit, biological sample name, technical replicate)
    """
    s = sample_str.replace('Ftube', '')

    # The biological sample is now the first character in the name
    bio = s[0]

    # Extract what kit is in sample
    kit = ''
    if 'kapabc' in s.lower():
        kit = 'Kapa'
    elif 'pbat' in s.lower():
        kit = 'PBAT'
    elif 'neb' in s.lower():
        kit = 'NEB'
    elif 'swift' in s.lower():
        kit = 'Swift'

    # Determine if low or high input
    if '10ng' in s:
        kit = 'Low ' + kit

    # Determine technical replicate
    rep = '1'
    if 'rep2' in s.lower():
        rep = '2'

    if (bio not in ['A', 'B']) or (kit == ''):
        print('[extract_sample_info] ERROR: Incorrect entry')
        return ('', '', '')

    return (kit, bio, rep)
f67440528391e1c463bb940cad9e59e8e61aa4c3
11,095
def OffsetPosition(in_ra, in_dec, delta_ra, delta_dec):
    """
    Offset a position given in decimal degrees.

    Parameters
    ----------
    in_ra: float
        Initial RA (decimal degrees).
    in_dec: float
        Initial DEC (decimal degrees).
    delta_ra: float
        Offset in RA (decimal degrees).
    delta_dec: float
        Offset in DEC (decimal degrees).

    Returns
    -------
    ra: float
        Offset RA.
    dec: float
        Offset DEC.
    """
    ra = in_ra
    dec = in_dec + delta_dec
    if dec > 90.:
        dec = 180 - dec
        ra = 180 + ra
    if dec < -90.:
        dec = -180 - dec
        ra = 180 + ra
    ra = ra + delta_ra
    if ra > 360.:
        ra = ra - 360.
    if ra < 0.:
        ra = ra + 360.
    return ra, dec
7b027a2e0bf87dba9d1136e68af258b21223cedb
11,096
def get_prime_factors(n):
    """ Find all prime factors of n and return them as a set (None if empty). """
    i = 2
    factors = set()
    while n > 1:
        if n % i == 0:
            factors.add(i)
            n = n // i  # integer division keeps n an int in Python 3
        else:
            # only increment if we did not find a factor.
            i = i + 1
    if len(factors) == 0:
        return None
    else:
        return factors
da6f13b1c910f4b90cf84bbb7cfe6ecc2b735863
11,097
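A hand-checked usage sketch for get_prime_factors above (illustrative, not part of the dataset row; note the function returns a set, or None when no factor is found):
>>> sorted(get_prime_factors(360))
[2, 3, 5]
>>> get_prime_factors(1) is None
True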
def ping():  # pragma: no cover
    """Simple task that just returns 'pong'.

    Coverage is skipped to ignore connection validation with the task
    server (celery).
    """
    return "pong"
9967a01f8c509e272e7215a843e513ce4dc9a2be
11,098
def get_database_user(config, credentials):
    """ Returns the database user from the credentials. """
    return credentials.get('user', 'unknown')
90e0a25a888e2de73ea7688a4da2e5de2ae02fbb
11,099
from collections import OrderedDict


def _list_to_dict_list(list_items):
    """ Takes a list and creates a dict with the list values as keys.
    Returns a list of the created dict or an empty list.
    """
    if list_items:
        k = OrderedDict()
        for item in list_items:
            k["%s" % item] = ""
        return [k]
    return []
48e3448b79530e4bc7c4674a168429208b9f62b4
11,100
def dms2deg(valin):
    """
    Converts DMS input to decimal degrees.
    Input can be either a string delimited by : or spaces, or a list of
    [D,M,S] numbers.

    Parameters
    ----------
    valin: str or list
        Input value in DMS. Can be either:
        - a string delimited by : or spaces
        - a list of [D,M,S] numbers (floats or ints)

    Returns
    -------
    valout : float
        Degrees corresponding to the DMS value

    Examples
    --------
    # e.g., '-78:12:34.56' corresponds to -77.7904 deg
    obs.dms2deg('-78:12:34.56')   #--> -77.79039999999999
    obs.dms2deg('-78 12 34.56')   #--> -77.79039999999999
    obs.dms2deg([-78, 12, 34.56]) #--> -77.79039999999999
    """
    if type(valin) == str:
        if ':' in valin:
            ra = [float(val) for val in valin.split(':')]
        else:
            ra = [float(val) for val in valin.split(' ')]
    else:
        ra = valin
    valout = ra[0] + ra[1] / 60. + ra[2] / 3600.
    return valout
3efac9d11c8a7b5933766a0610f49a884e20925b
11,101
def featureScale(x, xRef=None):
    """Helper function to perform feature scaling.

    INPUTS:
    x: pandas DataFrame or Series.
    xRef: reference pandas DataFrame.

    If only x is provided, x will be normalized against itself.
    If xRef is additionally supplied, x will be normalized against xRef.

    OUTPUTS:
    xPrime: pandas DataFrame (or Series, depending on type of x). Each column
        is scaled so that all values fall in the range [0, 1].
    """
    if xRef is None:
        xRef = x
    xPrime = (x - xRef.min()) / (xRef.max() - xRef.min())
    # If an entire column is NaN, zero it out.
    if len(xPrime.shape) > 1:
        # Pandas DataFrame (multi-dimensional)
        NaNSeries = xPrime.isnull().all()
    elif len(xPrime.shape) == 1:
        # Pandas Series (1-dimensional)
        NaNSeries = xPrime.isnull()
    else:
        raise UserWarning('Something went wrong in featureScale...')
    # Loop and zero out.
    for index in NaNSeries.index[NaNSeries]:
        xPrime[index] = 0
    return xPrime
bffa3403e8083efaa8deb24e03b48d0210d39652
11,103
def EncodeRadioRowCol(container, row, col):
    """Encode a (container, row, col) triple into a single integer.

    :param container: container index
    :param row: row index
    :param col: column index
    """
    RadValue = container * 100000 + row * 1000 + col
    return RadValue
92d9c8842432fded1b1b5e526ad735a9900cc219
11,106
import getpass


def get_user(prompt=None):
    """
    Prompts the user for their login name, defaulting to the USER
    environment variable. Returns a string containing the username.
    May throw an exception if EOF is given by the user.

    :type prompt: str|None
    :param prompt: The user prompt or the default one if None.
    :rtype: string
    :return: A username.
    """
    # Read the username.
    try:
        env_user = getpass.getuser()
    except KeyError:
        env_user = ''
    if prompt is None:
        prompt = "Please enter your user name"
    if env_user is None or env_user == '':
        user = input('%s: ' % prompt)
    else:
        user = input('%s [%s]: ' % (prompt, env_user))
    if user == '':
        user = env_user
    return user
fc392cfacc931ee915bb218a80e5db46245f2a1f
11,108
from collections import OrderedDict
import copy
import os

import torch


def load_checkpoint(cfg, checkpoint_file, model, optimizer=None, active_sampling=False):
    """Loads the checkpoint from the given file."""
    assert os.path.exists(checkpoint_file), "Checkpoint '{}' not found".format(
        checkpoint_file
    )
    # Load the checkpoint on CPU to avoid GPU mem spike
    temp_checkpoint = torch.load(checkpoint_file, map_location="cpu")
    checkpoint = copy.deepcopy(temp_checkpoint)
    # Account for the DDP wrapper in the multi-gpu setting
    ms = model
    # if not active_sampling:
    print("==============================")
    print("cfg.NUM_GPUS: ", cfg.NUM_GPUS)
    print("==============================")
    ms = model.module if cfg.NUM_GPUS > 1 else model

    isModuleStrPresent = False
    if "model_state" in checkpoint:
        checkpoint = checkpoint["model_state"]
    for k in checkpoint.keys():
        if k.find("module.") == -1:
            continue
        isModuleStrPresent = True
        break

    # remove module
    if isModuleStrPresent:
        print("Loaded checkpoint contains module present in keys.")
        print("So now removing 'module' strings")
        # remove module strings
        new_ckpt_dict = OrderedDict()
        for k, v in checkpoint.items():
            tmp_key = k.replace("module.", "")
            new_ckpt_dict[tmp_key] = v
        checkpoint = copy.deepcopy(new_ckpt_dict)
        print("Done!!")

    ms.load_state_dict(checkpoint)
    ms.cuda(torch.cuda.current_device())
    # Load the optimizer state (commonly not done when fine-tuning)
    if optimizer:
        optimizer.load_state_dict(temp_checkpoint["optimizer_state"])
    return 0 if isModuleStrPresent else temp_checkpoint["epoch"]
bc788bbd6f802ac839424d041122a77afff2fb25
11,109
import json


def load_commands(filename="commands.json"):
    """ Load all known commands. """
    with open(filename) as file_:
        commands = json.load(file_)
    return commands
66d90ecbcc0fcee182c0e7f25b1ff66d26c54145
11,110
def menu_item_flag(context, flag_type='', flag_iso='', flag_style='',
                   flag_classes='', **kwargs):
    """
    Templatetag menu_item_flag

    :param context: Getting context
    :param flag_type: Default empty, It accepts the string 'square'
    :param flag_iso: Default empty, ISO language country code
    :param flag_style: Pass inline styles to the img tag
    :param flag_classes: Pass classes to use on the img tag
    :param kwargs: Classes to HTML tags
    :return: A dict with classes
    """
    icon_full_path = f'icons/{flag_type}/{flag_iso}.svg'
    default = dict(li_class='', a_class='')
    classes = dict(default, **kwargs)
    return {
        'icon_class': flag_type,
        'icon_path': icon_full_path,
        'icon_iso': flag_iso,
        'icon_style': flag_style,
        'icon_classes': flag_classes,
        'classes': classes,
        'redirect_to': context.request.get_full_path
    }
2520a67ea2436743a1b5dec5a7d0321c68f31221
11,111
def _full_link(provider, word):
    """Return a website link for dictionary provider and word."""
    return 'http://' + provider + word
56681e50523910a0519e29f7446355a20d932284
11,113
def element_text(member_elt, elt_name):
    """Extract all `para` text from (`elt_name` in) `member_elt`."""
    text = []
    if elt_name:
        elt = member_elt.find(elt_name)
    else:
        elt = member_elt
    if elt:
        paras = elt.findAll('para')
        for p in paras:
            text.append(p.getText(separator=u' ').strip())
    return '\n\n'.join(text)
13ff356e1a584bcaa9c905c93dcafaa787ca936f
11,114
import torch


def to_device(data, device):
    """Move data to device

    Arguments:
        data {torch.Tensor} -- Tensor that should be moved to device
            (anything else raises RuntimeError).
        device {str, torch.device} -- Device data is moved to.

    Returns:
        torch.Tensor -- Data moved to device
    """
    if type(data) is not torch.Tensor:
        raise RuntimeError(f"Need 'data' to be tensors, not {type(data)}.")
    return data.to(device)
9e0661951e7793a92d7f1953bfb481ccf4ec4ca9
11,117
import os


def lookUpDirTree(fileName):
    """
    This is called when we are using a default name for either the
    FontMenuNameDB or the GlyphOrderAndAliasDB files. These are often
    located several dir levels above the font file, as they are shared
    by the font family.
    """
    MAX_LEVELS = 4
    path = None
    i = 0
    dirPath, fileName = os.path.split(fileName)
    while i <= MAX_LEVELS:
        path = os.path.join(dirPath, fileName)
        if os.path.exists(path):
            return path
        dirPath = os.path.join(dirPath, "..")
        i += 1
    return path
350622e51187284eaadd1ed0a588fa0efaf181d3
11,118
import argparse
import os


def get_args():
    """Get arguments."""
    parser = argparse.ArgumentParser(description="""
    This application translates your articles on Qiita into English ones
    with googletrans, and uploads them to Qiita automatically.
    Requirements: googletrans, Qiita access token
    (set as environment variable QIITA_ACCESS_TOKEN)
    """)
    parser.add_argument('--gist', action='store_true',
                        help='upload translated article to gist')
    parser.add_argument('--tweet', action='store_true',
                        help='tweet about translated article')
    parser.add_argument('--private', action='store_true',
                        help='set publish format to private')
    parser.add_argument('--auto', action='store_true',
                        help='execute translation and upload automatically')
    parser.add_argument('--token', default=os.getenv('QIITA_ACCESS_TOKEN'),
                        help='set Qiita\'s access token')
    args = parser.parse_args()
    return args
117e638e7ad542db5a89a6e100f3e70848b4c3f3
11,120
def _convert_line(line):
    """
    Parameters
    ----------
    line: str

    Returns
    -------
    list
    """
    line = line.upper().split()
    tmp = []
    for i in line:
        if '.' in i:
            try:
                tmp.append(float(i))
            except ValueError:
                tmp.append(i)
        else:
            try:
                tmp.append(int(i))
            except ValueError:
                tmp.append(i)
    return tmp
179b4ea803160679399195da15f147d2ff311d5f
11,121
def word_list_to_string(word_list, delimeter=" "):
    """Creates a single string from a list of strings

    This function can be used to combine words in a list into one long
    sentence string.

    Args:
        word_list (list/tuple): A list (or other container) of strings.
        delimeter (str, Optional): A string to delimit the strings in the
            list when combining the strings.

    Returns:
        A string.
    """
    string = ""
    for word in word_list:
        string += word + delimeter
    nchar = len(string)
    return str(string[0:nchar - 1])
040479df7e0d5aadda0b12dc944a53d4b380f044
11,122
def is_s3(url: str) -> bool:
    """Predicate to determine if a url is an S3 endpoint."""
    return url is not None and url.lower().startswith('s3')
f1e36654ae86057fb4ae73a90648095119f1b5af
11,123
def C2K(degC):
    """degC -> degK"""
    return degC + 273.15
877f52078bd0da13cd21a8665a6c89cc0fa90848
11,124
import sys
import os


def find_exe(name):
    """Finds an executable first in the virtualenv if available, otherwise
    falls back to the global name.
    """
    if hasattr(sys, 'real_prefix'):
        path = os.path.join(sys.prefix, 'bin', name)
        if os.path.isfile(path):
            return path
    return name
57837a0dd5a329441b5f7ab717c94a998d3fe9a7
11,125
def cfn_context():
    """ Context object, blank for now """
    return ""
a71bffc39ce00e00236667040feb156bb9a4f9a2
11,126
def hello_world():
    """Just an empty route with a string."""
    return "Hello there. This route doesn't do anything."
f6dcaa52d51aab51234f51b4f7a46e6e55aeb524
11,128
def dict2tsv(condDict):
    """Convert a dict into TSV format."""
    string = str()
    for i in condDict:
        # each value is expected to be a 2-tuple of floats
        string += i + "\t" + "{%f, %f}" % condDict[i] + "\n"
    return string
c73f8e3158ade699cc4589d541f05397f559d190
11,129
import os


def createSegmentSpecificPath(path, gpPrefix, segment):
    """
    Create a segment specific path for the given gpPrefix and segment

    @param gpPrefix a string used to prefix directory names
    @param segment a GpDB value
    """
    return os.path.join(path, '%s%d' % (gpPrefix, segment.getSegmentContentId()))
999ce16e3ce4d3923bd22d871a27655bd64d91af
11,130
def rescale_score_by_abs(score, max_score, min_score):
    """
    Normalize the relevance value (=score), according to the extremal
    relevance values (max_score and min_score), for visualization with a
    diverging colormap.

    I.e. rescale positive relevance to the range [0.5, 1.0], and negative
    relevance to the range [0.0, 0.5], using the highest absolute relevance
    for linear interpolation.
    """
    # CASE 1: positive AND negative scores occur --------------------
    if max_score > 0 and min_score < 0:
        if max_score >= abs(min_score):  # deepest color is positive
            if score >= 0:
                return 0.5 + 0.5 * (score / max_score)
            else:
                return 0.5 - 0.5 * (abs(score) / max_score)
        else:  # deepest color is negative
            if score >= 0:
                return 0.5 + 0.5 * (score / abs(min_score))
            else:
                return 0.5 - 0.5 * (score / min_score)
    # CASE 2: ONLY positive scores occur -----------------------------
    elif max_score > 0 and min_score >= 0:
        if max_score == min_score:
            return 1.0
        else:
            return 0.5 + 0.5 * (score / max_score)
    # CASE 3: ONLY negative scores occur -----------------------------
    elif max_score <= 0 and min_score < 0:
        if max_score == min_score:
            return 0.0
        else:
            return 0.5 - 0.5 * (score / min_score)
fe1df85166bb6ab34f6f30d06003d7946a92138e
11,131
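A hand-worked example for rescale_score_by_abs above (illustrative, not part of the dataset row; here abs(min_score) = 2.0 is the deepest relevance and sets the interpolation scale):
>>> rescale_score_by_abs(0.5, 1.0, -2.0)
0.625
>>> rescale_score_by_abs(-2.0, 1.0, -2.0)
0.0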
def calculate_percentile_rank(array, score):
    """Get a school score's percentile rank from an array of cohort scores."""
    true_false_array = [value <= score for value in array]
    if len(true_false_array) == 0:
        return
    raw_rank = float(sum(true_false_array)) / len(true_false_array)
    return int(round(raw_rank * 100))
bfdc64168c10d00c33294bf05851982e0712e230
11,132
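A quick hand-checked case for calculate_percentile_rank above (illustrative): 3 of the 4 cohort scores are <= 3, so the rank is 75.
>>> calculate_percentile_rank([1, 2, 3, 4], 3)
75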
def _one_recursive_step(list_pair, size_task, current_doubled_size_task):
    """Merge two sorted runs of length size_task stored back to back in list_pair."""
    i = 0
    j = 0
    a_list = list_pair[:size_task]
    b_list = list_pair[size_task:]
    c_list = []
    for k in range(current_doubled_size_task):
        # Guard against running past the end of either run
        # (and advance the index so the same element is not repeated)
        if i > size_task - 1:
            c_list.append(b_list[j])
            j += 1
            continue
        if j > size_task - 1:
            c_list.append(a_list[i])
            i += 1
            continue
        # Regular comparison
        if a_list[i] < b_list[j]:
            c_list.append(a_list[i])
            i += 1
        else:
            c_list.append(b_list[j])
            j += 1
    return c_list
ae0fe3440b8af38730de80fde4762f3c7fa94623
11,134
def gain_com(exp, num, value):
    """Change the pmt gain in a job.

    Return a list with parts for the cam command.
    """
    return [
        ("cmd", "adjust"),
        ("tar", "pmt"),
        ("num", str(num)),
        ("exp", str(exp)),
        ("prop", "gain"),
        ("value", str(value)),
    ]
d0bd65b62c4ef0f9f002cef9b929ec37a725389c
11,136
def root():
    """Returns a welcome message."""
    return 'Welcome'
c83c0d159cbabdb82905595b01058d531a03ddd1
11,140
import re


def deduce_look_back(in_features, target_features):
    """From the feature names, determine how large of a look back is used.

    Args:
        in_features (list of str): Names of input features.
        target_features (list of str): Names of target features.

    Returns:
        int: Number of look back features.
        int: Look back value.
    """
    def is_shared(target_feature):
        for in_feature in in_features:
            if re.match(re.escape(target_feature) + r'\d+$', in_feature):
                return True
        return False

    shared_features = list(filter(is_shared, target_features))
    if len(shared_features) == 0:
        return 0, None
    look_backs = []
    for shared_feature in shared_features:
        look_backs.append(0)
        for in_feature in in_features:
            if re.match(re.escape(shared_feature) + r'\d+$', in_feature):
                look_backs[-1] += 1
    if look_backs.count(look_backs[0]) != len(look_backs):
        raise ValueError('Inconsistent look back.')
    return len(look_backs), look_backs[0]
bae20baec986c888acfff159b491635e2e75a455
11,142
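A hand-checked example for deduce_look_back above (the feature names are illustrative): 'temp' and 'hum' are both shared, each with two lagged copies, so there are 2 look back features with a look back of 2.
>>> deduce_look_back(['temp1', 'temp2', 'hum1', 'hum2'], ['temp', 'hum'])
(2, 2)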
def round_based_player(*bot_moves):
    """ A Player which makes a decision dependent on the round index
    in a dict or list. (Or anything which responds to moves[idx].)

    Parameters
    ----------
    bot_moves : list or dict of moves
        the moves to make, a move is determined by moves[round]
    """
    def move(bot, state):
        try:
            next_move = bot_moves[bot.turn][bot.round]
            return (bot.position[0] + next_move[0],
                    bot.position[1] + next_move[1])
        except (IndexError, KeyError):
            return bot.position
    return move
2c3e57aae7d6a89c76f735bd7a267ff965d67256
11,143
import argparse


def _error_rate_arg(val):
    """Validates the error_rate for the arg parser"""
    try:
        val = float(val)
    except ValueError:
        raise argparse.ArgumentTypeError(f"{val} is not a floating-point literal")
    if val >= 0.0 and val <= 1.0:
        return val
    raise argparse.ArgumentTypeError(f"{val} not in range [0.0, 1.0]")
22e985cd3df59746c364316f289b65fe7bc768d2
11,145
def permutations_exact(n, k):
    """Calculates permutations by integer division.

    Preferred method for small permutations, but slow on larger ones.

    Note: no error checking (expects to be called through permutations())
    """
    product = 1
    for i in range(n - k + 1, n + 1):
        product *= i
    return product
4244e446aad6b36185575c8c7991bd05984b0796
11,146
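A quick sanity check for permutations_exact above (illustrative): P(5, 2) = 4 * 5 = 20.
>>> permutations_exact(5, 2)
20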
def get_group(machine, num):
    """Pick num core indices from machine, one core per node in round-robin
    fashion, moving to the next core offset once every node has been used.
    (Docstring inferred from the code; the original was empty.)
    """
    cores = []
    idx = 0
    r = 0
    while num > 0:
        assert r < machine.get_cores_per_node()
        cores.append(idx)
        idx += machine.get_cores_per_node()
        if idx >= machine.get_num_cores():
            r += 1
            idx = r
        num -= 1
    return cores
2a295b0dc11cc9e195fc54787b5355e340dce366
11,147
def attachment(url: str, filename="") -> dict:
    """
    Returns a dictionary using the expected dictionary format for attachments.
    When creating an attachment, ``url`` is required, and ``filename`` is optional.
    Airtable will download the file at the given url and keep its own copy of it.
    All other attachment object properties will be generated server-side soon afterward.

    Note:
        Attachment field values must be **an array of objects**.

    Usage:
        >>> table = Table(...)
        >>> profile_url = "https://myprofile.com/id/profile.jpg"
        >>> rec = table.create({"Profile Photo": [attachment(profile_url)]})
        {
            'id': 'recZXOZ5gT9vVGHfL',
            'fields': {
                'attachment': [
                    {
                        'id': 'attu6kbaST3wUuNTA',
                        'url': 'https://aws1.discourse-cdn.com/airtable/original/2X/4/411e4fac00df06a5e316a0585a831549e11d0705.png',
                        'filename': '411e4fac00df06a5e316a0585a831549e11d0705.png'
                    }
                ]
            },
            'createdTime': '2021-08-21T22:28:36.000Z'
        }
    """
    return {"url": url} if not filename else {"url": url, "filename": filename}
24564ca3e7dfb8cc35242b1d16fb7351fc9576ce
11,148
def NIST_SU(results):
    """Number of segmentation errors (missed segments and false alarm
    segments) over number of reference segments.
    """
    assert len(results) == 3
    TPs = results[0]
    FPs = results[1]
    FNs = results[2]
    if (FNs + FPs) == 0:
        return 0.0
    return ((FNs + FPs) / (TPs + FNs)) * 100
3c60a612223dc247109d24be45b32739af8587ef
11,150
def pathCountX(stairs: int, X):
    """Number of unique ways to climb `stairs` stairs using step sizes from X."""
    # we've reached the top
    if stairs == 0:
        return 1
    elif stairs < 0:
        return 0
    else:
        validSteps = []
        for num in X:
            if stairs >= num:
                validSteps.append(num)
        total = 0
        for steps in validSteps:
            total += pathCountX(stairs - steps, X)
        return total
8ee8bf554f6176b5f666fe33eea51a3ee982deee
11,151
def current_cloud_token(service):
    """Get the current state of the account.

    Args:
        service: Drive API service instance.

    Returns:
        int
    """
    response = service.changes().getStartPageToken().execute()
    return response.get('startPageToken')
dc2b61c2c0e5548269a6c746cf8dc1d152ec953d
11,152
def sort_stack(stack_object: list) -> list:
    """
    Sorts stack.

    :param stack_object: stack object, iterable object
    :return: sorted stack, iterable object
    """
    tmp_stack = []
    while stack_object:
        element = stack_object.pop(-1)
        # move larger elements back onto the input stack until
        # `element` can sit on top of everything smaller
        while tmp_stack and tmp_stack[-1] > element:
            stack_object.append(tmp_stack.pop())
        tmp_stack.append(element)
    return tmp_stack
ebdca3c263dfdc72bcdb0843b9e6e7d397ba2236
11,155
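A hand-checked example for sort_stack above (illustrative): the result is ascending, so the largest element ends up on top of the stack.
>>> sort_stack([3, 1, 2])
[1, 2, 3]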
import torch


def calculate_interaction_nominal(genotypes_t, phenotypes_t, interaction_t, residualizer,
                                  return_sparse=False, tstat_threshold=None):
    """
    genotypes_t:   [num_genotypes x num_samples]
    phenotypes_t:  [num_phenotypes x num_samples]
    interaction_t: [1 x num_samples]
    """
    ng, ns = genotypes_t.shape
    nps = phenotypes_t.shape[0]

    # centered inputs
    g0_t = genotypes_t - genotypes_t.mean(1, keepdim=True)
    gi_t = genotypes_t * interaction_t
    gi0_t = gi_t - gi_t.mean(1, keepdim=True)
    i0_t = interaction_t - interaction_t.mean()
    p0_t = phenotypes_t - phenotypes_t.mean(1, keepdim=True)

    # residualize rows
    g0_t = residualizer.transform(g0_t, center=False)
    gi0_t = residualizer.transform(gi0_t, center=False)
    p0_t = residualizer.transform(p0_t, center=False)
    i0_t = residualizer.transform(i0_t, center=False)
    i0_t = i0_t.repeat(ng, 1)

    # regression (in float; loss of precision may occur in edge cases)
    X_t = torch.stack([g0_t, i0_t, gi0_t], 2)  # ng x ns x 3
    Xinv = torch.matmul(torch.transpose(X_t, 1, 2), X_t).inverse()  # ng x 3 x 3
    # Xinv = tf.linalg.inv(tf.matmul(X_t, X_t, transpose_a=True))  # ng x 3 x 3

    # p0_tile_t = tf.tile(tf.expand_dims(p0_t, 0), [ng,1,1])  # ng x np x ns
    p0_tile_t = p0_t.unsqueeze(0).expand([ng, *p0_t.shape])  # ng x np x ns

    # calculate b, b_se
    # [(ng x 3 x 3) x (ng x 3 x ns)] x (ng x ns x np) = (ng x 3 x np)
    b_t = torch.matmul(torch.matmul(Xinv, torch.transpose(X_t, 1, 2)),
                       torch.transpose(p0_tile_t, 1, 2))
    dof = residualizer.dof - 2
    if nps == 1:
        r_t = torch.matmul(X_t, b_t).squeeze() - p0_t
        rss_t = (r_t * r_t).sum(1)
        b_se_t = torch.sqrt(Xinv[:, torch.eye(3, dtype=torch.uint8).bool()]
                            * rss_t.unsqueeze(1) / dof)
        b_t = b_t.squeeze(2)
        # r_t = tf.squeeze(tf.matmul(X_t, b_t)) - p0_t  # (ng x ns x 3) x (ng x 3 x 1)
        # rss_t = tf.reduce_sum(tf.multiply(r_t, r_t), axis=1)
        # b_se_t = tf.sqrt( tf.matrix_diag_part(Xinv) * tf.expand_dims(rss_t, 1) / dof )
    else:
        # b_t = tf.matmul(p0_tile_t, tf.matmul(Xinv, X_t, transpose_b=True), transpose_b=True)
        # convert to ng x np x 3??
        r_t = torch.matmul(X_t, b_t) - torch.transpose(p0_tile_t, 1, 2)  # (ng x ns x np)
        rss_t = (r_t * r_t).sum(1)  # ng x np
        b_se_t = torch.sqrt(Xinv[:, torch.eye(3, dtype=torch.uint8).bool()].unsqueeze(-1).repeat([1, 1, nps])
                            * rss_t.unsqueeze(1).repeat([1, 3, 1]) / dof)
        # b_se_t = tf.sqrt(tf.tile(tf.expand_dims(tf.matrix_diag_part(Xinv), 2), [1,1,nps])
        #                  * tf.tile(tf.expand_dims(rss_t, 1), [1,3,1]) / dof)  # (ng x 3) -> (ng x 3 x np)

    tstat_t = (b_t.double() / b_se_t.double()).float()  # (ng x 3 x np)

    # calculate MAF
    n2 = 2 * ns
    af_t = genotypes_t.sum(1) / n2
    ix_t = af_t <= 0.5
    maf_t = torch.where(ix_t, af_t, 1 - af_t)

    # tdist = tfp.distributions.StudentT(np.float64(dof), loc=np.float64(0.0), scale=np.float64(1.0))
    if not return_sparse:
        # calculate pval
        # pval_t = tf.scalar_mul(2, tdist.cdf(-tf.abs(tstat_t)))  # (ng x 3 x np)

        # calculate MA samples and counts
        m = genotypes_t > 0.5
        a = m.sum(1).int()
        b = (genotypes_t < 1.5).sum(1).int()
        ma_samples_t = torch.where(ix_t, a, b)
        a = (genotypes_t * m.float()).sum(1).round().int()  # round for missing/imputed genotypes
        ma_count_t = torch.where(ix_t, a, n2 - a)
        return tstat_t, b_t, b_se_t, maf_t, ma_samples_t, ma_count_t
    else:  # sparse output
        tstat_g_t = tstat_t[:, 0, :]  # genotypes x phenotypes
        tstat_i_t = tstat_t[:, 1, :]
        tstat_gi_t = tstat_t[:, 2, :]
        m = tstat_gi_t.abs() >= tstat_threshold
        tstat_g_t = tstat_g_t[m]
        tstat_i_t = tstat_i_t[m]
        tstat_gi_t = tstat_gi_t[m]
        ix = m.nonzero()  # indexes: [genotype, phenotype]
        return tstat_g_t, tstat_i_t, tstat_gi_t, maf_t[ix[:, 0]], ix
f3270e7623d8c3d3e6cdd706142b43650d21a7b4
11,156
def parse_default_kv(default, default_dict):
    """parse a string in form key1=value1;key2=value2,... as used for some
    template fields

    Args:
        default: str, in form 'photo=foto;video=vidéo'
        default_dict: dict, in form {"photo": "fotos", "video": "vidéos"}
            with default values

    Returns:
        dict in form {"photo": "fotos", "video": "vidéos"}
    """
    default_dict_ = default_dict.copy()
    if default:
        defaults = default[0].split(";")
        for kv in defaults:
            try:
                k, v = kv.split("=")
                k = k.strip()
                v = v.strip()
                default_dict_[k] = v
            except ValueError:
                pass
    return default_dict_
4d461589118915cde5461b6b8ea7cd5e5e4d5165
11,157
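A hand-checked example for parse_default_kv above (illustrative; note the function reads default[0], so the string is passed inside a list):
>>> parse_default_kv(["photo=foto;video=vidéo"], {})
{'photo': 'foto', 'video': 'vidéo'}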
def prefix_connection_id(connection_id, parent_connection_id):
    """Used to distinguish connection ids when they have the same id as a
    parent_connection.
    """
    if not len(connection_id) > len(parent_connection_id):
        return parent_connection_id + connection_id
    return connection_id
bdc1f92625a03c6b88dd6c81097aca0522db0929
11,158
import os


def read_tree(path):
    """ Read the directory structure of a path into a dictionary where files
    are True values, and subdirectories are more dictionaries."""
    tree = {}
    for dirpath, dirnames, filenames in os.walk(path):
        d = tree
        for x in os.path.relpath(dirpath, path).split(os.sep):
            if x == '.':
                continue
            d = d[x]
        for dirname in dirnames:
            d[dirname] = {}
        for filename in filenames:
            d[filename] = True
    return tree
f340ac968ad3617843c3643f9af58ef0738ab72c
11,159
import os


def idl_basename(f):
    """returns the base name of a file with the last extension stripped"""
    return os.path.basename(f).rpartition('.')[0]
201756c58254c1ac70de136289a23917e86718be
11,160
import re


def extract_intro_and_title(filename, docstring):
    """ Extract the first paragraph of module-level docstring. max:95 char"""
    # lstrip is just in case docstring has a '\n\n' at the beginning
    paragraphs = docstring.lstrip().split('\n\n')
    # remove comments and other syntax like `.. _link:`
    paragraphs = [p for p in paragraphs if not p.startswith('.. ')]
    if len(paragraphs) <= 1:
        raise ValueError(
            "Example docstring should have a header for the example title "
            "and at least a paragraph explaining what the example is about. "
            "Please check the example file:\n {}\n".format(filename))
    # Title is the first paragraph with any ReSTructuredText title chars
    # removed, i.e. lines that consist of (all the same) 7-bit non-ASCII chars.
    # This conditional is not perfect but should hopefully be good enough.
    title = paragraphs[0].strip().split('\n')
    title = ' '.join(t for t in title
                     if len(t) > 0 and (ord(t[0]) >= 128 or t[0].isalnum()))
    # Concatenate all lines of the first paragraph and truncate at 95 chars
    first_paragraph = re.sub('\n', ' ', paragraphs[1])
    first_paragraph = (first_paragraph[:95] + '...'
                       if len(first_paragraph) > 95 else first_paragraph)
    return first_paragraph, title
f1ae63f112f0c4ff91ba8863313b9bd978e16eb4
11,162
import os


def get_html_directory():
    """ Returns directory for raw HTML to scrape. """
    return os.path.join(os.path.dirname(__file__), 'html')
9c34953712760652f42d7f5700fd9c5966061012
11,164
def is_on(S, j):
    """
    Returns 1 if and only if the `j`-th item of the set `S` is on.

    Examples
    ========

    Check if the 3rd and then the 2nd item of the set is on:

    >>> S = 0b101010
    >>> is_on(S, 3), is_on(S, 2)
    (1, 0)
    """
    return (S & (1 << j)) >> j
76034f083372ee2cbe0c711e3a09daf26634550a
11,165
def _get_pil_image_dimensions(pil_image):
    """Gets the dimensions of the Pillow Image.

    Args:
        pil_image: Image. A file in the Pillow Image format.

    Returns:
        tuple(int, int). Returns height and width of the image.
    """
    width, height = pil_image.size
    return height, width
d191ae705df97b1729be2dd03e9a5ff4ddcb4518
11,167
import heapq


def solve(n, times):
    """ Min-heap / priority queue: each entry is the end time of a call
    an agent is currently handling. """
    if n == 0:
        return 0
    heap = []
    heapq.heappush(heap, times[0][1])
    for time in times[1:]:
        if time[0] >= heap[0]:
            # no new agent is needed; reuse the one that frees up earliest
            heapq.heappop(heap)
        heapq.heappush(heap, time[1])
    return len(heap)
064d582ef76dcae4e69b2c037d3fae07fde3b143
11,168
def wrap_list(item):
    """ Returns an object as a list.

    If the object is a list, it is returned directly. If it is a tuple or
    set, it is returned as a list. If it is another object, it is wrapped
    in a list and returned.
    """
    if item is None:
        return []
    elif isinstance(item, list):
        return item
    elif isinstance(item, (tuple, set)):
        return list(item)
    else:
        return [item]
6b2af543af39058f7df28e7d89dbb9231cf2b247
11,169
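A few hand-checked cases for wrap_list above (illustrative):
>>> wrap_list(None), wrap_list('a'), wrap_list(('a', 'b'))
([], ['a'], ['a', 'b'])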
from typing import Union

import zlib


def zlib_compress(data: Union[bytes, str]) -> bytes:
    """
    Compress things in a py2/3 safe fashion

    >>> json_str = '{"test": 1}'
    >>> blob = zlib_compress(json_str)
    """
    if isinstance(data, str):
        return zlib.compress(bytes(data, "utf-8"))
    return zlib.compress(data)
0e7eaf018873ce335b06c4ca4857f9bf8b58864b
11,170
import functools
import logging


def logdec(func):
    """A logging decorator wrapping a function with a standard logging mechanism.

    This decorator wraps a function with a standard logging mechanism,
    providing the following functionalities:

    * Logs the calling of the function and the parameters passed to it when called.
    * If no exception is raised during the function run - returns the result
      and logs the successful completion (not the result).
    * If an exception is raised - logs the exception and re-raises it.

    Based on https://medium.com/swlh/add-log-decorators-to-your-python-project-84094f832181

    Parameters
    ----------
    func : the function to be wrapped

    Returns
    -------
    result
        The result of the function if no exception was raised; otherwise the
        exception is logged and re-raised.
    """
    @functools.wraps(func)
    def logged(*args, **kwargs):
        # Create a list of the positional arguments passed to function
        args_passed_in_function = [repr(a) for a in args]
        # Create a list of the keyword arguments
        kwargs_passed_in_function = [f"{k}={v!r}" for k, v in kwargs.items()]
        # The lists of positional and keyword arguments are joined together
        # to form the final string
        formatted_arguments = ", ".join(
            args_passed_in_function + kwargs_passed_in_function
        )
        logger = logging.getLogger(func.__module__)
        logger.info(
            f"Starting function {func.__name__} with arguments: ({formatted_arguments})"
        )
        try:
            result = func(*args, **kwargs)
            logger.info(f"Successfully finished function {func.__name__}")
            return result
        except Exception as e:
            # Log exception if occurs in function
            logger.exception(f"Exception in {func.__name__}: {e}")
            raise e
    return logged
fd1ddf475f17758c09d3f44492a8b2ff66cf38de
11,171
def predict(sample, relations):
    """
    Predict entities and interactions of given sample w.r.t. set of known relations.

    :param sample: Dict with `id` and `text` as strings
    :param relations: Set of relation tuples
    :return: Sample augmented with extracted entities and interactions.
    """
    text = sample['text']

    interactions = []
    sample['interactions'] = interactions
    entities = []
    sample['entities'] = entities
    entity_registry = dict()

    def find_mentions(entity):
        start = -1
        while True:
            start = text.find(entity, start + 1)
            if start < 0:
                break
            end = start + len(entity)
            yield start, end

    def register_entity(entity):
        if entity in entity_registry:
            return entity_registry[entity]
        idx = len(entities)
        mentions = list(find_mentions(entity))
        entities.append({
            'is_state': False,
            'label': 'protein',
            'names': {
                entity: {
                    'is_mentioned': True,
                    'mentions': mentions
                }
            },
            'is_mentioned': True,
            'is_mutant': False
        })
        entity_registry[entity] = idx
        return idx

    for a, b in relations:
        if a not in text or b not in text:
            continue
        # As the database is symmetric, omit duplicates
        if a >= b:
            continue
        # Ensure we have entity registered
        a_idx = register_entity(a)
        b_idx = register_entity(b)
        interactions.append({
            'participants': [a_idx, b_idx],
            'type': 'bind',
            'label': 1
        })
    return sample
44046de044685609cf6252f2f106a60c2ecf5f7a
11,174
def get_manual_iface(manual_iface, node_class):
    """ Returns standardized interface dict based on manual_iface """
    iface_dict = {
        "node_id": "manual",
        "node_uri": None,
        "node_name": None,
        "node_addr": None,
        "node_fqdn": None,
        "node_class": node_class,
        "iface_id": "manual",
        "iface_uri": None,
        "iface_name": None,
        "iface_addr": None,
        "iface_speed": None,
    }
    iface_dict.update(manual_iface)
    return iface_dict
070207a1ba399b660147f0d3cb95419347e0344e
11,175
def travel_cost():
    """
    Returns the yearly cost of travel, at £22 per week.

    :return: The cost of travel.
    """
    # There are 52 weeks in a year minus two for Christmas and Summer holidays.
    return 22 * 50
fa51fba03ab3db9deea7f3e075c392d4c8db6cef
11,176
import string
import re


def tokenize_count(s: str) -> int:
    """
    Tokenizes the given string to count the number of words.

    :param s:
    :return: number of words
    """
    s = s.translate(str.maketrans('', '', string.punctuation + "„“–"))
    return len(re.split(r'\W+', s))
c68822f313a2ffcab11edf0c0ce146d758cb8e3f
11,177
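A quick hand-checked case for tokenize_count above (illustrative): punctuation is stripped before splitting, leaving two words.
>>> tokenize_count("Hello, world!")
2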
def set_idle_override(isUserActive: bool, isScreenUnlocked: bool) -> dict:
    """Overrides the Idle state.

    Parameters
    ----------
    isUserActive: bool
        Mock isUserActive
    isScreenUnlocked: bool
        Mock isScreenUnlocked

    **Experimental**
    """
    return {
        "method": "Emulation.setIdleOverride",
        "params": {"isUserActive": isUserActive, "isScreenUnlocked": isScreenUnlocked},
    }
21b51d27edef13f66818d8d72583745e6c3449e9
11,178
def format_strings(*strings):
    """Take an arbitrary number of strings and format them nicely.

    Returns the nicely formatted string.
    """
    accum_string = ""
    for s in strings:  # avoid shadowing the builtin `str`
        accum_string = "{0} {1}\n".format(accum_string, s)
    return accum_string
7785cf052ed04ce3185035b0921e98bd0d952449
11,179
def is_explicitly_rooted(path):
    """Return whether a relative path is explicitly rooted relative to the
    cwd, rather than starting off immediately with a file or folder name.

    It's nice to have paths start with "./" (or "../", "../../", etc.) so,
    if a user is that explicit, we still find the path in the suffix tree.
    """
    return path.startswith(('../', './')) or path in ('..', '.')
bde26849889ac5c951160e441cdd0c3c60871ab1
11,181
from typing import List


def snip_out(file_str: str, start_key: str) -> str:
    """From an anvil.yaml file, snips out only the string you want: the
    database description."""
    good_string: List[str] = []
    save_this_one = False
    for line in file_str.split('\n'):
        if line.startswith(start_key):
            good_string.append(line)
            save_this_one = True
        elif save_this_one is False:
            continue
        elif line[:1] == ' ' or line[:1] == '\t':  # slicing guards against empty lines
            good_string.append(line)
        else:
            save_this_one = False
    return '\n'.join(good_string)
8e9ebde180fb5ff6faefcbd92629c75f260ce518
11,182
def colour_code_segmentation(image, label_values, array_type):
    """
    Given a 1-channel array of class keys, colour code the segmentation results.

    # Arguments
        image: single channel array where each value represents the class key.
        label_values

    # Returns
        Colour coded image for segmentation visualization
    """
    # w = image.shape[0]
    # h = image.shape[1]
    # x = np.zeros([w,h,3])
    # colour_codes = label_values
    # for i in range(0, w):
    #     for j in range(0, h):
    #         x[i, j, :] = colour_codes[int(image[i, j])]
    colour_codes = array_type(label_values)
    x = colour_codes[image.astype(int)]
    return x
e9a6e9d0bb705cc17e30899126ca3081369f3a99
11,183
def init_step(idx, cols):
    """Helper function to find init suffix in a column

    Parameters
    ----------
    idx: int
        Index of 'init' column in cols.
    cols: list[str]
        List of column names.
    """
    for i in range(idx, len(cols)):
        if cols[i] != 'init':
            return 'init-' + cols[i]
    return None
ec056ef39c56ec9dbc534e70105d55aa9bbf7be5
11,184
def next_nuc(seq, pos, n):
    """ Returns the nucleotide that is n places from pos in seq.
    Skips gap symbols.
    """
    i = pos + 1
    while i < len(seq):
        if seq[i] != '-':
            n -= 1
            if n == 0:
                break
        i += 1
    if i < len(seq):
        return seq[i]
    else:
        return 'N'
dbe3d204d3399167630cf83c74b0f1742d1c8367
11,186
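A hand-checked example for next_nuc above (illustrative): starting at position 0 of 'AC-GT' and moving 2 nucleotides skips the gap symbol and lands on 'G'.
>>> next_nuc('AC-GT', 0, 2)
'G'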
import numpy


def avg_3_op(array_1, array_2, array_3, nodata):
    """Average 3 arrays. Skip nodata."""
    result = numpy.empty_like(array_1)
    result[:] = nodata
    valid_mask = (
        ~numpy.isclose(array_1, nodata) &
        ~numpy.isclose(array_2, nodata) &
        ~numpy.isclose(array_3, nodata))
    result[valid_mask] = (
        array_1[valid_mask] + array_2[valid_mask] + array_3[valid_mask]) / 3.
    return result
6e6a47b8e5e7cae065fa933275122d1f9cee7f88
11,187
import argparse


def parse_argv():
    """Parse sys.argv"""
    desc = 'Convert phitar yield files to a datagen input file'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--nrg_of_int', required=True,
                        help='beam energy of interest')
    parser.add_argument('-o', '--out', required=True,
                        help='output file to become a datagen input file')
    parser.add_argument('file', nargs='+',
                        help='ordered list of phitar yield files')
    return parser.parse_args()
caa54cdf2df15a7ce3b9bd896939b4ade2706f5d
11,188
def _mt_repr(self):
    """Print mutable tuple like dict."""
    return '{{{0}}}'.format(', '.join(
        '\'{0}\': {1!r}'.format(name, getattr(self, name))
        for name in self._fields))
bc01cb22d8263ce7df77e90214d587f0553627a7
11,189
def get_k_set_interval(vec: str, k: int, to_counter_plus_one: bool = False) -> tuple:
    """Return the interval between the k'th and (k+1)'th set bits of vec.

    Parameters
    ----------
    vec : str
        String of {0,1}*.
    k : int
        Number of set bits to skip before the interval starts
        (description inferred from the code; the original was garbled).
    ## to_counter_plus_one : bool
    ##     If True, assume vec begins in 1.

    Returns
    -------
    tuple of integers. (start, end)
        vec[start - 1] == the index of the "k"'th set bit in vec.
        vec[end] == the index of the "k + 1"'th set bit in vec.
    ## Remark: If "to_counter_plus_one" == True, then:
    ##     vec[start - 1] == min{the index of the [k-1]'th set bit in vec, 0}
    ##     vec[end] == the index of the "k"'th set bit in vec.
    """
    assert k >= 0
    assert set(vec) <= {"1", "0"}
    # to_minus = -int(to_counter_plus_one)
    set_bit_counter = 0
    continue_from_index = 0
    start_index = -1
    for i in range(len(vec)):
        if set_bit_counter == k:
            start_index = i
            continue_from_index = i
            break
        if vec[i] == '1':
            set_bit_counter += 1
    for i in range(continue_from_index, len(vec)):
        if vec[i] == '1':
            return start_index, i
    assert False
ac0dcfe0e9af6dae77942ba7beb682c9d52b57f0
11,191
def load_h5(h5f):
    """ Load fiberbundles configurations from a hdf5 class

    Parameters
    ----------
    h5f : hdf5 class
        h5-file or group object

    Returns
    -------
    res : list(list(fiber)), fibers are (n,4)-arrays
        with (x,y,z,radii) for each fiber point
    """
    fiber_bundles = []
    fb_list = list(map(int, list(h5f.keys())))
    fb_list.sort()
    for fb in fb_list:
        fiber_bundles.append([])
        f_list = list(map(int, list(h5f[str(fb)].keys())))
        f_list.sort()
        for f in f_list:
            fiber_bundles[-1].append(h5f[str(fb)][str(f)][:].astype(float))
    return fiber_bundles
47487b43ae375c5ade27c82ec083570ee9655e27
11,195
def fizz_buzzable(num):
    """ We're only interested in valid fizz buzz numbers. """
    return num % 3 == 0 or num % 5 == 0
4eba37245525ead66f4f30370d9e3d97c05aca55
11,196
from typing import List


def load_file(file_path: str) -> List[str]:
    """ Just a loader for a file with lines

    :param file_path: path to file
    :return: list of lines of your data
    """
    data: List[str] = list()
    with open(file_path) as file_object:
        for line in file_object:
            data.append(line.strip())
    return data
9aadf5c0a90f5c65868862e4366276c448077944
11,198
def uniquify_tablewidgetitems(a):
    """ Eliminates duplicate list entries in a list of TableWidgetItems.
    It relies on the row property being available for comparison.
    """
    if len(a) == 0:
        tmp = []
    else:
        tmp = [a[0]]
        # XXX: we need to compare row #s because PySide doesn't allow
        # equality comparison for QTableWidgetItems
        tmp_rows = set()
        tmp_rows.add(a[0].row())
        for i in range(1, len(a)):
            if a[i].row() not in tmp_rows:
                tmp.append(a[i])
                tmp_rows.add(a[i].row())
    return tmp
942eb3fb5e172185fe7f857f5e6e5e523984b75c
11,199
import random


def random_colors(*commands):
    """From tuple of commands, generate random but unique colors."""
    colors = ["blue", "green", "red", "yellow", "magenta", "cyan", "white"]
    num_colors = len(colors)
    num_commands = len(commands)
    if num_commands >= num_colors:
        colors += colors
    unique_colors = random.sample(colors, num_commands)
    commands_fmt = {}
    for i, cmd in enumerate(commands):
        commands_fmt.update({cmd: {"fg": unique_colors[i], "bold": True}})
    commands_fmt.update({"--help": {"fg": "white"}})
    return commands_fmt
2f55ce480a8550d94b595b94017371e525b8a8b4
11,200
def greeting() -> str:
    """The standard greeting.

    >>> assert greeting() == 'hello'
    """
    return 'hello'
eba4ada1f6dd154dc8197bce0c0128fea3d7e31e
11,201
import random


def mutation_chromossome(chromossomes, mutation_function, mutation_rate):
    """
    :param: chromossomes - list of chromosomes to (possibly) mutate
    :param: mutation_function - function applied to a chromosome to mutate it
    :param: mutation_rate - fraction of the population to mutate, in [0, 1]
    :return: chromossomes - the population with mutations applied
    """
    number_chromossomes_to_mutate = int(len(chromossomes) * mutation_rate)
    random.shuffle(chromossomes)
    for i in range(0, number_chromossomes_to_mutate):
        chromossomes[i] = mutation_function(chromossomes[i])
    return chromossomes
30a9a92724a91e70d10ba5c35fef0e1aef85300d
11,203
def pcmd(progressive, lr, fb, vv, va):
    """
    Makes the drone move (translate/rotate).

    Parameters:
    progressive -- True: enable progressive commands, False: disable
                   (i.e. enable hovering mode)
    lr -- left-right tilt: float [-1..1]
          negative: left, positive: right
    fb -- front-back tilt: float [-1..1]
          negative: forwards, positive: backwards
    vv -- vertical speed: float [-1..1]
          negative: go down, positive: rise
    va -- angular speed: float [-1..1]
          negative: spin left, positive: spin right

    The above float values are a percentage of the maximum speed.
    """
    assert type(progressive) == bool
    assert all(map(lambda x: abs(x) <= 1, (lr, fb, vv, va)))
    return progressive, float(lr), float(fb), float(vv), float(va)
4534ac48f00a39c944b1be01ea0818235aea2559
11,204
def b2h(n):
    """Convert bytes int into human friendly string format.

    >>> b2h(10000)
    '9.8 KB'
    >>> b2h(100001221)
    '95.4 MB'
    """
    t = "{0:.1f} {1}".format  # one decimal place, matching the doctests above
    symbols = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return t(value, s)
    return t(n, "B")
6337fa1d1c7a2e324bcbe99eac28225551f84ef5
11,205
import numpy as np


def f_calculate_thresholds(combined_uncertainties, phi=0.95):
    """
    Calculates EDMF thresholds for provided target reliability of identification

    Developed by : Sai G.S. Pai (ETH Singapore)
    Contact : saiganesh89@gmail.com
    Date: June 30, 2020

    INPUTS:
        combined_uncertainties : numpy array with each column containing
            combined uncertainty samples for each measurement point
        phi : target reliability of identification

    OUTPUTS:
        thresholds : numpy array with each row containing lower and upper
            bound uncertainty threshold values for each measurement point

    NOTE: Requires numpy
    """
    # set phi
    phi = float(phi) ** (1 / combined_uncertainties.shape[1])  # float to ensure value is numeric not list
    print("Phi = ", phi)  # Check Phi

    # Initialize search
    step_size = 1
    length = int((1 - phi) * combined_uncertainties.shape[0] / step_size)
    perc_calculation = np.zeros(shape=(length, 3))
    thresholds = np.zeros(shape=(combined_uncertainties.shape[1], 2))

    print("*** Starting search for thresholds ***")
    for sens_num in range(0, combined_uncertainties.shape[1]):
        temp_array = np.sort(combined_uncertainties[:, sens_num])  # sort samples in ascending order
        for iter_num in range(0, length):
            temp = np.zeros(shape=(1, 3))
            endA_init_samples = np.arange(0, 1 + (iter_num * step_size))  # end A is for the lower bound
            endB_init_samples = np.arange(len(endA_init_samples),
                                          len(endA_init_samples) + np.around(phi * len(temp_array)),
                                          dtype=int)  # end B is for the upper bound
            temp[0, 0] = np.max(temp_array[endA_init_samples])
            temp[0, 1] = np.max(temp_array[endB_init_samples])
            # calculate percentile range for each step
            temp[0, 2] = np.max(temp_array[endB_init_samples]) - np.max(temp_array[endA_init_samples])
            perc_calculation[iter_num, :] = temp
        # get index of lowest percentile range
        threshold_idx = np.where(perc_calculation[:, 2] == np.amin(perc_calculation[:, 2]))
        # EDMF thresholds corresponding to lowest percentile range
        thresholds[sens_num, :] = perc_calculation[threshold_idx, [0, 1]]
        print(thresholds)

    return thresholds  # numpy array with size number_measurements x 2
b82feca1823c2bd772f1f6ba06a6053f6d30ad15
11,208
def addToSession(session, flaskform):
    """
    Function used in the hiring manager tab. It must keep backward
    compatibility with the fields named commentOne..., commentTwo...,
    commentThree..., commentFour... since the DB was built in this way
    at first.
    """
    if 'evaluatorName' in flaskform:
        session['evaluatorName'] = flaskform.get('evaluatorName').strip()
        session['userID'] = flaskform.get('userID').strip()
        session['intervieweeFirstName'] = flaskform.get('intervieweeFirstName').strip()
        session['intervieweeLastName'] = flaskform.get('intervieweeLastName').strip()
        session['intervieweeRole'] = flaskform.get('intervieweeRole').strip()
        session['interviewDate'] = flaskform.get('interviewDate').strip()
        session['commentGeneral'] = flaskform.get('commentGeneral').strip()
        session['overallScore'] = flaskform.get('slider')
    elif 'commentOneCognitive' in flaskform:
        session['commentOneCognitive'] = flaskform.get('commentOneCognitive').strip()
        session['commentTwoCognitive'] = flaskform.get('commentTwoCognitive', '').strip()
        session['commentThreeCognitive'] = flaskform.get('commentThreeCognitive', '').strip()
        session['commentFourCognitive'] = flaskform.get('commentFourCognitive', '').strip()
        session['cognitiveScore'] = flaskform.get('slider')
    elif 'commentOneRoleRelated' in flaskform:
        session['commentOneRoleRelated'] = flaskform.get('commentOneRoleRelated').strip()
        session['commentTwoRoleRelated'] = flaskform.get('commentTwoRoleRelated', '').strip()
        session['commentThreeRoleRelated'] = flaskform.get('commentThreeRoleRelated', '').strip()
        session['commentFourRoleRelated'] = flaskform.get('commentFourRoleRelated', '').strip()
        session['rolerelatedScore'] = flaskform.get('slider')
    elif 'commentOneCoolness' in flaskform:
        session['commentOneCoolness'] = flaskform.get('commentOneCoolness').strip()
        session['commentTwoCoolness'] = flaskform.get('commentTwoCoolness', '').strip()
        session['commentThreeCoolness'] = flaskform.get('commentThreeCoolness', '').strip()
        session['commentFourCoolness'] = flaskform.get('commentFourCoolness', '').strip()
        session['coolnessScore'] = flaskform.get('slider')
    elif 'commentOneLeadership' in flaskform:
        session['commentOneLeadership'] = flaskform.get('commentOneLeadership').strip()
        session['commentTwoLeadership'] = flaskform.get('commentTwoLeadership', '').strip()
        session['commentThreeLeadership'] = flaskform.get('commentThreeLeadership', '').strip()
        session['commentFourLeadership'] = flaskform.get('commentFourLeadership', '').strip()
        session['leadershipScore'] = flaskform.get('slider')
    return session
8ecaf782373730efa6c251a4354de4355cf31884
11,210
import os
import errno


def makedirs(directory):
    """ Create a directory and any missing parent directories.

    It is not an error if the directory already exists.

    :param directory: The pathname of a directory (a string).
    :returns: :data:`True` if the directory was created,
              :data:`False` if it already exists.
    """
    try:
        os.makedirs(directory)
        return True
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False
        else:
            raise
b5eecd5e27b086f8d5619802a205c85ae697980c
11,211
def check_legal_moves(board):
    """Return the legal moves on the board."""
    return board[:, :, 0] == board[:, :, 1]
a8643c6cd06722b0110a09d8c12f34a20f03a31d
11,212
from typing import List
from typing import Tuple


def plotlines(
    optical_start: List[float], ratio_wanted: List[float]
) -> Tuple[List[float], List[float], List[float]]:
    """Draws the 'Distance Lines' for the main plot.

    Takes inputs of desired optical and radio limits of where the distance
    lines should be drawn on the plot.

    Args:
        optical_start: The optical flux values where the lines should start from.
        ratio_wanted: The desired ratios that determine where the lines end.

    Return:
        The optical points to plot along with the radio range to plot.
        Also returns the ratio wanted (I can't remember why!).
    """
    radio_range = []
    radio_range.reverse()
    optical_points = []
    OFLUX = optical_start
    optical_points.append(OFLUX)
    this_radio = optical_points[0] * ratio_wanted
    radio_range.append(this_radio)
    while this_radio < 99999.:
        this_radio *= 10.
        NEWOFLUX = this_radio / ratio_wanted
        optical_points.append(NEWOFLUX)
        radio_range.append(this_radio)
    return optical_points, radio_range, ratio_wanted
5c2b83b881c8b8101a5177709096c79dfce8c16c
11,214
import tempfile
import os
import yaml


def save(config):
    """Save a clang config to a new file and return the name of the file."""
    (fd, name) = tempfile.mkstemp()
    f = os.fdopen(fd, "a")
    f.write(yaml.dump(config))
    f.close()
    return name
39651cfc9e60547469fddebf7424e3ee9aa4e84b
11,215
def get_unique_fields(fld_lists):
    """Get unique namedtuple fields, despite potential duplicates in lists of fields."""
    flds = []
    fld_set = set([f for flst in fld_lists for f in flst])
    fld_seen = set()
    # Add unique fields to list of fields in order that they appear
    for fld_list in fld_lists:
        for fld in fld_list:
            # Add fields if the field has not yet been seen
            if fld not in fld_seen:
                flds.append(fld)
                fld_seen.add(fld)
    assert len(flds) == len(fld_set)
    return flds
0e131c5b3fe695670fafb51810c674e859c29b63
11,217
def cmp_func_different_hash(request):
    """Return a comparison function that checks whether two hashes are different."""
    return request.param
5e917de2db60c03d17fc5536e5af48e0328423fc
11,218
def winner(board):
    """This function accepts the Connect Four board as a parameter.
    If there is no winner, the function will return the empty string "".
    If the user has won, it will return 'X', and if the computer has won
    it will return 'O'."""
    for row in range(7):
        count = 0
        last = ''
        for col in range(7):
            row_win = board[row][col]
            if row_win == " ":
                count = 0
                continue
            if row_win == last:
                count += 1
            else:
                count = 1
            if count >= 4:
                return row_win
            last = row_win
    for col in range(7):
        count = 0
        last = ''
        for row in range(7):
            col_win = board[row][col]
            if col_win == " ":
                count = 0
                continue
            if col_win == last:
                count += 1
            else:
                count = 1
            if count >= 4:
                return col_win
            last = col_win
    # No winner: return the empty string
    return ""
7c7983ec33cf6aca89283a2c09b9fabf2045f1b9
11,219
import torch


def picp(target, predictions: list, total=True):
    """
    Calculate PICP (prediction interval coverage probability) or simply the
    % of true values in the predicted intervals

    Parameters
    ----------
    target : torch.Tensor
        true values of the target variable
    predictions : list
        - predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
        - predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
    total : bool, default = True
        - When total is set to True, return overall PICP
        - When total is set to False, return PICP along the horizon

    Returns
    -------
    torch.Tensor
        The PICP, which depending on the value of 'total' is either a scalar
        (PICP in %, for significance level alpha = 0.05, PICP should >= 95%)
        or a 1d-array over the horizon, in which case it is expected to
        decrease as we move along the horizon. Generally, higher is better.
    """
    # coverage_horizon = torch.zeros(targets.shape[1], device=targets.device, requires_grad=True)
    # for i in range(targets.shape[1]):
    #     # for each step in forecast horizon, calculate the % of true values in the predicted interval
    #     coverage_horizon[i] = (torch.sum((targets[:, i] > y_pred_lower[:, i]) &
    #                                      (targets[:, i] <= y_pred_upper[:, i])) / targets.shape[0]) * 100
    assert len(predictions) == 2
    # torch.set_printoptions(precision=5)
    y_pred_upper = predictions[0]
    y_pred_lower = predictions[1]
    coverage_horizon = 100. * (torch.sum((target > y_pred_lower) &
                                         (target <= y_pred_upper), dim=0)) / target.shape[0]
    coverage_total = torch.sum(coverage_horizon) / target.shape[1]
    if total:
        return coverage_total
    else:
        return coverage_horizon
ef075b4cf3904a5f854ab4db3ecfbc6ba66ad674
11,220
import io
import yaml
import hashlib


def compute_parse_tree_hash(tree):
    """Given a parse tree, compute a consistent hash value for it."""
    if tree:
        r = tree.as_record(code_only=True, show_raw=True)
        if r:
            r_io = io.StringIO()
            yaml.dump(r, r_io, sort_keys=False)
            result = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest()
            return result
    return None
38f5af485d6f008d46b8b4165096f30e5b004d0b
11,221
def knot_vector_from_params(degree, params, periodic=False):
    """Computes a knot vector from parameters using the averaging method.

    Please refer to Equation 9.8 on The NURBS Book (2nd Edition), pp.365
    for details.

    Parameters
    ----------
    degree : int
        The degree of the curve
    params : list of float
        Parameters on the curve in the range of [0, 1].

    Returns
    -------
    list of float
        The knot vector.

    Notes
    -----
    Is the same as geomdl.fitting.compute_knot_vector
    """
    kv = [0.0 for _ in range(degree + 1)]
    for j in range(1, len(params) - degree):
        # average `degree` consecutive parameters starting at index j
        v = (1.0 / degree) * sum(params[i] for i in range(j, j + degree))
        kv.append(v)
    kv += [1.0 for _ in range(degree + 1)]
    return kv
059463258011eaa4f76dfd8c640b62aa663d8226
11,223
import re


def time_to_frame(line: str, fps=24000 / 1001) -> int:
    """
    Converts a timestamp in the format <hours>:<minutes>:<seconds>.<milliseconds>
    into the corresponding frame number.
    <hours> and <milliseconds> are optional, and milliseconds can have arbitrary
    precision (which means they are no longer milliseconds :thonking: ).
    A parameter for the framerate can be passed if required.

    Valid example inputs: '1:23.456', '01:10:00', '0:00:00.000', '24:30.2'
    """
    # milliseconds are optional and may have arbitrary precision,
    # matching the docstring above
    timestamp = re.match(r'(\d{1,2}:)?\d{1,2}:\d{1,2}(\.\d+)?', line)
    if not timestamp:
        return -1
    times = timestamp.group(0).split(':')
    if '.' in times[-1]:
        # milliseconds are specified
        times[-1], ms = times[-1].split('.')
        frame = fps * (int(ms) / 10**len(ms))
    else:
        frame = 0
    for t in reversed(times):
        frame += (fps * int(t))
        fps *= 60
    return round(frame)
0942ce5526592eeb8c6a858631382530ad2dccbd
11,224
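A hand-worked example for time_to_frame above (illustrative; fps is fixed at 24 to keep the arithmetic exact): one second at 24 fps is frame 24.
>>> time_to_frame('0:01.0', fps=24)
24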
import torch


def get_test_dict(idx):
    """ Get FRCNN style dict """
    num_objs = 0
    boxes = torch.zeros((num_objs, 4), dtype=torch.float32)
    return {'boxes': boxes,
            'labels': torch.ones((num_objs,), dtype=torch.int64),
            'image_id': torch.tensor([idx]),
            'area': (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
            'iscrowd': torch.zeros((num_objs,), dtype=torch.int64),
            'visibilities': torch.zeros((num_objs), dtype=torch.float32)}
bd7e37499c30b9bd23d9b99ed187795fb9e5e6e1
11,225
import os


def here(file_name):
    """ Get the given file name relative to this module's directory """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), file_name))
51015b8e6f7443c3980a23b7ce0a27d9b5916748
11,227
from typing import List


def checksum(rows: List[List[int]]) -> int:
    """ Solves the AOC second puzzle. """
    check = 0
    for row in rows:
        minimum = min(row)
        maximum = max(row)
        check += maximum - minimum
    return check
e95ff4418899d1f09671aac83feb4f5d35072808
11,228
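A hand-checked example for checksum above (the well-known AoC day-2 sample spreadsheet): (9-1) + (7-3) + (8-2) = 18.
>>> checksum([[5, 1, 9, 5], [7, 5, 3], [2, 4, 6, 8]])
18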
def file_order(entry):
    """ For a PlaylistEntry, return its original order in the Playlist. """
    return entry['lineno']
8bbd6546e120cec018c0f7628fd1473ae5926dad
11,229