Columns: content (string, 35 to 416k characters), sha1 (string, 40 characters), id (int64, 0 to 710k)
import subprocess


def executeCommand(cmdName, params="", showSpinner=True):
    """Executes the given cmdName command and returns its output"""
    myProc = subprocess.Popen(cmdName + " " + params,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              shell=True)
    stdout, stderr = myProc.communicate()
    if len(str(stdout)) > len(str(stderr)):
        return stdout.decode()
    else:
        return stderr.decode()
9c639fb157a0542829bb39aa70aae5f95e3eb635
13,250
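A minimal usage sketch for executeCommand above; the "echo" command is illustrative only and assumes a POSIX shell is available.

# Hypothetical usage; "echo" stands in for a real command.
output = executeCommand("echo", "hello world")
print(output)  # -> hello world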
def _default_filter(line):
    """Default bad header filter.

    Filters out lines that start with ">From" or "From " (no colon).
    """
    line = line.strip()
    return not (line.startswith('>From') or line.startswith('From '))
4867ba376914d7d688038c8471f09405b0a17c24
13,253
def extract_number_missing(data, min_valid, drop_cols=['sample'], group='group'):
    """
    Counts how many valid values exist in each column and filters column labels
    with more valid values than the minimum threshold defined.

    :param data: pandas DataFrame with group as rows and protein identifier as column.
    :param str group: column label containing group identifiers. If None,
        the number of valid values is counted across all samples, otherwise
        it is counted per unique group identifier.
    :param int min_valid: minimum number of valid values required to keep a column.
    :param list drop_cols: column labels to be dropped.
    :return: List of column labels above the threshold.

    Example::

        result = extract_number_missing(data, min_valid=3, drop_cols=['sample'], group='group')
    """
    if group is None:
        groups = data.loc[:, data.notnull().sum(axis=0) >= min_valid]
    else:
        groups = data.copy()
        groups = groups.drop(drop_cols, axis=1)
        groups = groups.set_index(group).notnull().groupby(level=0).sum(axis=1)
        groups = groups[groups >= min_valid]

    groups = groups.dropna(how='all', axis=1)
    return groups.columns.unique().tolist()
bd1762374b978a9470cef1fa301bc32b996ad5e7
13,254
import numpy


def truncate_curve(x, y, length):
    """Truncate a curve to target a given length."""
    # Compute cumulative sum of the segments length.
    segment_lengths = numpy.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)
    cumul = numpy.cumsum(numpy.insert(segment_lengths, 0, 0.0))
    # Find the index of the point just before reaching the distance.
    idx = numpy.where(cumul < length)[0][-1]
    # Interpolate the point between indices `idx` and `idx + 1`.
    extra_length = length - cumul[idx]  # remainder
    segment_length = cumul[idx + 1] - cumul[idx]
    alpha = extra_length / segment_length
    xi = x[idx] + alpha * (x[idx + 1] - x[idx])
    yi = y[idx] + alpha * (y[idx + 1] - y[idx])
    # Keep the section of interest.
    xs = numpy.append(x[:idx + 1], xi)
    ys = numpy.append(y[:idx + 1], yi)
    return xs, ys
f64cca96b8646dc94a3dbc745241ef0e427a8c2a
13,256
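A quick worked example for truncate_curve above, using a made-up straight-line polyline so the cut point is easy to verify by hand.

import numpy

# Hypothetical test data: a straight line from (0, 0) to (2, 0).
x = numpy.array([0.0, 1.0, 2.0])
y = numpy.array([0.0, 0.0, 0.0])
xs, ys = truncate_curve(x, y, length=1.5)
print(xs)  # [0.  1.  1.5] -- the curve is cut at arc length 1.5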
import torch


def dot(input_, other):
    """Wrapper of `torch.dot`.

    Parameters
    ----------
    input_ : DTensor
        The first operand.
    other : DTensor
        The second operand.
    """
    if input_._data.ndim == 1 and other._data.ndim == 1:
        return torch.dot(input_._data, other._data)
    return torch.matmul(input_._data, other._data)
e02d6215764f88263912d69720acb6a8a103ed49
13,257
def parse_starlog(path_to_log):
    """Parse star logfile into a dict.

    Args:
        path_to_log: filepath containing the star run log

    Returns:
        dict that contains metrics from star log
    """
    qc_dict = {}
    with open(path_to_log) as f:
        for line in f:
            if "|" in line:
                tokens = line.split("|")
                qc_dict[tokens[0].strip()] = tokens[1].strip()
    return qc_dict
ced4f91846a4c658c1801eeeb75430cdecafb32d
13,258
def addBinaryB(a, b):
    """
    :type a: str
    :type b: str
    :rtype: str
    """
    a_decimal = 0
    b_decimal = 0
    for i, num in enumerate(a):
        if num == '1':
            a_decimal += pow(2, len(a) - i - 1)
    for x, num in enumerate(b):
        if num == '1':
            b_decimal += pow(2, len(b) - x - 1)
    result = a_decimal + b_decimal
    if result == 0:
        return "0"
    last = ""
    while result >= 1:
        last += str(result % 2)
        result = result // 2
    return last[::-1]
099167e14b4889ce0c08e5934acd2124de91145c
13,259
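A couple of hand-checkable calls for addBinaryB above (the inputs are arbitrary).

print(addBinaryB("1010", "1011"))  # '10101' (10 + 11 = 21)
print(addBinaryB("0", "0"))        # '0'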
def get_universe_id_from_script(universe_script):
    """ Get the id of a universe given the universe script """
    return int(universe_script.split('.')[0].split('_')[1])
a714d1de25f4972c1cccd6c70453d01e2938af2b
13,260
def _cytomine_parameter_name_synonyms(name, prefix="--"):
    """For a given parameter name, returns all the usual synonyms (including
    the parameter itself). Optionally, the function can prepend a string to
    the found names. If a parameter has no known synonyms, the function
    returns only the prefixed $name.

    Parameters
    ----------
    name: str
        Parameter based on which synonyms must be searched for
    prefix: str
        The prefix

    Returns
    -------
    names: list
        List of prefixed parameter names containing at least $name
        (prepended with $prefix).
    """
    synonyms = [
        ["host", "cytomine_host"],
        ["public_key", "publicKey", "cytomine_public_key"],
        ["private_key", "privateKey", "cytomine_private_key"],
        ["base_path", "basePath", "cytomine_base_path"],
        ["id_software", "cytomine_software_id", "cytomine_id_software", "idSoftware", "software_id"],
        ["id_project", "cytomine_project_id", "cytomine_id_project", "idProject", "project_id"]
    ]
    synonyms_dict = {params[i]: params[:i] + params[(i + 1):]
                     for params in synonyms for i in range(len(params))}

    if name not in synonyms_dict:
        return [prefix + name]

    return [prefix + n for n in ([name] + synonyms_dict[name])]
4d8d58571b03182cf31546c370fda81e2caf5800
13,262
def pos(number: int) -> int:
    """Used for indexing."""
    return 0 if number < 0 else number
5b4745f067855234fab4f0eb9bc3e8a10d324341
13,263
def read_prot(filepath):
    """
    This function accepts the filepath of a protein to align, ignores the
    first line (preceded by the '>' char), strips newlines, and returns the
    protein as a single string.
    """
    seq = ""
    with open(filepath) as f:
        for line in f:
            if not line.startswith(">"):
                seq += line.rstrip()
    return seq
e09fab92fca28601d5c92d927ceddccaaf1f5569
13,264
import os
import json


def get_json_prices(json_path):
    """
    Load the saved prices (the first price of all time and the last four
    prices) for further calculation or analysis. If the file does not exist
    yet, create it with an empty list.
    """
    if not os.path.exists(json_path):
        saved_prices = []
        with open(json_path, mode="w") as json_file:
            json.dump(saved_prices, json_file)
        return saved_prices
    else:
        with open(json_path, mode="r") as json_file:
            data = json.load(json_file)
        return data
1e09db1c64f2d81bf9c038e165234ad718fdcfab
13,267
def _create_table_query(table_name: str) -> str:
    """Build SQL query to create metrics table."""
    return (
        f"CREATE TABLE IF NOT EXISTS {table_name}("
        f" id SERIAL PRIMARY KEY,"
        f" page_url TEXT,"
        f" http_code SMALLINT,"
        f" response_time INT,"
        f" timestamp TIMESTAMPTZ"
        f")"
    )
427bf61a475c0f012b0242880400d1573ae8bbd1
13,268
def count_instance_attributes(listInst):
    """Return the number of attributes across all instances, or None
    if the instances differ in the number of attributes they contain.

    >>> listInst = [Instance([1,2,3],True), Instance([4,5,6],False)]
    >>> count_instance_attributes(listInst)
    3
    >>> count_instance_attributes([Instance([1,2],True),Instance([3],False)])
    """
    countAttr = len(listInst[0].listAttrs)
    for inst in listInst:
        if countAttr != len(inst.listAttrs):
            return None
    return countAttr
8fa925d9900973a1e535e7b5a33d2f5fc1f89009
13,269
def basis_string_pn(basis):
    """ basis_string_pn(basis) -> name

    Takes basis specified as tuple (basis_name_p, beta_p, basis_name_n, beta_n)
    and converts to string for use in filename.
    """
    return "%s-%.3f-%s-%.3f" % basis
3c57bc21314dc944f3025b5e992ec9f6dcb0c1dd
13,270
def to_binary(df, variable_names):
    """
    Recode specified variables of dataframe to binary; any nonzero value is
    set to 1 and zero values are set to 0. This replaces the existing
    column(s).

    Parameters
    ----------
    df : pandas DataFrame
        dataframe containing variables to be recoded
    variable_names : list
        list of variable names to recode to binary

    Returns
    -------
    recoded_df : pandas DataFrame
        dataframe containing the recoded binary variables
    """
    recoded_df = df.copy()
    recoded_df[variable_names] = (
        recoded_df[variable_names]
        .astype(bool)
        .astype("int64")
    )
    return recoded_df
604b5a84a7ade73b9e569fa4d238f32e9039acee
13,271
def get_http_body(http_request):
    """Given a HTTP request, return the body."""
    # Split only on the first blank line so a body that itself
    # contains "\r\n\r\n" is not truncated.
    return http_request.split("\r\n\r\n", 1)[1]
4a11c97f0eeddac933e8b311d9c1cfda6e61674c
13,273
import numpy


def column_period(c: numpy.ndarray, thresh: int = 0):
    """
    Guess the periodicity of a column of (image) data

    Parameters
    ----------
    c : ndarray
        Column of data (e.g. pixel values)
    thresh : int
        Optional threshold (default: 0)

    Returns
    -------
    p : int (or float)
        Guessed periodicity
    """
    # Autocorrelation of the column at increasing lags.
    cc = numpy.zeros(c.size // 2)
    for ck in range(1, cc.size):
        cc[ck] = numpy.corrcoef(c[:-ck], c[ck:])[0, 1]
    cc[numpy.isnan(cc)] = 0.0
    # Autocorrelation of the autocorrelation, to sharpen the peaks.
    ccc = numpy.zeros(cc.size // 2)
    for ck in range(3, ccc.size):
        ccc[ck - 1] = numpy.corrcoef(cc[1:-ck], cc[ck:-1])[0, 1]
    ccc[numpy.isnan(ccc)] = -1.0
    ccs = numpy.argsort(-ccc)
    ccsv = numpy.median(ccc[ccs[0:3]]) * 0.816
    ccsl = numpy.sort(ccs[ccc[ccs] >= ccsv])
    while thresh > 0 and len(ccsl) > 1 and ccsl[0] < thresh:
        ccsl = ccsl[1:]
    if len(ccsl) == 1:
        return ccsl[0]
    while len(ccsl) > 3 and ccsl[0] < ccsl[1] // 3:
        ccsl = ccsl[1:]
    ccsy = ccsl[-1]
    ccsx = ccsl[0]
    ccsr = ccsy % ccsx
    if ccsr == 0:
        return ccsx
    if ccsx - ccsr < (ccsx // 4):
        ccsr = ccsx - ccsr
    if ccsr < (ccsx // 4) and ccsx >= 6 and len(ccsl) > 3:
        ccst = ccsl.astype(numpy.float64) / float(ccsx)
        ccsi = numpy.trunc(ccst + 0.5)
        ccsd = float(ccsx) * (ccst - ccsi)
        ccsx = float(ccsx) + numpy.sum(ccsd) / numpy.sum(ccsi)
        return ccsx
    # Greatest common divisor of the two candidate periods (Euclid).
    while ccsy % ccsx != 0:
        (ccsy, ccsx) = (ccsx, ccsy % ccsx)
    return ccsx
0f6821832f4e6309da8e8130de27ba9eb7fbf5d4
13,274
def parse_gn_flags(gn_lines):
    """
    Parse lines of GN flags into dictionary
    """
    gn_args = {}
    for line in gn_lines:
        name, var = line.strip().partition("=")[::2]
        gn_args[name.strip()] = var.strip()
    return gn_args
98d8f91e9defea1484a569deb70fb7cc62b2c743
13,275
def DeleteNode(pListHead, pToBeDeleted):
    """
    Delete a linked-list node in O(1) time.
    :param pListHead: head of the list
    :param pToBeDeleted: node to delete
    :return:
    """
    if not pListHead or not pToBeDeleted:
        return None
    if pToBeDeleted.next_ is not None:
        # The node to delete has a successor: copy the successor's value
        # into it, then unlink and delete the successor.  (Keeping a
        # reference first avoids deleting the wrong node after relinking.)
        next_node = pToBeDeleted.next_
        pToBeDeleted.val = next_node.val
        pToBeDeleted.next_ = next_node.next_
        next_node.__del__()
    elif pToBeDeleted == pListHead:
        # The list contains only one node.
        pToBeDeleted.__del__()
        pListHead.__del__()
    else:
        # The node to delete is the tail: walk to its predecessor.
        tmpNode = pListHead
        while tmpNode.next_ != pToBeDeleted:
            tmpNode = tmpNode.next_
        tmpNode.next_ = None
        pToBeDeleted.__del__()
41bfc3ed1c0d6a2eb796e2d4e2170888a907367c
13,277
def forcing_accumulator(temperature):
    """
    The accumulated forcing for each observation and doy in the
    (obs, doy) array.
    """
    return temperature.cumsum(axis=0)
4813d7cb019001b038ef71c143c9e3db696af9ca
13,278
import math


def floor(x, unit=1):
    """
    Returns greatest multiple of 'unit' below 'x'
    """
    # math.floor (rather than int()) keeps the result correct for
    # negative x, where int() would truncate towards zero.
    return unit * math.floor(x / unit)
b289bf714eb3ecf23072533a51badeac43743686
13,279
def gen_vault_response_kv1(file_contents):
    """
    Provides the vault response for a given processed configuration file
    """
    def _gen_vault_response(file_contents=file_contents, secret_key=""):
        if secret_key != "":
            vault_response = {"data": {secret_key: file_contents}}
        else:
            vault_response = {"data": {"file": file_contents}}
        return vault_response
    return _gen_vault_response
fa9fdda0ac28d2bea2cea4bb4fd24456c1013c2d
13,280
import functools


def firmware_command(fn):
    """Check if we are in the firmware before running commands that require it."""
    @functools.wraps(fn)
    def _decorated(self, *args, **kwargs):
        # if self.status != 'firmware':
        #     # Note: we could instead reset_to_firmware here.
        #     raise BlitSerialWrongModeException("32Blit is not in firmware mode. Please exit game or reset.")
        return fn(self, *args, **kwargs)
    return _decorated
6941c0a755a208c8fbd7860c716bd2dc6848546f
13,281
import hashlib
import io


def calculate_file_checksum(path):
    """
    Calculate the MD5 sum for a file:
    Read chunks of a file and update the hasher.
    Returns the hex digest of the md5 hash.
    """
    hasher = hashlib.md5()
    with io.FileIO(path, 'r') as fp:
        while True:
            buf = fp.read(65536)
            if not buf:
                break
            hasher.update(buf)
    return hasher.hexdigest()
6bddfd83a8bc326fcfea8fb69affde45f3cb1dd8
13,282
import torch


def get_position(lens, maxlen, left_pad=False):
    """ transform sequence length to a series of positions. e.g., 3 -> 1,2,3"""
    batch_size = lens.size(0)
    pos = torch.zeros((batch_size, maxlen), dtype=torch.long, device=lens.device)
    for i, input_len in enumerate(lens):
        if not left_pad:
            pos[i, :input_len] = torch.arange(1, input_len + 1, dtype=torch.long, device=lens.device)
        else:
            pos[i, maxlen - input_len:] = torch.arange(1, input_len + 1, dtype=torch.long, device=lens.device)
    return pos
48c2ac5c0bde1f554b3b44c82177945ab2c667ab
13,283
def convert_bxd_name(name: str) -> str:
    """
    the names of BXDs are not consistent across all metadata and VCF files,
    so we sometimes have to convert their names

    >>> convert_bxd_name("4512-JFI-0469_BXD100_RwwJ_phased_possorted_bam")
    'BXD100_RwwJ_0469'
    >>> convert_bxd_name("4512-JFI-0410_BXD013_TyJ_phased_possorted_bam")
    'BXD013_TyJ_0410'
    """
    bxd_line_name = '_'.join(name.split('_phased')[0].split('_')[1:])
    bxd_line_num = name.split('_phased')[0].split('_')[0].split('-')[-1]

    bxd_line_new = bxd_line_name + '_' + bxd_line_num

    return bxd_line_new
577f936d81fdd7a87bd02fec069ff5f09e3b1412
13,284
import torch


def makeInp(inputs):
    """Move tensors onto GPU if available.

    Args:
        inputs: A dict with a batch of word-indexed data from DataLoader.
            Contains ['brk_sentence', 'bs_inp_lengths', 'style', 'sentence',
            'st_inp_lengths', 'marker', 'mk_inp_lengths']

    Returns:
        inputs: The dict with same structure but stored on GPU.
    """
    if torch.cuda.is_available():
        for key in inputs:
            inputs[key] = inputs[key].cuda()
    return inputs
af00b78f33627f45e505a6b8c75fb07ef29b94e6
13,285
import warnings


def rename_internal_nodes(tree, names=None, inplace=False):
    """ Names the internal nodes according to level ordering.

    The tree will be traversed in level order (i.e. top-down, left to right).
    If `names` is not specified, the node with the smallest label (y0)
    will be located at the root of the tree, and the node with the largest
    label will be located at bottom right corner of the tree.

    Parameters
    ----------
    tree : skbio.TreeNode
        Tree object where the leafs correspond to the features.
    names : list, optional
        List of labels to rename the tip names.  It is assumed that the
        names are listed in level ordering, and the length of the list
        is at least as long as the number of internal nodes.
    inplace : bool, optional
        Specifies if the operation should be done on the original tree or not.

    Returns
    -------
    skbio.TreeNode
        Tree with renamed internal nodes.

    Raises
    ------
    ValueError:
        Raised if `tree` and `name` have incompatible sizes.
    """
    if inplace:
        _tree = tree
    else:
        _tree = tree.copy()

    non_tips = [n for n in _tree.levelorder() if not n.is_tip()]
    if names is not None and len(non_tips) != len(names):
        raise ValueError("`_tree` and `names` have incompatible sizes, "
                         "`_tree` has %d internal nodes, `names` has %d elements."
                         % (len(non_tips), len(names)))

    i = 0
    for n in _tree.levelorder():
        if not n.is_tip():
            if names is None:
                label = 'y%i' % i
            else:
                label = names[i]
            if n.name is not None and label == n.name:
                warnings.warn("Warning. Internal node (%s) has been replaced "
                              "with (%s)" % (n.name, label), UserWarning)

            n.name = label
            i += 1
    return _tree
d5df42023afe184af41d7553b2e1491b09d5edc1
13,286
def headers(mime, length):
    """Returns a list of HTTP headers given the MIME type and the length
    of the content, in bytes (in integer or string format)."""
    return [('Content-Type', mime),
            ('Content-Length', str(length))]
da5f73591bf9d4bbc8b1d01f4d6babf0de54ce00
13,287
import os


def path_to_resources():
    """
    Returns the path to the resources folder.
    """
    if os.path.isdir("resources"):
        return "resources" + os.sep
    if os.path.isdir("tests") and os.path.isdir("src"):
        return "tests" + os.sep + "resources" + os.sep
    raise FileNotFoundError(
        "Couldn't locate the tests/resources directory. Make sure that " +
        "the pytest command is run from within the base islatu directory" +
        ", or from within the tests directory."
    )
79a4ff91b0cb8a60042c3d828d72ad132cac3df4
13,288
import statistics


def run_stats(this_set):
    """ Find standard deviation and mean (average) for the data set. """
    # Simply calculate Mean & StdDev for this set.
    # Return TUPLE of FLOATS
    return statistics.mean(this_set), statistics.stdev(this_set)
35ce499654bf7e8fe43d862b5456b0578a093fc7
13,289
def highest_match_action(ic, dc, sc, im, dm, sm, cost):
    """Given the following values, choose the action (insertion, deletion,
    or substitution) that results in the highest match score (ties are
    broken using the distance values). This is used within the dynamic
    programming algorithm.

    * ic - insertion cost
    * dc - deletion cost
    * sc - substitution cost
    * im - insertion match (score)
    * dm - deletion match (score)
    * sm - substitution match (score)
    """
    # pylint: disable=unused-argument
    best_action = None
    lowest_cost = float("inf")
    max_match = max(im, dm, sm)
    if max_match == sm and cost == 0:
        best_action = 'equal'
        lowest_cost = sm
    elif max_match == sm and cost == 1:
        best_action = 'replace'
        lowest_cost = sm
    elif max_match == im and ic < lowest_cost:
        best_action = 'insert'
        lowest_cost = ic
    elif max_match == dm and dc < lowest_cost:
        best_action = 'delete'
        lowest_cost = dc
    return best_action
d1d5b47e02ec9df8b65d3856b8ebba70511ca4d4
13,290
def generate_csv_url(sheet_url):
    """
    Utility function for generating csv URL from a google sheets link
    This function generates a link to a csv file from a link used to edit
    a google sheets file. The gid must be present in the URL.

    Parameters
    ----------
    sheet_url : str
        The URL for the google sheet file

    Returns
    -------
    str
        URL for the csv file
    """
    if isinstance(sheet_url, str):
        if sheet_url.find("edit#gid") > -1:
            return sheet_url.replace("edit#gid", "export?format=csv&gid")
        else:
            raise ValueError("sheet_url must contain 'edit#gid' phrase")
    else:
        raise TypeError("sheet_url must be a string")
d941ef98f3400175b9db4f7ef5da858fc6426caf
13,291
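An illustrative call to generate_csv_url above; the sheet ID is made up.

url = "https://docs.google.com/spreadsheets/d/abc123/edit#gid=0"
print(generate_csv_url(url))
# https://docs.google.com/spreadsheets/d/abc123/export?format=csv&gid=0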
def byte_array_declaration(data: bytes, name: str) -> str:
    """Generates a byte C array declaration for a byte array"""
    type_name = '[[maybe_unused]] const std::byte'
    byte_str = ''.join([f'std::byte{{0x{b:02x}}},' for b in data])
    array_body = f'{{{byte_str}}}'
    return f'{type_name} {name}[] = {array_body};'
015f0cb1a4dfd94b6f488b4eae0b58860da4d1dc
13,295
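A small example of the output of byte_array_declaration above; the array name is hypothetical.

print(byte_array_declaration(b"\x01\xff", "kBlob"))
# [[maybe_unused]] const std::byte kBlob[] = {std::byte{0x01},std::byte{0xff},};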
import os
import csv


def ReadTimeStamps(path):
    """
    Look for timestamps.txt and strobo_timestamps.txt in path.
    Return N, the number of lines, and timestamps, a list containing all data.
    """
    file = os.path.join(path, 'timestamps.txt')
    if not os.path.exists(file):
        file = os.path.join(path, 'strobo_timestamps.txt')
        if not os.path.exists(file):
            print('timestamps.txt and strobo_timestamps.txt not found')
            return None, None

    with open(file) as csvfile:
        reader = csv.reader(csvfile, delimiter=' ')
        timestamps = list()
        for row in reader:
            timestamps.append(row)
        N = reader.line_num
    return N, timestamps
9c5f9d54fa32b27c330d27255b1ded3f9a92cbf7
13,299
import subprocess


def os_run(*command, stdout=True):
    """
    Run given command.
    :param command: command to run
    :param stdout: prints output to stdout if true, otherwise - returns output
    :return: Output or None
    """
    to_run = command[0]
    test = subprocess.Popen(to_run, stdout=subprocess.PIPE)
    output = test.communicate()[0].decode("utf-8")
    if stdout:
        print("%s" % output)
    else:
        return output
46de5d4c5b0ee97dcbdbc2371518e55cca5d33b6
13,300
import os


def check_converted(file_name, dst_path):
    """
    :param file_name: filename with full path
    :param dst_path: tfrecord save path
    :return: flag of converted or new file
    """
    base_name = file_name.split('/')[-1]
    base_name = base_name.split('.')[0]
    dst_file = os.path.join(dst_path, base_name + '.tfrecord')
    return os.path.exists(dst_file)
a3fa5a068b9a2a1d8e8a6bc4056ce4b816227423
13,301
def shorten(s: str, max_len: int = 60) -> str:
    """Truncate a long string, appending '...' if a change is made."""
    if len(s) < max_len:
        return s
    return s[:max_len - 3] + '...'
8bafe69253e12a67fdb4c476a2d4b55f6ad4d2af
13,302
import json


def elasticsearch_bulk_decorate(bulk_index, bulk_type, msg):
    """ Decorates the msg with elasticsearch bulk format and adds index and
    message type"""
    command = json.dumps({'index': {'_index': bulk_index, '_type': bulk_type}})
    return '{0}\n{1}\n'.format(command, msg)
e215ab15b347046b22dcc46317632c68fa9d36c0
13,303
import itertools


def get_hp_fold_iterator(hp_grid, num_folds):
    """ Create an iterator over all combinations of hyperparameters and folds
    """
    hp_grid = list(hp_grid)
    folds = list(range(num_folds))

    hp_fold_it = itertools.product(hp_grid, folds)
    hp_fold_it = list(hp_fold_it)

    return hp_fold_it
ea16c3b3867dc87a18f2dc1dc2f59e5a48f7c04d
13,304
import re


def make_systematic_name(name):
    """
    Something like AddAppealBaseBonusChangingHpRateMax ->
    Add appeal base bonus changing hp rate max.
    Word soup for most enumerated skill names but is a good starting point
    """
    return " ".join(re.findall(r"([A-Z]+[a-z]*)", name)).capitalize()
6bd2b1a78091476456b9e2d95a085032c19d28c4
13,305
def yp_processed_reviews(yelp_username):
    """
    Raw form of reviews that contains only status and reviews.

    File Type: CSV
    """
    return '../data/processed/{}.csv'.format(yelp_username)
d4be5addf2f8014ebb096493ce9e037b1c4550ac
13,306
def sub_dir_algo(d):
    """ build out the algorithm portion of the directory structure.

    :param dict d: A dictionary holding BIDS terms for path-building
    """
    return "_".join([
        '-'.join(['tgt', d['tgt'], ]),
        '-'.join(['algo', d['algo'], ]),
        '-'.join(['shuf', d['shuf'], ]),
    ])
73d91f4f5517bbb6966109bd04f111df5f6302e4
13,307
def _get_words(data_dir):
    """Get list of words from history file.

    Parameters
    ----------
    data_dir : pathlib.Path
        Directory where data is saved.

    Returns
    -------
    word_list : list of str
        List of words.
    """
    words_file = data_dir.joinpath("words.txt")
    word_list = []
    if not words_file.is_file():
        return word_list

    with open(words_file, mode="r") as f:
        for l in f:
            line = l.rstrip()
            word_list.append(line)

    return word_list
40728b61ed4af05d26a8ece678661cddf838602a
13,308
import requests
import json


def load_data():
    """ Get list of events from SloComps.

    Returns:
        list: List of telegram messages.
    """
    # Download data.
    login_url = 'https://comps.sffa.org/user/login'
    request_utl = 'https://comps.sffa.org/calendar'
    payload = {
        'name': 'telegram-bot',
        'pass': 'vjUD`<bWen*9UdBz',
        'form_build_id': 'form-YtrArwGYXwquwFw-38gr0VbCC_MD2hzfbpugAcFRyrY',
        'form_id': 'user_login',
        'op': 'Log+in'
    }
    base_directory = 'telegram_comps/'

    with requests.Session() as session:
        post = session.post(login_url, data=payload)
        r = session.get(request_utl)
        comps_json = json.loads(r.text)

    return comps_json
fa69238a2684c52eb29ef6deda167061638b1d89
13,310
def containsAll(target, keys):
    """Returns true iff target contains all keys.
    """
    result = ((i in target) for i in keys)
    return all(result)
75fda44a515e2249bd4c6ee1146c36eb91657b56
13,312
def _roitmpl2roiname(roitmpl):
    """
    Generate roiNames out of roitmpl.

    Written as a function to easily change the pattern for roiNames in all
    further functions at once. This keeps the featureAttributes (fa)
    consistent.
    """
    roiRname = 'Right' + roitmpl
    roiLname = 'Left' + roitmpl
    return roiLname, roiRname
4e2d198821f8e7cbca80722f41aed811dc0bd598
13,314
def _strip_external_workspace_prefix(path):
    """Either 'external/workspace_name/' or '../workspace_name/'."""
    # Label.EXTERNAL_PATH_PREFIX is due to change from 'external' to '..' in Bazel 0.4.5.
    # This code is for forwards and backwards compatibility.
    # Remove the 'external/' check when Bazel 0.4.4 and earlier no longer need to be supported.
    if path.startswith("../") or path.startswith("external/"):
        return "/".join(path.split("/")[2:])
    return path
3fe846b5349f0859fa0bcaa9274a09a64687bc84
13,315
def get_one_differing_var(vars1, vars2):
    """Return the single variable on which the two assignments differ,
    or None if they differ on zero or more than one variable (or have
    different keys).
    """
    if len(vars1) != len(vars2):
        return None
    ans = None
    for var in vars1:
        if var in vars2:
            if vars1[var] != vars2[var]:
                if ans is None:
                    ans = var
                else:
                    return None
        else:
            return None
    return ans
64aeb8694331668133dc8ffcf97d32d0bf8ae58b
13,319
from typing import Collection
from typing import Mapping


def _get_val_id_from_collection(
        val: Collection[object],  # noqa: WPS110
) -> str | object:
    """Extract a suitable test ID string from a collection, if possible."""
    if isinstance(val, Mapping):
        # static analysis: ignore[undefined_attribute]
        val_iter = iter(val.values())
    else:
        val_iter = iter(val)

    if len(val) == 1:
        val_id: object = next(val_iter)
        return val_id

    if all(isinstance(val_item, str) for val_item in val):
        # static analysis: ignore[incompatible_argument]
        return " ".join(val)  # type: ignore[arg-type]

    return val
6fcb2e1b6636decabc29f8508cfa75ac7c12367f
13,322
import os


def splitext(p):
    """Split a path into the part before the extension and the extension.

    The difference from os.path.splitext() is that for a path consisting
    only of an extension such as ".ext", this returns ("", ".ext")
    instead of (".ext", "").
    """
    p = os.path.splitext(p)
    if p[0].startswith(".") and not p[1]:
        return (p[1], p[0])
    return p
dfbc9599a04b0271e867769566e957a60bb7f87c
13,323
import re


def parse_log_entry(msg):
    """
    Parse a log entry from the format
    $ip_addr - [$time_local] "$request" $status $bytes_sent $http_user_agent"
    to a dictionary
    """
    data = {}
    # Regular expression that parses a log entry
    search_term = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\-\s+\[(.*)]\s+' \
                  r'"(\/[/.a-zA-Z0-9-]+)"\s+(\d{3})\s+(\d+)\s+"(.*)"'
    values = re.findall(search_term, msg)
    if values:
        val = values[0]
        data['ip'] = val[0]
        data['date'] = val[1]
        data['path'] = val[2]
        data['status'] = val[3]
        data['bytes_sent'] = val[4]
        data['agent'] = val[5]
    return data
f237696e8215793b4cf17e7b20543ac01494b686
13,325
def selection():
    """Returns user selection and catches input errors.
    """
    input_value = input("Select a number: ").strip()
    if input_value == "break":
        return "break"

    # catch input errors
    try:
        input_value = int(input_value)
        # The prompts advertise 1-3, so only accept that range
        # (the original upper bound of 5 let 4 slip through).
        if 1 <= input_value <= 3:
            return input_value
        else:
            print("You need to select a value from 1-3.")
            return selection()
    except ValueError:
        print("You need to select a value from 1-3.")
        return selection()
ddbd04a0f14395543bf37ac8d29e19c9b3ccd8f2
13,326
import numpy


def partition(a, b, n):
    """
    Utility routine to construct the non-uniform partition, t, for the
    interval [a,b] using (n+1) nodes. For convenience I just use the
    Chebyshev nodes of the second kind. Returns t as a column vector.
    """
    return ((a + b) / 2 - (b - a) / 2 * numpy.cos(numpy.linspace(0, n, n + 1) * numpy.pi / n))
68d6e0093a6e7115ebeadfc5852132f531e2260e
13,331
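A sanity check for partition above: on [0, 1] with n = 2, the Chebyshev nodes of the second kind are the endpoints plus the midpoint.

import numpy

print(partition(0.0, 1.0, 2))  # approximately [0.  0.5  1.]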
def make_subfolder(path, subfolder):
    """helper method that puts given file in given subfolder"""
    basename, dirname = path.name, path.parent
    path = dirname.joinpath(subfolder, basename)
    path.parent.mkdir(exist_ok=True, parents=True)
    return path
473f71f6ac74fe40eeb1b96c584a37f4d16d130b
13,332
import tempfile


def get_logprop_file(logger, template, pattern, project):
    """
    Return the filename of file with logging properties specific for given
    project.
    """
    with open(template, 'r') as f:
        data = f.read()
    data = data.replace(pattern, project)
    with tempfile.NamedTemporaryFile(delete=False) as tmpf:
        tmpf.write(data.encode())
        return tmpf.name
061254a5e65a08c7e5ee8367c94abaee04238e61
13,335
def Iyy_beam(b, h):
    """gets the Iyy for a solid beam"""
    return 1 / 12. * b * h ** 3
8912b769b1bf8f91d2bb7eefad261c0e4f9f4026
13,336
def _get_grouped_pathways(end_nodes_dict):
    """ From end_nodes_dict, makes a list of grouped_pathway lists"""
    grouped_pathways = []
    for end_nodes in end_nodes_dict:
        for reactions in end_nodes_dict[end_nodes]:
            for scores in end_nodes_dict[end_nodes][reactions]:
                grouped_pathways.append(end_nodes_dict[end_nodes][reactions][scores])
    return grouped_pathways
7d969fa8f18061847f3f71de0a02799e845fc55d
13,337
import logging
import os
import itertools


def get_handler(filename: 'str | os.PathLike[str]') -> logging.FileHandler:
    """Cycle log files, then give the required file handler."""
    name, ext = os.path.splitext(filename)
    suffixes = ('.5', '.4', '.3', '.2', '.1', '')
    try:
        # Remove the oldest one.
        try:
            os.remove(name + suffixes[0] + ext)
        except FileNotFoundError:
            pass
        # Go in reverse, moving each file over to give us space.
        for frm, to in zip(suffixes[1:], suffixes):
            try:
                os.rename(name + frm + ext, name + to + ext)
            except FileNotFoundError:
                pass
        try:
            return logging.FileHandler(filename, mode='x')
        except FileExistsError:
            pass
    except PermissionError:
        pass
    # On windows, we can't touch files opened by other programs (ourselves).
    # If another copy of us is open, it'll hold access.
    # In that case, just keep trying suffixes until we find an empty file.
    for ind in itertools.count(start=1):
        try:
            return logging.FileHandler(
                '{}.{}{}'.format(name, ind, ext),
                mode='x',
            )
        except (FileExistsError, PermissionError):
            pass
    else:
        raise AssertionError
3968dd6d12353b3e05116b12d6a4964f6b39c878
13,340
def read_user_header(head):
    """
    The 'User-Id' header contains the current online user's id, which is an
    integer: -1 if no user is online.
    :param head: the request's header
    :return: online user's id; reports a type error if the header is not
        an integer.
    """
    try:
        return int(head['User-Id'])
    except TypeError:
        print('user_id header of wrong variable type')
95586f32b698283f33c9271b5ffdf61a934dbb4a
13,341
import uuid


def GenerateUID():
    """This will generate a unique ID which will be assigned"""
    return str(uuid.uuid1()).lower()
2b93b6db1de6139419dd13084147efe3e39de5ee
13,342
def site_details(site: dict) -> dict:
    """
    Describe site details in a simple data structure designed to be used
    from SQL.
    """
    return {
        "type": site.get("type"),
    }
12295cec8616fc9beb5d1d0e0b41e9ed5026a828
13,344
def y_intercept_line(slope, point):
    """
    Calculate a y-intercept of a line for given values of slope and point.

    Parameters
    ----------
    slope : float
        A value of slope line.
    point : tuple
        A tuple with xy coordinates.

    Returns
    -------
    y-intercept : float
        A value of the y-intercept for the line.
    """
    return point[1] - slope * point[0]
839cdaa4b4646ebbdc5a2b941e5e14b45587cd24
13,345
import os


def expand_path(path, abs=False):
    """Path expansion util to get the right slashes and variable expansions.

    Note expanduser is needed, ~ is not expanded by expandvars
    """
    path = os.path.normpath(os.path.expandvars(os.path.expanduser(path)))
    if abs:
        return os.path.abspath(path)
    else:
        return path
06076dfc7ed8b270883bf371bdbbc83ec7e17c5e
13,347
def tp_group(tp_group_df, k):
    """Function called to find the top k for each time group"""
    # take the top k from each group
    tp_group_df = tp_group_df.head(k)
    # return the total tp in each group
    return tp_group_df['y_true'].sum()
b6cb7da179d7b5ad54b2d503ee107a2b922e1d8c
13,350
def get_client_ip(rq):
    """Try to get client's real IP address."""
    x_forwarded_for = rq.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # Take the last hop and strip any whitespace left over from
        # the comma-separated list.
        ip = x_forwarded_for.split(',')[-1].strip()
    else:
        ip = rq.META.get('REMOTE_ADDR')
    return ip
adae8af8841dad91fb1af9df196b3d424f53d33d
13,351
import itertools


def _groupby_leading_idxs(shape):
    """Group the indices for `shape` by the leading indices of `shape`.

    All dimensions except for the rightmost dimension are used to create
    groups.

    A 3d shape will be grouped by the indices for the two leading
    dimensions.

        >>> for key, idxs in _groupby_leading_idxs((3, 2, 2)):
        ...     print('key: {}'.format(key))
        ...     print(list(idxs))
        key: (0, 0)
        [(0, 0, 0), (0, 0, 1)]
        key: (0, 1)
        [(0, 1, 0), (0, 1, 1)]
        key: (1, 0)
        [(1, 0, 0), (1, 0, 1)]
        key: (1, 1)
        [(1, 1, 0), (1, 1, 1)]
        key: (2, 0)
        [(2, 0, 0), (2, 0, 1)]
        key: (2, 1)
        [(2, 1, 0), (2, 1, 1)]

    A 1d shape will only have one group.

        >>> for key, idxs in _groupby_leading_idxs((2,)):
        ...     print('key: {}'.format(key))
        ...     print(list(idxs))
        key: ()
        [(0,), (1,)]
    """
    idxs = itertools.product(*[range(s) for s in shape])
    return itertools.groupby(idxs, lambda x: x[:-1])
cc71304b9da4df2c9a32e009e3159bc9777d3e70
13,353
def _recvall(sock, size):
    """
    Read exactly the specified amount of data from a socket.  If a
    ``recv()`` call returns less than the requested amount, the
    ``recv()`` is retried until it either returns EOF or all the
    requested data has been read.

    :param sock: A socket from which bytes may be read.
    :param int size: The amount of data to read from the socket.

    :returns: The requested data.
    """
    data = b''
    while len(data) < size:
        buf = sock.recv(size - len(data))
        # Break out if we got an EOF
        if not buf:
            # Return None for an EOF
            return data or None
        data += buf
    return data
6a456ee4925b7faa25646e9bc6e702848d047783
13,354
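A self-contained demonstration of _recvall above using an in-process socket pair (socket.socketpair is available on POSIX, and on Windows from Python 3.5).

import socket

a, b = socket.socketpair()
a.sendall(b"hello")
print(_recvall(b, 5))  # b'hello'
a.close()
b.close()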
import re


def split_authors(authors_list):
    """
    :param authors_list: A list of authors to further split
    :return: A cleaner version of the authors list
    """
    test_split = re.split(",", authors_list)
    # Remove 'and' or & symbol from the last author if it exists
    splits = [re.split("and |& | ", x) for x in test_split]
    final_authors = []
    for authors in splits:
        # Remove empty splits
        authors_split = list(filter(None, authors))
        # Abbreviate anything except the last name
        last_name = 0
        first_name = 1
        authors_split = [authors_split[last_name]] + \
            [''.join([x[:first_name] for x in authors_split[first_name:]])]
        final_authors.append(" ".join(authors_split))
    # Make last author appear with the & symbol
    if len(final_authors) > 1:
        final_authors[-1] = "& " + final_authors[-1]
    return final_authors
57f8f8c52ab07baddf9f3410c5be58ef482536ab
13,357
def get_expected_name(logfile):
    """get the name of the file with the expected json"""
    return logfile.with_name(logfile.stem + "-expected.json")
7bc886fdbc23a53610817391db3fccbe53fbab81
13,359
def decode(obj):
    """
    Decodes JSON / msgpack objects into the corresponding PhaseMap types.
    """
    return obj
821d72ed0c1fe51441f3ac40e1389ee450241087
13,360
def byte_instruction(name, bytecode, offset):
    """Print a single-operand instruction and return the next offset."""
    slot = bytecode.code[offset + 1]
    print("{:16s} {:4d}".format(name, slot))
    return offset + 2
5a1a95f2d2a7cb6bad05ed3941efdb15c0ec5969
13,361
def get_free_symbols(s, symbols, free_symbols=None):
    """
    Returns free_symbols present in `s`.
    """
    free_symbols = free_symbols or []
    if not isinstance(s, list):
        if s in symbols:
            free_symbols.append(s)
        return free_symbols
    for i in s:
        free_symbols = get_free_symbols(i, symbols, free_symbols)
    return free_symbols
873d0bac2fa24db3b4ded6d9d867ca703d0a2314
13,362
import torch


def hflip(input):
    """
    Horizontally flip a tensor image or a batch of tensor images.
    """
    w = input.shape[-1]
    return input[..., torch.arange(w - 1, -1, -1, device=input.device)]
0d0606cc5c1858e574c6ba41130c1feba612aa37
13,363
def zeropad_1501(name):
    """ Arxiv IDs after yymm=1501 are padded to 5 zeros """
    if '/' not in name:  # new ID
        yymm, num = name.split('.')
        if int(yymm) > 1500 and len(num) < 5:
            return yymm + ".0" + num
    return name
d2a46fccf649a59194b55ff0542bd6bfbe0d8629
13,364
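Two illustrative calls for zeropad_1501 above; the IDs are made up but follow arXiv's old and new formats.

print(zeropad_1501("1501.1234"))       # 1501.01234 (one zero added)
print(zeropad_1501("hep-th/9901001"))  # old-style ID, returned unchanged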
def __labels(known_labels, trans, labels):
    """Convert list of label names to label dictionaries with all info.
    """
    return {
        lbl: {
            "title": trans.gettext(known_labels[lbl]['title']),
            "description": trans.gettext(known_labels[lbl]['description']),
            "severity": known_labels[lbl].get('severity', "primary"),
        }
        for lbl in labels if lbl in known_labels.keys()
    }
55652386496f1312ae66f64ae7673ad43d1e1879
13,365
import binascii


def hexlify(data):
    """The binascii.hexlify function returns a bytestring in Python3, but
    sometimes we want to use its output to show a log message. This function
    returns a unicode str, which is suitable for it."""
    return binascii.hexlify(data).decode('ascii')
a24bf82a71a15dab12b2f683833c8a250a63097e
13,366
import os


def physical_disks(release_root, config):
    """Convert a root path into a list of physical disks containing data.

    Parameters
    ----------
    release_root : :class:`str`
        The "official" path to the data.
    config : :class:`dict`
        A dictionary containing path information.

    Returns
    -------
    :func:`tuple`
        A tuple containing the physical disk paths.
    """
    try:
        pd = config['physical_disks']
    except KeyError:
        return (release_root,)
    if not pd:
        return (release_root,)
    broot = os.path.basename(config['root'])
    if (len(pd) == 1) and (pd[0] == broot):
        return (release_root,)
    if pd[0].startswith('/'):
        return tuple([os.path.join(d, os.path.basename(release_root)) for d in pd])
    return tuple([release_root.replace(broot, d) for d in pd])
dbba276263d4d32dfd7fd91be6d1107d67bd2ac9
13,367
def _make_head_map():
    """Hardcoded Head variables mapping."""
    return {"predictions/kernel:0": "dense/kernel",
            "predictions/bias:0": "dense/bias"}
1fde16e3ccabc4afa7764a2240be16719d146368
13,368
def extract_token_argument(args):
    """Return token specified on command line.
    """
    if args.token_from is None:
        return args.token
    return args.token_from.read().rstrip()
63f640282cc723ea9df9b802857c4d4b68b1ddec
13,369
def y_flip_var():
    """
    Label noise:
        - float for global label noise
        - tuple for separate label noise for every class
    """
    return [0.01]
11c0dfefa1c475794416e0559cc50e026a5244c5
13,372
def extract_atoms_from_residue_group(residue_group):
    """
    Given a residue_group object, which may or may not have multiple
    conformations, extract the relevant atoms for each conformer, taking
    into account any atoms shared between conformers.  This is implemented
    separately from the main validation routine, which accesses the
    hierarchy object via the chain->conformer->residue API.  Returns a list
    of hashes, each suitable for calling calculate_ideal_and_deviation.
    """
    atom_groups = residue_group.atom_groups()
    if len(atom_groups) == 1:
        relevant_atoms = {}
        for atom in atom_groups[0].atoms():
            relevant_atoms[atom.name] = atom
        return [relevant_atoms]
    else:
        all_relevant_atoms = []
        expected_names = [" CA ", " N ", " CB ", " C "]
        main_conf = {}
        for atom_group in atom_groups:
            if atom_group.altloc.strip() == '':
                for atom in atom_group.atoms():
                    if atom.name in expected_names:
                        main_conf[atom.name] = atom
            else:
                relevant_atoms = {}
                for atom in atom_group.atoms():
                    if atom.name in expected_names:
                        relevant_atoms[atom.name] = atom
                if len(relevant_atoms) == 0:
                    continue
                for atom_name in main_conf.keys():
                    if atom_name not in relevant_atoms:
                        relevant_atoms[atom_name] = main_conf[atom_name]
                if len(relevant_atoms) != 0:
                    all_relevant_atoms.append(relevant_atoms)
        if len(main_conf) == 4:
            all_relevant_atoms.insert(0, main_conf)
        return all_relevant_atoms
1c68f45c739db1fbbe1999b8a59bcfbd1d329f03
13,374
def normalize_user_link(url: str, user_id: int) -> str:
    """Strip off end e.g. 123/user_name"""
    return f"{url.split(str(user_id))[0]}{user_id}"
d891425c3765286b2d16dc1541100d1aed69f31b
13,375
import functools


def etag_headers(method):
    """
    A decorator that adds `ETag` and `Last-Modified` headers to `.get` method
    responses if not already present. Intended to be used with Timestamped
    models (or any object with a modified_time field).
    """
    @functools.wraps(method)
    def add_etag_headers_to_response(*args, **kwargs):
        response = method(*args, **kwargs)
        if len(args) > 0:
            obj = args[0].get_object()
            if hasattr(obj, "modified_time"):
                if response.get("Last-Modified", None) is None:
                    response["Last-Modified"] = obj.modified_time.strftime(
                        "%a, %d %b %Y %H:%M:%S GMT"
                    )
                if response.get("ETag", None) is None:
                    # NGINX strips out 'strong' ETags by default, so we use a weak (W/) ETag
                    response["ETag"] = f'W/"{obj.modified_time.isoformat()}"'
        return response

    return add_etag_headers_to_response
10f50aff637804870b5268cd2ce92c4c042faa6d
13,376
def fchr(char):
    """
    Print a fancy character

    :param char: the shorthand character key
    :type char: str
    :return: a fancy character
    :rtype: str
    """
    return {
        'PP': chr(10003),
        'FF': chr(10005),
        'SK': chr(10073),
        '>>': chr(12299)
    }.get(char, '')
d953a66546bc99cc44958d84b426d011089c8f97
13,378
def command_eval(expr: str):
    """Evaluates a given expression."""
    # Command callbacks can return values which will be passed to the caller
    # of `dispatch()`
    return eval(expr)
0664e4c3f6f0aad320642b000e4bbf242a5b219a
13,381
def accounting(Eddy, old_ind, centlon, centlat,
               eddy_radius_s, eddy_radius_e, amplitude, uavg, teke, rtime,
               new_eddy, first_record, contour_e=None, contour_s=None,
               uavg_profile=None, shape_error=None,
               cent_temp=None, cent_salt=None):
    """
    Accounting for new or old eddy (either cyclonic or anticyclonic)

    Eddy : eddy_tracker.tracklist object
    old_ind : index to current old eddy
    centlon, centlat : arrays of lon/lat centroids
    eddy_radius_s : speed-based eddy radius
    eddy_radius_e : effective eddy radius from fit_circle
    amplitude : eddy amplitude/intensity (max abs vorticity/f in eddy)
    uavg : average velocity within eddy contour
    teke : sum of EKE within Ceff
    rtime : ROMS time in seconds
    cent_temp : array of temperature at centroids
    cent_salt : array of salinity at centroids
    bounds : index array (imin, imax, jmin, jmax) defining location of eddy
    new_eddy : flag indicating a new eddy
    first_record : flag indicating that we're on the first record
    """
    if first_record:  # is True then all eddies are new
        new_eddy = True
        if Eddy.VERBOSE:
            print('------ writing first record')

    kwargs = {'temp': cent_temp, 'salt': cent_salt,
              'contour_e': contour_e, 'contour_s': contour_s,
              'shape_error': shape_error}

    if not new_eddy:  # it's an old (i.e., active) eddy
        Eddy.insert_at_index('new_lon', old_ind, centlon)
        Eddy.insert_at_index('new_lat', old_ind, centlat)
        Eddy.insert_at_index('new_radii_s', old_ind, eddy_radius_s)
        Eddy.insert_at_index('new_radii_e', old_ind, eddy_radius_e)
        Eddy.insert_at_index('new_amp', old_ind, amplitude)
        Eddy.insert_at_index('new_uavg', old_ind, uavg)
        Eddy.insert_at_index('new_teke', old_ind, teke)

        if 'ROMS' in Eddy.PRODUCT:
            pass
            # Eddy.insert_at_index('new_temp', old_ind, cent_temp)
            # Eddy.insert_at_index('new_salt', old_ind, cent_salt)

        if Eddy.TRACK_EXTRA_VARIABLES:
            Eddy.insert_at_index('new_contour_e', old_ind, contour_e)
            Eddy.insert_at_index('new_contour_s', old_ind, contour_s)
            # Eddy.insert_at_index('new_uavg_profile', old_ind, uavg_profile)
            Eddy.insert_at_index('new_shape_error', old_ind, shape_error)

        args = (old_ind, centlon, centlat, rtime, uavg, teke,
                eddy_radius_s, eddy_radius_e, amplitude)
        Eddy.update_track(*args, **kwargs)

    else:  # it's a new eddy
        # We extend the range of array to old_ind
        Eddy.insert_at_index('new_lon', Eddy.index, centlon)
        Eddy.insert_at_index('new_lat', Eddy.index, centlat)
        Eddy.insert_at_index('new_radii_s', Eddy.index, eddy_radius_s)
        Eddy.insert_at_index('new_radii_e', Eddy.index, eddy_radius_e)
        Eddy.insert_at_index('new_amp', Eddy.index, amplitude)
        Eddy.insert_at_index('new_uavg', Eddy.index, uavg)
        Eddy.insert_at_index('new_teke', Eddy.index, teke)

        if 'ROMS' in Eddy.PRODUCT:
            pass
            # Eddy.insert_at_index('new_temp', Eddy.index, cent_temp)
            # Eddy.insert_at_index('new_salt', Eddy.index, cent_salt)

        if Eddy.TRACK_EXTRA_VARIABLES:
            Eddy.insert_at_index('new_contour_e', Eddy.index, contour_e)
            Eddy.insert_at_index('new_contour_s', Eddy.index, contour_s)
            # Eddy.insert_at_index('new_uavg_profile', Eddy.index, uavg_profile)
            Eddy.insert_at_index('new_shape_error', Eddy.index, shape_error)

        if Eddy.new_list:  # initialise a new list
            print('------ starting a new track list for %s'
                  % Eddy.SIGN_TYPE.replace('nic', 'nes'))
            Eddy.new_list = False

        args = (centlon, centlat, rtime, uavg, teke,
                eddy_radius_s, eddy_radius_e, amplitude)
        Eddy.add_new_track(*args, **kwargs)
        Eddy.index += 1

    return Eddy
6f6cd845d30086d8cdbb12cc193d8e3d6974e069
13,382
def delete_blank_lines(raw_file, start):
    """
    Opens the file, checks that it has at least `start` lines, then removes
    the blank lines from line `start` onwards and returns the cleaned content.
    """
    with open(raw_file, 'r', encoding='utf-8') as file:
        # Read all lines once; iterating the file again after readlines()
        # would yield nothing, since the file pointer sits at EOF.
        lines = file.readlines()

    if len(lines) < start:
        raise Exception()

    clean_file = ''
    for c, line in enumerate(lines, start=1):
        if c >= start and line not in ['\n', '\r\n']:
            clean_file = clean_file + line
    return clean_file
cd957f69a91403df6a15953648cae2933bd294f1
13,384
import requests


def get_episode_data(cfg, episode_id):
    """
    Requests episode information from Opencast.

    :param episode_id: Unique identifier for episode
    :param cfg: Opencast configuration
    :return: Tuple of episode title, parent seriesId and parent series title
    """
    url = cfg['uri'] + "/api/events/" + episode_id
    r = requests.get(url=url, auth=(cfg['user'], cfg['password']))
    x = r.json()
    return x['title'], x['is_part_of'], x['series']
41aa3053312827d7081776fcb3c414e358f0cc61
13,385
def isempty(prt_r):
    """Test whether the parent of a missingness indicator is empty"""
    return len(prt_r) == 0
167e6cb0a63116c6157291a6ba225b14fbfebe11
13,386
import csv


def read_csv(filename):
    """Read csv file and separate time and voltage into respective lists

    :param filename:
    :return:
    """
    time = []
    voltage = []
    with open(filename, "r") as csvfile:
        temp = csv.reader(csvfile, delimiter=",")
        for row in temp:
            time.append(row[0])
            voltage.append(row[1])
    return time, voltage
0e846d2074092cc78cf334840e4346018e609e23
13,388
import math


def vortex_feldtkeller(beta, center, right_handed=True, polarity=+1):
    """
    Returns a function f: (x,y,z) -> m representing a vortex magnetisation
    pattern where the vortex lies in the x/y-plane (i.e. the magnetisation is
    constant along the z-direction), the vortex core is centered around the
    point `center` and the m_z component falls off exponentially as given by
    the following formula, which is a result by Feldtkeller and Thomas [1].:

        m_z = exp(-2*r^2/beta^2).

    Here `r` is the distance from the vortex core centre and `beta` is a
    parameter, whose value is taken from the first argument to this function.

    [1] E. Feldtkeller and H. Thomas, "Struktur und Energie von Blochlinien
        in Duennen Ferromagnetischen Schichten", Phys. Kondens. Materie 8, 8
        (1965).
    """
    beta_sq = beta ** 2

    def f(pt):
        x = pt[0]
        y = pt[1]
        # To start with, create a right-handed vortex with polarity 1.
        xc = x - center[0]
        yc = y - center[1]
        phi = math.atan2(yc, xc)
        r_sq = xc ** 2 + yc ** 2
        mz = math.exp(-2.0 * r_sq / beta_sq)
        mx = -math.sqrt(1 - mz * mz) * math.sin(phi)
        my = math.sqrt(1 - mz * mz) * math.cos(phi)
        # If we actually want a different polarity, flip the z-coordinates
        if polarity < 0:
            mz = -mz
        # Adapt the chirality accordingly
        if ((polarity > 0) and (not right_handed)) or \
                ((polarity < 0) and right_handed):
            mx = -mx
            my = -my
        return (mx, my, mz)

    return f
4714dcdfb2b96413562a6f531be944963a3a3b2f
13,389
from typing import Union
from typing import Optional
import re


def convert_to_int(value: Union[str, float]) -> Optional[int]:
    """
    Converts string and float input to int. Strips all non-numeric
    characters from strings.

    Parameters:
        value: (string/float) Input value

    Returns:
        integer/None  Integer if successful conversion, otherwise None
    """
    if isinstance(value, str):
        str_val = re.sub(r'[^0-9\-\.]', '', value)
        try:
            return int(str_val)
        except (ValueError, TypeError):
            try:
                return int(float(str_val))
            except (ValueError, TypeError):
                return None
    elif isinstance(value, (int, float)):
        return int(value)
    else:
        return None
15036374ca499edfa58e2865d4f5fb70c3aecb40
13,393
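A few representative inputs for convert_to_int above, chosen to exercise the string-stripping and float-fallback paths.

print(convert_to_int("$1,234.56"))  # 1234 (symbols stripped, float truncated)
print(convert_to_int(7.9))          # 7
print(convert_to_int("n/a"))        # None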
import re
from typing import Any


def expand_template(match: re.Match, template: Any) -> Any:
    """
    Return the string obtained by doing backslash substitution on the template.

    :param match: The match object.
    :param template: The template.
    :return: The template replaced with backslash substitution symbols.
    """
    if template is None:
        return match.group(0)
    elif isinstance(template, str):
        return match.expand(template.replace(r'\0', r'\g<0>'))
    elif isinstance(template, list):
        return [expand_template(match, t) for t in template]
    elif isinstance(template, dict):
        return {expand_template(match, k): expand_template(match, v)
                for k, v in template.items()}
    else:
        return template
82effee652443574f09d4ef136568a1a3e8f01b0
13,394
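A short sketch of expand_template above on a hypothetical pattern, showing the string, list, and \0 (whole-match) cases.

import re

m = re.search(r"(\w+)@(\w+)", "reach me at alice@example")
print(expand_template(m, r"user=\1 host=\2"))  # user=alice host=example
print(expand_template(m, [r"\1", r"\0"]))      # ['alice', 'alice@example']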
def format_describe_str(desc, max_len=20):
    """Returns a formatted list for the matplotlib table cellText argument.

    Each element of the list is like this: ['key    ', 'value    ']
    The number of spaces at the end of the value depends on the max_len
    argument.

    Parameters
    ----------
    desc: dict
        Dictionary returned by the variable.describe function
    max_len: int (default 20)
        Maximum length for the values

    Returns
    -------
    list(list):
        Formatted list for the matplotlib table cellText argument
    """
    res = {}
    _max = max([len(str(e)) for k, e in desc.items()])
    max_len = _max if _max < max_len else max_len

    n_valid = desc['valid values']
    n_missing = desc['missing values']
    n = n_valid + n_missing

    for k, e in desc.items():
        if k == 'valid values':
            e = str(e) + ' (' + str(int(n_valid * 100 / n)) + '%)'
        elif k == 'missing values':
            e = str(e) + ' (' + str(int(n_missing * 100 / n)) + '%)'
        else:
            e = str(e)
        e = e.ljust(max_len) if len(e) <= 15 else e[:max_len]
        res[k.ljust(15).title()] = e

    return [[k, e] for k, e in res.items()]
9d172ea7afc7fbcb10b4fdc3e120cd955b9eb196
13,395
import tempfile
import os


def save_request(request):
    """
    From: https://gist.github.com/TheWaWaR/bd26ef76dabca2d410dd
    """
    req_data = {}
    req_data['endpoint'] = request.endpoint
    req_data['method'] = request.method
    req_data['cookies'] = request.cookies
    req_data['data'] = request.data
    req_data['headers'] = dict(request.headers)
    req_data['headers'].pop('Cookie', None)
    req_data['args'] = request.args
    req_data['form'] = request.form
    req_data['remote_addr'] = request.remote_addr
    files = []
    # .items() replaces the Python 2-only .iteritems()
    for name, fs in request.files.items():
        dst = tempfile.NamedTemporaryFile()
        fs.save(dst)
        dst.flush()
        filesize = os.stat(dst.name).st_size
        dst.close()
        files.append({'name': name,
                      'filename': fs.filename,
                      'filesize': filesize,
                      'mimetype': fs.mimetype,
                      'mimetype_params': fs.mimetype_params})
    req_data['files'] = files
    return req_data
fa9d1e7bbbfade09957120dbf9aa6d26f2743ec5
13,396
def source_is_connected(source, user):
    """
    Return True if the given source is connected (has the required data for
    retrieving the user's data, like a huID or an access token).
    """
    try:
        return getattr(user, source).is_connected
    except:  # pylint: disable=bare-except
        return False
448122f82723d91132045aef89fc5c2126a44e16
13,397
def scoreboard(board, depthleft, evaluate):
    """
    This method uses a negated minimax (negamax) approach. Instead of two
    subroutines for maximising and minimising players, it passes on the
    negated score due to the following mathematical relation:

        max(a, b) == -min(-a, -b)
    """
    bestscore = -9999
    if depthleft == 0:
        # Return end leaf
        return evaluate(board)

    # Negated minimax
    for move in board.legal_moves:
        # Get score for each possible move
        board.push(move)
        # Recursive depth-first search. This will follow one path down the
        # search tree until depthleft == 0. The negation swaps the
        # white/black perspective at each move.
        score = -scoreboard(board, depthleft - 1, evaluate)
        # Restore the previous position.
        board.pop()
        # Record new best score if discovered
        if score > bestscore:
            bestscore = score

    # Return best score from the starting position given to the search
    return bestscore
079873b5500085c376015c60b40ff760af680ffa
13,398
def minutes_to_seconds(minutes):
    """A function that converts minutes to seconds

    :param minutes: The number of minutes to be converted
    :return: The number of seconds in a given number of minutes
    """
    return 60 * minutes
b9dee8e669f7659fe669e5b97ef79e8e035b63bf
13,399