Columns: content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
import torch


def gen_perturb(data, sigma_perturb=1.0, sigma_end=0.01):
    """Perturb given protein structure with gaussian noise."""
    pos_init = data.pos
    step_size = 0.00002 * (sigma_perturb / torch.tensor(sigma_end)) ** 2
    noise = torch.randn_like(pos_init) * torch.sqrt(step_size * 2)
    pos = pos_init + noise
    data.pos = pos
    return data
99b2685e4c20c7a9e773aad320a3b85c8eccd154
17,320
def _sub_complete_column(column, index):  # noqa: F811
    """
    This function processes the `columns` argument, to create a pandas Index.

    Args:
        column : str
        index : pandas Index

    Returns:
        pd.Index: A pandas Index with a single level
    """
    arr = index.get_level_values(column)
    if not arr.is_unique:
        arr = arr.drop_duplicates()
    return arr
9668c944623a038321b35796bad918af87b40e9c
17,322
def paren(iterable):
    """Return generator that parenthesizes elements."""
    return ('(' + x + ')' for x in iterable)
c841c9145c35a0f600a39845484176a01b6492be
17,323
def all_suffixes(li):
    """
    Returns all suffixes of a list.

    Args:
        li: list from which to compute all suffixes

    Returns:
        list of all suffixes
    """
    return [tuple(li[len(li) - i - 1:]) for i in range(len(li))]
ff1a2cf4fa620d50ecb06e124a4c2ca192d5d926
17,324
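A quick usage sketch for all_suffixes (illustrative inputs, assuming the function above is in scope); suffixes come back shortest-first, as tuples:

# shortest suffix first, full list last
suffixes = all_suffixes([1, 2, 3])
assert suffixes == [(3,), (2, 3), (1, 2, 3)]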
from typing import Dict, List, Union
import warnings


def _get_group_id(
    recorded_group_identifier: List[str],
    column_links: Dict[str, int],
    single_line: List[str],
) -> Union[str, None]:
    """Returns the group_name or group_id if it was recorded or "0" if not.
    Favors the group_name over the group_id.

    Parameters
    ----------
    recorded_group_identifier: List[str]
        List of all recorded group identifiers. Group identifiers are
        "group_id" or "group_name".
    column_links: Dict[str, int]
        Dictionary with column index for relevant recorded columns, i.e.
        ``column_links[column] = index``. The following columns
        (floodlight id: column name in the Kinexon.csv-file) are currently
        considered relevant:
        - time: 'ts in ms'
        - sensor_id: 'sensor id'
        - mapped_id: 'mapped id'
        - name: 'full name'
        - group_id: 'group id'
        - x_coord: 'x in m'
        - y_coord: 'y in m'
    single_line: List[str]
        Single line of a Kinexon.csv-file that has been split at the
        respective delimiter, e.g. ",".

    Returns
    -------
    group_id: str
        The respective group id in that line or "0" if there is no group id.
    """
    # check for group identifier
    has_groups = len(recorded_group_identifier) > 0
    if has_groups:
        # extract group identifier
        if "group_name" in recorded_group_identifier:
            group_identifier = "group_name"
        elif "group_id" in recorded_group_identifier:
            group_identifier = "group_id"
        else:
            warnings.warn("Data has groups but no group identifier!")
            return None
        group_id = single_line[column_links[group_identifier]]
    # no groups
    else:
        group_id = "0"
    return group_id
ea8e64d8377513d00205cb8259cb01f8711bf135
17,325
import warnings


def parse_sacct(sacct_str):
    """Convert output of ``sacct -p`` into a dictionary.

    Parses the output of ``sacct -p`` and returns a dictionary with the
    full (raw) contents.

    Args:
        sacct_str (str): stdout of an invocation of ``sacct -p``

    Returns:
        dict: Keyed by Slurm Job ID and whose values are dicts containing
            key-value pairs corresponding to the Slurm quantities returned
            by ``sacct -p``.
    """
    result = {}
    cols = []
    for lineno, line in enumerate(sacct_str.splitlines()):
        fields = line.split('|')
        if lineno == 0:
            cols = [x.lower() for x in fields]
        else:
            record = {}
            jobidraw = fields[0]
            if jobidraw in result:
                warnings.warn("Duplicate raw jobid '%s' found" % jobidraw)
            for col, key in enumerate(cols):
                if key:
                    record[key] = fields[col]
            result[jobidraw] = record
    return result
2719e08dea13305c6e6fcef19bd4320072ad7647
17,326
import socket


def get_unused_port_and_socket_ipv6():
    """
    Returns an unused port on localhost and the open socket from which it
    was created, but uses IPv6 (::1).
    """
    s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    s.bind(('::1', 0))
    # Ignoring flowinfo and scopeid...
    addr, port, flowinfo, scopeid = s.getsockname()
    return (port, s)
d5164fdfd9ba136c4835e8c2e49ae8e0a88c846e
17,327
import pytz


def format_date(dt):
    """
    Format a datetime into Zulu time, with terminal "Z".
    """
    return dt.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
524ee5803e5a5e26e7b9f8a1b6b680d7d739b3b1
17,328
import random
import string


def generate_random_string(len: int):
    """
    Creates a randomly generated string of uppercase letters

    Args:
        len (int): The desired length

    Returns:
        random_string (str)
    """
    return ''.join(random.choices(string.ascii_uppercase, k=len))
5414ecb7a6e212379000e43fef07e4642c0e63a0
17,329
def modular_exponentiation(b, e, m):
    """Performs modular exponentiation by repeated squaring.

    https://en.wikipedia.org/wiki/Modular_exponentiation

    :param b: a base number.
    :param e: an exponent.
    :param m: a modulo.
    :return: the remainder of b**e modulo m.
    """
    x = 1
    y = b
    while e > 0:
        # multiply into the result only when the current bit of e is set
        if e % 2 == 1:
            x = (x * y) % m
        y = (y * y) % m
        e //= 2
    return x % m
389cab70e83bb2c2972c39583edbb2bca8efeacb
17,330
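A minimal sanity check for modular_exponentiation above, cross-checked against Python's built-in three-argument pow (the test values are illustrative):

for b, e, m in [(2, 10, 1000), (7, 128, 13), (5, 0, 9)]:
    assert modular_exponentiation(b, e, m) == pow(b, e, m)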
def load_file(cert_path):
    """Load a file"""
    with open(cert_path) as cer:
        cert = cer.read()
    return cert
5910e60bff0fc60aab376f47a35986df4a89e00a
17,331
def expected_bfw_size(n_size):
    """
    Calculates the number of nodes generated by a single BFW for a single
    root node.

    :param n_size: <list> The number of neighbours at each depth level
    :return: The size of the list returned by a single BFW on a single
        root node
    """
    total = []
    for i, d in enumerate(n_size):
        if i == 0:
            total.append(d)
        else:
            total.append(total[-1] * d)
    return sum(total) + 1
f275af1c152c4d4704c0be5ee43f3d0f8802900b
17,332
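A worked example for expected_bfw_size with an assumed two-level walk: 1 root node plus 2 first-hop neighbours plus 2*3 second-hop neighbours gives 9 nodes:

assert expected_bfw_size([2, 3]) == 1 + 2 + 2 * 3  # == 9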
import random
import string


def username():
    """Generate a random username."""
    return "test_" + "".join(random.sample(string.ascii_letters + string.digits, 10))
bc86aa6c38651aa29ee5debed8af10e43e20c21e
17,333
import csv


def parse_csv_dict(filename):
    """
    Parses csv file and returns header columns and data.
    """
    data = []
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            data.append(row)
    return reader.fieldnames, data
8ffc34a6906cf59926ef618076732eb24f0561fa
17,334
def combineRulesWithOperator(listOfRules, operator):
    """
    Takes a list of rules and makes an overall rule that ties them together
    with the AND or the OR operator

    Parameters
    ----------
    listOfRules: list
        A list of string representation of rules
    operator: str
        Should be either AND or OR

    Returns
    -------
    total: str
        String representation of the rules combined using the given operator
    """
    assert type(listOfRules) == list
    assert all(map(lambda r: type(r) == str, listOfRules))
    assert operator.lower() in ['and', 'or']
    if len(listOfRules) == 1:
        return listOfRules[0]
    operator = operator.lower()
    total = listOfRules[0]
    for i in range(1, len(listOfRules)):
        total = operator + "(" + total + ", " + listOfRules[i] + ")"
    return total
7dfc4988f9c56e05318704a7bd6990fc9b00d604
17,335
def make_product(digits, k, start):
    """
    Compute k numbers product from start position.

    :param digits:
    :param k:
    :param start:
    :return: (product, next_start)
    """
    index = 0
    product = 1
    while index < k and start + index < len(digits):
        if digits[start + index]:
            product *= digits[start + index]
            index += 1
        else:
            # if 0 found, change start to position after 0 and compute
            # product again
            start += index + 1
            index = 0
            product = 1
    if index < k:
        # no k digit product without 0
        return 0, start
    else:
        # return k digit product starting at start position
        return product, start
caa4b9da545c7c575291ed2af1ae2579a681930d
17,340
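An illustrative call to make_product: with the zero at position 2, the window restarts at position 3, so the first zero-free 2-digit product is 4*5 = 20, returned along with the window start:

digits = [1, 2, 0, 4, 5]
assert make_product(digits, k=2, start=1) == (20, 3)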
def get_yearly_minutes():
    """Get yearly minutes."""
    return 525600
c2bb066c156deb105c0722bd45bdea5a27c35564
17,341
def _unroll_into_samples(out_ndim, *arrs):
    """Flatten samples, slices, and batches dims into one:

    - `(batches, slices, samples, *output_shape)` ->
      `(batches * slices * samples, *output_shape)`
    - `(batches, samples, *output_shape)` ->
      `(batches * samples, *output_shape)`

    `*arrs` are standardized (fed after :meth:`_transform_eval_data`), so the
    minimal case is `(1, 1, *output_shape)`, which still correctly reshapes
    into `(1, *output_shape)`.

    Cases:

    >>> # (32, 1)       -> (32, 1)
    ... # (1, 32, 1)    -> (32, 1)
    ... # (1, 1, 32, 1) -> (32, 1)
    ... # (1, 3, 32, 1) -> (96, 1)
    ... # (2, 3, 32, 1) -> (192, 1)
    """
    ls = []
    for x in arrs:
        # unroll along non-out (except samples) dims
        x = x.reshape(-1, *x.shape[-(out_ndim - 1):])
        while x.shape[0] == 1:  # collapse non-sample dims
            x = x.squeeze(axis=0)
        ls.append(x)
    return ls if len(ls) > 1 else ls[0]
c7bfe261f163c528d4a3903de56b964261e50a31
17,342
from typing import List


def parse_mbr_beam(parts: List[str]):
    """
    Structure:
    $corpus.mbr.$utility_function.beam.$length_penalty_alpha.$num_samples.$metric

    Example:
    dev.mbr.sentence-meteor.beam.1.0.10.meteor

    :param parts:
    :return:
    """
    corpus, decoding_method, utility_function, sample_origin, \
        length_penalty_alpha_a, length_penalty_alpha_b, \
        num_samples, metric = parts

    length_penalty_alpha = ".".join([length_penalty_alpha_a, length_penalty_alpha_b])
    seed = "-"

    return corpus, decoding_method, sample_origin, num_samples, seed, \
        length_penalty_alpha, utility_function, metric
04c36de5b2b05fd7c8b27a62f47e526527fb9404
17,343
def is_footnote_label(text):
    """
    Returns True when ``text`` represents a footnote "label".

    Patterns:
        the first character is a digit
        the first character is not alphanumeric
        a single lowercase letter
    """
    if text:
        return any(
            [
                text[0].isdigit(),
                not text[0].isalnum(),
                text[0].isalpha() and len(text) == 1 and text[0].lower() == text[0],
            ]
        )
306caca320e46bd12e76a16a1309e46ebfe75257
17,344
def decode_topic_names(topic_name):
    """Separate topic names.

    # Arguments
        topic_name: byte string
            subscribed topic name

    # Returns
        topic_names: list
            a list of strings
    """
    tmp_topic_name = topic_name.decode("utf-8")
    return tmp_topic_name.split("/")
5abb98c8d220f5133ab59e90ecd9e6cfc7a86c2d
17,345
def _get_image_offsets(width, height, segments_x, segments_y):
    """
    Gets offsets for the segments

    :param width: map width
    :param height: map height
    :param segments_x: number of x segments
    :param segments_y: number of y segments
    :return: lists of x offsets, lists of y offsets
    """
    offsets_x = []
    offsets_y = []
    for i in range(segments_x):
        offsets_x.append(int(width / segments_x * i))
    for i in range(segments_y):
        offsets_y.append(int(height / segments_y * i))
    offsets_x.append(width)
    offsets_y.append(height)
    return offsets_x, offsets_y
97fbde70318ca592643f99af332cfa1862da00f0
17,346
import torch


def compute_N_L(lambda_L, Gram_L, G_out):
    """Compute N_L using KRR

    Parameters
    ----------
    lambda_L: float
        Last layer regularization parameter
    Gram_L: torch.Tensor of shape (n_samples, n_samples)
        Last layer Gram matrix
    G_out: torch.Tensor of shape (n_samples, n_samples)
        Output Gram matrix

    Returns
    -------
    N_L: torch.Tensor of shape (n_samples, n_samples), default None
        Optimal last layer coefficients' dot products computed thanks to KRR
    """
    n = Gram_L.shape[0]
    M_L = Gram_L + n * lambda_L * torch.eye(n, dtype=torch.float64)
    M_L_1 = torch.inverse(M_L)
    N_L = torch.mm(M_L_1, torch.mm(G_out, M_L_1))
    # Should be independent from previous layers: .data
    return N_L.data
53ab4781898ae262e491d5a6ca577bcdd7fa8144
17,347
import re


def check_method(d, string):
    """Check that every method called in the string is supported."""
    method = set(re.findall(r'[a-zA-Z0-9_]+(?=\()', string))
    method_support = {'zip', 'replace', 'range', 'tuple', 'set', 'list',
                      'str', 'strftime', 'sorted', 'int', 'float', 'type',
                      'round', 'len', 'cmp', 'max', 'min', 'index', 'pop',
                      'count', 'remove', 'findall', 'time'}
    # subset-or-equal: a call set identical to the support set is still valid
    if not method <= method_support:
        d['err'] += 'Method is not supported: ' + \
            str(method - method_support).replace('{', '').replace('}', '') + '\n'
    return method
08fc7068a7d4c099c86d4bff6e0e31bcf32b1778
17,348
def Mceta(m1, m2):
    """Compute chirp mass and symmetric mass ratio from component masses"""
    Mc = (m1*m2)**(3./5.)*(m1+m2)**(-1./5.)
    eta = m1*m2/(m1+m2)/(m1+m2)
    return Mc, eta
fa55e1cfc669f3e180aed1a3cc838ed82147ddbc
17,349
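A quick check of Mceta for the equal-mass case, where the symmetric mass ratio is exactly 1/4 and the chirp mass reduces to 2**(-1/5) (illustrative values):

mc, eta = Mceta(1.0, 1.0)
assert abs(eta - 0.25) < 1e-12
assert abs(mc - 2 ** (-1. / 5.)) < 1e-12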
import csv
import decimal
import os


def make_feature_matrix(directory_name, feature_number, num_objects, start_object=0):
    """
    The function is used to parse multiple csv-files in order to create a
    matrix with lists of feature values. Returned values are to be used in
    Shannon's or Kullback's methods or in the method of accumulated
    frequencies.

    :param directory_name: name of a directory to parse; it should contain
        CSV-files with the same feature set
    :param feature_number: number of feature to handle
    :param num_objects: amount of objects to take into account
    :param start_object: number of first object to take into account
    :return: list of class names (actually file names) and list with lists
        of feature values
    """
    def decimalize_matrix(feature_matrix):
        for column in feature_matrix:
            for i in range(len(column)):
                column[i] = decimal.Decimal(column[i])

    file_list = os.listdir(directory_name)
    feature_table = list()
    class_list = list()
    file_number = -1
    for filename in file_list:
        class_list.append(filename)
        feature_class = list()
        file_number += 1
        # use a context manager so each file is closed after parsing
        with open(directory_name + "/" + filename, 'r') as file:
            csv_reader = csv.reader(file, delimiter=';')
            current_object = 0
            for row in csv_reader:
                if current_object < start_object:
                    current_object += 1
                    continue
                feature_class.append(row[feature_number])
                current_object += 1
                if current_object >= num_objects:
                    break
        feature_table.append(feature_class)
    decimalize_matrix(feature_table)
    return class_list, feature_table
c197fc14c7aecef9e0161781dfa0bc70142c9693
17,352
def split_sents(notes, nlp):
    """
    Split the text in pd.Series into sentences.

    Parameters
    ----------
    notes: pd.Series
        series with text
    nlp: spacy language model

    Returns
    -------
    notes: pd.DataFrame
        df with the sentences; a column with the original note index is added
    """
    print(f'Splitting the text in "{notes.name}" to sentences. '
          'This might take a while.', flush=True)
    to_sentence = lambda txt: [str(sent) for sent in nlp(txt).sents]
    sents = notes.apply(to_sentence).explode().rename('text') \
        .reset_index().rename(columns={'index': 'note_index'})
    print(f'Done! Number of sentences: {sents.shape[0]}')
    return sents
0aae3af46a2a0c29fff9c3bb5725b0ddcb8ed796
17,354
import glob
import os


def find_input_images(working_dir=None, image_format='jpg'):
    """Find the input images.

    Defaults to looking for all .jpg files in current working directory.
    """
    # Resolve the default at call time; a default of os.getcwd() in the
    # signature would be frozen at import time.
    if working_dir is None:
        working_dir = os.getcwd()
    # glob.glob always returns a list (possibly empty), never None.
    return glob.glob(os.path.join(working_dir, '*.%s' % image_format))
4a3760277e5333aa88892492644f8e89b05f71d7
17,355
def compute_mm_force(sim, positions):
    """Compute MM forces for the given positions."""
    sim.context.setPositions(positions)
    return sim.context.getState(getForces=True).getForces(asNumpy=True)
aad2d8c5d628af33d40637bda343e3d9c8066b6a
17,357
def _is_header_line(line: str) -> bool:
    """
    Determine if the specified line is a globals.csv header line

    Parameters
    ----------
    line : str
        The line to evaluate

    Returns
    -------
    is_header_line : bool
        If True, `line` is a header line
    """
    return "kT" in line
06f8ff10deeac60e92b2fd92059873d5abafa367
17,358
from typing import Set


def parse_modes(modes: Set[str]) -> Set[str]:
    """A function to determine which modes to run on based on a set of modes
    potentially containing blacklist values.

    ```python
    m = fe.util.parse_modes({"train"})  # {"train"}
    m = fe.util.parse_modes({"!train"})  # {"eval", "test", "infer"}
    m = fe.util.parse_modes({"train", "eval"})  # {"train", "eval"}
    m = fe.util.parse_modes({"!train", "!infer"})  # {"eval", "test"}
    ```

    Args:
        modes: The desired modes to run on (possibly containing blacklisted modes).

    Returns:
        The modes to run on (converted to a whitelist).

    Raises:
        AssertionError: If invalid modes are detected, or if blacklisted modes
            and whitelisted modes are mixed.
    """
    valid_fields = {"train", "eval", "test", "infer",
                    "!train", "!eval", "!test", "!infer"}
    assert modes.issubset(valid_fields), \
        "Invalid modes argument {}".format(modes - valid_fields)
    negation = set([mode.startswith("!") for mode in modes])
    assert len(negation) < 2, "cannot mix !mode with mode, found {}".format(modes)
    if True in negation:
        new_modes = {"train", "eval", "test", "infer"}
        for mode in modes:
            new_modes.discard(mode.strip("!"))
        modes = new_modes
    return modes
312467ea55d3d254f7b6cd601a2e8b999539508f
17,359
def endsWith(str, suffix):
    """Return true iff _str_ ends with _suffix_.

    >>> endsWith('clearly', 'ly')
    1
    """
    return str[-len(suffix):] == suffix
9e052562d4f6d862fe7a1c9ff34db3465fdd2152
17,360
def _patch_center(patch, orient='v'):
    """
    Get coordinate of bar center

    Parameters
    ----------
    patch : matplotlib patch
    orient : 'v' | 'h'

    Returns
    -------
    center : float
    """
    if orient not in 'v h'.split():
        raise Exception("Orientation must be 'v' or 'h'")

    if orient == 'v':
        x = patch.get_x()
        width = patch.get_width()
        xpos = x + width / 2
        return xpos
    else:
        y = patch.get_y()
        height = patch.get_height()
        ypos = y + height / 2
        return ypos
235da0e3fdca40d62dd0313d8e8a500a7c533b8d
17,361
def _identify_shade_auth():
    """Return shade credentials"""
    # Note(TheJulia): A logical progression is to support a user defining
    # an environment variable that triggers use of os-client-config to allow
    # environment variables or clouds.yaml auth configuration. This could
    # potentially be passed in as variables which could then be passed
    # to modules for authentication allowing the basic tooling to be
    # utilized in the context of a larger cloud supporting ironic.
    options = dict(
        auth_type="None",
        auth=dict(endpoint="http://localhost:6385/",)
    )
    return options
d7f6b403bf956ebe894b7c3e712320fbce3e866d
17,362
import os


def list_dir(path):
    """
    Same as os.listdir, but remove any invisible file.
    """
    files = os.listdir(path)
    cleaned = []
    for file in files:
        if not file.startswith('.'):
            cleaned.append(file)
    return cleaned
e2b429861284d1d41e199862caa50d43c2579d73
17,363
import hashlib


def get_sha256_of_string(the_string):
    """Returns a SHA-256 hash object for the given string.

    Call ``.hexdigest()`` on the result to get the digest as a string.
    """
    new_hash = hashlib.new("sha256")
    new_hash.update(bytes(the_string, "utf-8"))
    return new_hash
1657bd433e62c9342d5474fae472fc9dff649575
17,364
import re


def decamelize(s):
    """Decamelize the string ``s``.

    For example, ``MyBaseClass`` will be converted to ``my_base_class``.
    """
    if not isinstance(s, str):
        raise TypeError('decamelize() requires a string argument')
    if not s:
        return ''
    return re.sub(r'([a-z])([A-Z])', r'\1_\2', s).lower()
fc30254742bcc79047dd6803d0d7b87c951a9f10
17,365
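Illustrative behaviour of decamelize: runs of capitals are not split, since the regex only matches a lowercase letter followed by an uppercase one:

assert decamelize('MyBaseClass') == 'my_base_class'
assert decamelize('HTTPServer') == 'httpserver'  # consecutive capitals stay together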
import logging
import random


def add_break(focal_gen):
    """Add a break point to contigs."""
    # selecting contig (random.sample needs a sequence, not a dict view)
    contig = random.sample(list(focal_gen.keys()), 1)[0]
    contig_seq = focal_gen[contig]
    # selecting split location
    if len(contig_seq) < 100:
        focal_gen.pop(contig, None)
        return 0
    c_insert = random.randint(1, len(contig_seq) - 1)
    logging.info('  BR: Insert location: {}'.format(c_insert))
    # splitting
    frag1 = contig_seq[c_insert:]
    frag2 = contig_seq[:c_insert]
    # inserting
    focal_gen.pop(contig, None)
    focal_gen[contig + '_b1'] = frag1
    focal_gen[contig + '_b2'] = frag2
4e5a2dea897312d25e31fcf1ca0981ca9a706604
17,368
import base64
import os


def genkey():
    """Get a random short key name"""
    return base64.b64encode(os.urandom(5)).decode("utf8")
a25748fd166b2c1acf85cf210c17dd0491d0349e
17,369
import uuid


def generate_uuid_32():
    """Generate a unique ID encoded in hex.

    Returns
    -------
    str
    """
    return uuid.uuid4().hex
9d8374e6a243c4cb90db151ffa28c9464f368cba
17,370
from typing import List


def get_widxs_from_chidxs(chidxs: List[int], offsets: List[List[int]]) -> List[int]:
    """
    Find word indices given character indices

    :param chidxs:
    :param offsets:
    :return:
    """
    last_ch_idx = offsets[0][0]
    assert max(chidxs) < offsets[-1][1] - last_ch_idx
    widxs = []
    for chidx in chidxs:
        for oi in range(len(offsets)):
            if chidx in range(offsets[oi][0] - last_ch_idx,
                              offsets[oi][1] - last_ch_idx):
                widxs.append(oi)
                break
            # a char in the gap between two words maps to the earlier word;
            # clamp the lookahead to the last valid offset index
            elif chidx in range(offsets[oi][1] - last_ch_idx,
                                offsets[min(oi + 1, len(offsets) - 1)][0] - last_ch_idx):
                widxs.append(oi)
                break
    assert len(chidxs) == len(widxs)
    return widxs
210a915670bf3b834e1684d4547cd80d9e855b95
17,371
def parse_ppvid(params, names, ppvids):
    """
    Get the name-value pairs corresponding to a ppvid combination
    (e.g. 处理器 -> Intel 酷睿 i5 6代).

    :param params: dict holding the property information
    :param names: all property names (e.g. 处理器, 显卡)
    :param ppvids: the ppvid combination
    :return: name-value dict

    For example:
        {"处理器": "Intel 酷睿 i5 6代", "显卡": "核芯/集成显卡" ...}
    """
    name_to_value = {}
    for index, ppvid in enumerate(ppvids):
        name = names[index]
        for value_item in params[name]:
            if value_item['id'] == ppvid:
                name_to_value[name] = value_item['value']
    return name_to_value
791f2496929415c1c3270940dba18e093b0bdbeb
17,372
def find_boyer_moore(T, P):
    """Return the index of the first occurrence of P; otherwise, return -1."""
    n, m = len(T), len(P)
    if m == 0:
        return 0
    last = {k: i for i, k in enumerate(P)}
    i = k = m - 1
    while i < n:
        if T[i] == P[k]:
            if k == 0:
                return i
            i -= 1
            k -= 1
        else:
            j = last.get(T[i], -1)
            i += m - min(k, j + 1)
            k = m - 1
    return -1
76ebcc756620177b2e2a900537e9960c9e1c75e5
17,373
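A usage sketch for find_boyer_moore on an illustrative text:

text = "abacaabadcabacabaabb"
assert find_boyer_moore(text, "abacab") == 10
assert find_boyer_moore(text, "xyz") == -1  # no occurrence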
import random


def columns_many():
    """Simulating many columns being returned with varying numbers of
    entries in each."""
    def _build_cols(num_items, num_cols):
        col = [{} for i in range(num_items - random.randint(0, num_items))]
        cols = [col for i in range(num_cols)]
        # adding one more line to ensure there is at least one line with the
        # full num_items in it
        cols.append([{} for i in range(num_items)])
        return cols
    return _build_cols
300d97a4fb30f097748a6ff27616dc0abafb8b34
17,374
def bytes_int(seq):
    """8-bit encoded integer as sequence of 1 to 4 bytes, little-endian."""
    shift = 0
    v = 0
    for b in seq:
        v += b << shift
        shift += 8
    return v
116296e4d9384b547e3823c75e1bc4399787a850
17,375
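An illustrative little-endian decoding with bytes_int; iterating a bytes object yields ints, so both lists and byte strings work:

assert bytes_int([0x04, 0x01]) == 0x0104  # low byte first
assert bytes_int(b'\x01\x00\x00\x00') == 1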
def match_with_batchsize(lim, batchsize):
    """
    Function used by modify_datasets below to return the integer closest to
    lim (from below) which is a multiple of batchsize,
    i.e. lim % batchsize == 0.
    """
    if lim % batchsize == 0:
        return lim
    else:
        return lim - lim % batchsize
c37226946c51144df6192adeaf265326ee3bb701
17,376
def get_function_info(func) -> dict:
    """
    return: {
        'name': str func_name,
        'args': ((str arg_name, str arg_type), ...),   # [1]
        'kwargs': ((str arg_name, str arg_type, any value), ...),
        'has_*args': bool,    # [3]
        'has_**kwargs': bool, # [4]
        'return': str return_type,                     # [2]
    }

    [1]: there is no empty type. for a callback case, it uses 'str';
         for an unknown type, it uses 'Any'.
    [2]: there is no empty return type. for a callback case, it uses 'None';
         for an unknown type, it uses 'Any'.
    [3][4]: v0.2 doesn't support. relevant: ./stubgen/runtime_stubgen.py
    """
    param_count = func.__code__.co_argcount + func.__code__.co_kwonlyargcount
    param_names = func.__code__.co_varnames[:param_count]
    annotations = func.__annotations__
    kw_defaults = func.__defaults__ or ()
    # print(func.__name__, param_count, param_names, annotations, kw_defaults)

    func_name = func.__name__
    args: list
    kwargs: list
    return_: str

    type_2_str = {
        None : 'None',
        bool : 'bool',
        bytes: 'bytes',
        dict : 'dict',
        float: 'float',
        int  : 'int',
        list : 'list',
        set  : 'set',
        str  : 'str',
        tuple: 'tuple',
    }

    args = []
    if kw_defaults:
        arg_names = param_names[:-len(kw_defaults)]
    else:
        arg_names = param_names
    for name in arg_names:
        args.append(
            (name, type_2_str.get(annotations.get(name, str), 'Any'))
        )

    kwargs = []
    if kw_defaults:
        if isinstance(kw_defaults, tuple):
            kw_defaults = dict(
                zip(param_names[-len(kw_defaults):], kw_defaults)
            )
        for name, value in kw_defaults.items():
            kwargs.append(
                (name, type_2_str.get(annotations.get(name, str), 'Any'), value)
            )

    return_ = type_2_str.get(annotations.get('return', None), 'Any')

    return {
        'name'  : func_name,
        'args'  : args,
        'kwargs': kwargs,
        # 'has_*args'  : False,
        # 'has_**kwargs': False,
        'return': return_,
    }
0c2f05af031a0f672fb28ce7728422fd0b71182b
17,378
def superset_data_db_alias() -> str:
    """The alias of the database that Superset reads data from"""
    return 'superset-data-read'
c6bab8ae745f915c442145bbc7c4bcffa4141310
17,380
def _make_particle_visible_svg(text, particles, plidx):
    """
    Takes svg file and makes particle visible at specified file location.
    """
    for particle in particles:
        lidx = text.find("label=\"%s\"" % str(particle + 1), plidx)
        text = text[:lidx] + text[lidx:].replace("display:none", "display:inline", 1)
    return text
f767fa8e88647e668d3b2aba973c5759c221e35b
17,381
from typing import AnyStr


def remove_until(string: AnyStr, marker: AnyStr) -> AnyStr:
    """Remove all chars in string until string starts with marker"""
    # While string doesn't start with marker and isn't empty
    while not string.startswith(marker) and string:
        string = string[1:]  # Remove first char
    return string
8a408c6d350dd818f835fb04a9b55a651ef3a6e5
17,382
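Illustrative calls to remove_until; when the marker never appears, the whole string is consumed:

assert remove_until("hello world", "world") == "world"
assert remove_until("abc", "xyz") == ""  # marker absent: everything stripped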
def _calc_bs(tp: int, fp: int, fn: int) -> float:
    """
    Calculate the bias score (frequency bias): (TP + FP) / (TP + FN).

    :param tp: true positives
    :param fp: false positives
    :param fn: false negatives
    :return: the bias score
    """
    return (tp + fp) / (tp + fn)
a48a6284971d50293ba35a1191af11241c257097
17,383
def shifted_ewm(series, alpha, adjust=True):
    """
    Calculate exponential weighted mean of shifted data.

    series - vector
    alpha - 0 < alpha < 1
    """
    return series.shift().ewm(alpha=alpha, adjust=adjust).mean()
7661e68c9ed5bbd1342a87e0d6cb45001e00005c
17,384
import os


def get_config_from_env():
    """
    Retrieve the api key and secret from the environment.

    Raises:
        RuntimeError: If the key or secret is not found in the environment.

    Returns:
        Dict: config with "api_key" and "api_secret"
    """
    api_key = os.getenv("PODCAST_INDEX_API_KEY")
    if api_key is None:
        error_msg = "Could not find PODCAST_INDEX_API_KEY environment variable"
        raise RuntimeError(error_msg)

    api_secret = os.environ.get("PODCAST_INDEX_API_SECRET")
    if api_secret is None:
        error_msg = "Could not find PODCAST_INDEX_API_SECRET environment variable"
        raise RuntimeError(error_msg)

    config = {"api_key": api_key, "api_secret": api_secret}
    return config
7c81e57c9f88f35231ee89697c0521d1830e5a72
17,386
def _fully_qualified_typename(cls):
    """Returns a 'package...module.ClassName' string for the supplied class."""
    return '{}.{}'.format(cls.__module__, cls.__name__)
f290a5fb3394f151476f5cf3b4785c5935e942b1
17,387
def user_prompt(prompt_string, default=None, inlist=None):
    """
    Takes a prompt string, and asks user for answer;
    sets a default value if there is one;
    keeps prompting if the value isn't in inlist;
    splits a string list with commas into a list.
    """
    prompt_string = '%s [%s]: ' % (prompt_string, default) if default else prompt_string
    output = input(prompt_string)
    output = default if output == '' else output
    if inlist:
        assert isinstance(inlist, list)
        while output not in inlist:
            output = input(prompt_string)
    output = [x.strip() for x in output.split(',')] if (
        isinstance(output, str) and ',' in output) else output
    return output
5879c8cd7853426d9c94b763292f6b04e9f26e78
17,388
import os


def check_join(root_path: str, *args) -> str:
    """
    Check that the joined path stays inside root_path; raise if it escapes.

    :param root_path: the root path
    :param args: path components to join
    :return: the absolute joined path
    """
    root_path = os.path.abspath(root_path)
    result_path = os.path.abspath(os.path.join(root_path, *args))
    # compare against root_path plus a separator so that e.g. "/root2"
    # does not pass a check against "/root"
    if result_path != root_path and not result_path.startswith(root_path + os.sep):
        raise ValueError('Illegal path')
    return result_path
717b418f78164954ce945a6ef2f1778214124d90
17,390
def orders_service(create_service_meta):
    """
    Orders service test instance with `event_dispatcher` dependency mocked
    """
    return create_service_meta('event_dispatcher')
7c6cc11281373ff02d3e5c4c1843ab363283376a
17,391
def kml_folder(name="", contents=""):
    """Return kml folders."""
    kmlfolder = """
    <Folder>
      <name>%s</name>
      <visibility>1</visibility>
      %s
    </Folder>
    """
    return kmlfolder % (name, contents)
3529f6ec7b311fc7290ab3263d7c26a7225c667c
17,393
import os
import shutil


def delete_dir(path):
    """
    Deletes the specified directory if it exists. All files inside the
    directory will be deleted.

    :param path: Path of the directory to delete.
    :return: True if the path was a directory and was deleted.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
        return True
    return False
249378e3ec2c28d649a0b85434b877d656f13d19
17,394
import re


def remove_links(text):
    """
    Method used to remove the occurrences of links from the text

    Parameters:
    -----------------
    text (string): Text to clean

    Returns:
    -----------------
    text (string): Text after removing links.
    """
    # Removing all the occurrences of links that start with https
    remove_https = re.sub(r"http\S+", "", text)
    # Remove all the occurrences of text that ends with .com
    text = re.sub(r"\ [A-Za-z]*\.com", " ", remove_https)
    return text
0bf0208459f0c93e7bac2848476959791369b5c0
17,395
def rgb_to_hsl(rgb_array):
    """!
    @brief Convert rgb array [r, g, b] to hsl array [h, s, l].
    @details RGB where r, g, b are in the set [0, 255].
             HSL where h in the set [0, 359] and s, l in the set [0.0, 100.0].
             Formula adapted from
             https://www.rapidtables.com/convert/color/rgb-to-hsl.html
    @param rgb_array RGB array [r, g, b].
    @return HSL array [h, s, l].
    """
    r, g, b = rgb_array
    r, g, b = (r / 255), (g / 255), (b / 255)
    min_color, max_color = min(r, g, b), max(r, g, b)
    h, s, l = None, None, ((max_color + min_color) / 2)
    if min_color == max_color:
        h = s = 0  # Color is grayscale/achromatic.
    else:
        color_range = max_color - min_color
        s = color_range / (1 - abs(2 * l - 1))
        if max_color == r:
            h = ((g - b) / color_range) % 6
        elif max_color == g:
            h = (b - r) / color_range + 2
        else:
            h = (r - g) / color_range + 4
        h = round(h * 60)  # Degrees.
    s = round(s * 100, 1)  # Percentage [0% - 100%], one decimal place.
    l = round(l * 100, 1)  # Percentage [0% - 100%], one decimal place.
    return [h, s, l]
2c197b7966f5248566fdff41a62bf3f89c222c48
17,396
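A couple of illustrative conversions with rgb_to_hsl: pure red maps to hue 0 with full saturation, and any grey has zero saturation:

assert rgb_to_hsl([255, 0, 0]) == [0, 100.0, 50.0]
assert rgb_to_hsl([128, 128, 128])[1] == 0  # greys are achromatic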
import argparse


def build_parser(args):
    """
    A method to handle argparse.

    >>> args = build_parser(None)
    >>> # args = Namespace(archiveonly=False, json=False, location=False,
    ... #                  paths=[[]], verbose=True)
    >>> print(args.verbose)
    True
    """
    parser = argparse.ArgumentParser(
        usage='$ python freeze.py',
        description='''Turn the flask app into a bunch of html files.''',
        epilog='')
    parser.add_argument("-v", "--verbose", dest="verbose", default=False,
                        action="store_true")
    parser.add_argument(dest="paths", action="append", nargs="*")
    return parser.parse_args()
7cb544e216d6578bf6aaa2e25d493efdff199fa6
17,397
import hashlib


def get_hash(a_string, algorithm="md5"):
    """str => str

    Return the hash of a string calculated using various algorithms.

    .. code-block:: python

        >>> get_hash('prova','md5')
        '189bbbb00c5f1fb7fba9ad9285f193d1'
        >>> get_hash('prova','sha256')
        '6258a5e0eb772911d4f92be5b5db0e14511edbe01d1d0ddd1d5a2cb9db9a56ba'
    """
    if algorithm == "md5":
        return hashlib.md5(a_string.encode()).hexdigest()
    elif algorithm == "sha256":
        return hashlib.sha256(a_string.encode()).hexdigest()
    else:
        raise ValueError("algorithm {} not found".format(algorithm))
5ab144e1aa2e28aa6d80501c252f392cd4ec526e
17,398
def print_type(calendar_lines):
    """
    Function: print_type
    Description: Builds the list of all event types in the csv file
    Input: calendar_lines - object for csv.reader for user's calendar file
    Output: Returns the list of all event types, as a list and as a string
    """
    list_types = ''
    rows = []
    for line in calendar_lines:
        if len(line) > 0:
            rows.append(line)
            list_types = list_types + "\nEvent Type: " + str(line[0]) + \
                " preferred time range from " + str(line[1]) + " to " + str(line[2])
    temp1 = [rows, str(list_types)]
    return temp1
889c18efa21142330125b6e38f0e6b539fbca362
17,399
def logstash_processor(_, __, event_dict):
    """
    Adds @version field for Logstash.
    Puts event in a 'message' field.
    Serializes timestamps in ISO format.
    """
    if 'message' in event_dict and 'full_message' not in event_dict:
        event_dict['full_message'] = event_dict['message']
    event_dict['message'] = event_dict.pop('event', '')
    for key, value in event_dict.items():
        if hasattr(value, 'isoformat') and callable(value.isoformat):
            event_dict[key] = value.isoformat() + 'Z'
    event_dict['@version'] = 1
    event_dict['_type'] = event_dict['type'] = 'feedhq'
    return event_dict
f395bc7e4a7c09cdbe9ef29c3dbfdd45a448c21e
17,400
def normalise_series(series):
    """Normalise a Pandas data series.

    i.e. subtract the mean and divide by the standard deviation
    """
    ave = series.mean()
    stdev = series.std()
    return (series - ave) / stdev
97d53c0697a56e5ab559d2564c5d7386125ed254
17,402
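A usage sketch for normalise_series (assumes pandas is installed); the result has mean ~0 and sample standard deviation ~1:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
z = normalise_series(s)
assert abs(z.mean()) < 1e-12
assert abs(z.std() - 1.0) < 1e-12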
def daxpy(n, da, dx, incx, dy, incy):
    """Constant times a vector plus a vector: dy := dy + da*dx.

    -- Reference BLAS level1 routine (version 3.8.0) --
    -- Reference BLAS is a software package provided by Univ. of Tennessee,
       Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
    November 2017
    """
    if n <= 0 or da == 0.0:
        return dy
    if incx == 1 and incy == 1:
        # code for both increments equal to 1, with a clean-up loop so the
        # main loop can be unrolled by four
        m = n % 4
        for i in range(m):
            dy[i] = dy[i] + da * dx[i]
        for i in range(m, n, 4):
            dy[i] = dy[i] + da * dx[i]
            dy[i + 1] = dy[i + 1] + da * dx[i + 1]
            dy[i + 2] = dy[i + 2] + da * dx[i + 2]
            dy[i + 3] = dy[i + 3] + da * dx[i + 3]
    else:
        # code for unequal increments or equal increments not equal to 1;
        # start indices are 0-based here, unlike the Fortran original
        ix = 0
        iy = 0
        if incx < 0:
            ix = (1 - n) * incx
        if incy < 0:
            iy = (1 - n) * incy
        for i in range(n):
            dy[iy] = dy[iy] + da * dx[ix]
            ix += incx
            iy += incy
    return dy
2e88028584e54b33407afacb0b939459455cfcdf
17,403
def is_empty_placeholder(page, slot):
    """A template filter to determine if a placeholder is empty.

    This is useful when we don't want to include any wrapper markup in our
    template unless the placeholder actually contains plugins.
    """
    placeholder = page.placeholders.get(slot=slot)
    return not placeholder.cmsplugin_set.exists()
032f6e1d038a10e0f6c5459648dcabaf7e4c0259
17,404
import os
import re


def find_version(*relative_path_parts):
    """
    Find the version string in a file relative to the current directory.

    :param relative_path_parts: list of path parts relative to the current
        directory where the file containing the __version__ string lives
    :type relative_path_parts: list[str]
    :rtype: str
    """
    currdir = os.path.abspath(os.path.dirname(__file__))
    version_file = os.path.join(currdir, *relative_path_parts)
    with open(version_file, 'r') as f:
        match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
942f76266760c9de58d63f3ceab26aed42c3b4c7
17,405
import time


def generate_genesis_block():
    """This function passes a list to merkle_tree.py that creates the root
    for the genesis block."""
    # time.mktime() generates a time_stamp in unix time.
    time_stamp = time.mktime(time.strptime('11/02/2020-07:59AM', '%m/%d/%Y-%I:%M%p'))
    # time.strftime() formats the unix time into a human readable timestamp string.
    start_timestamp = time.strftime('%m/%d/%Y-%I:%M%p', time.localtime(time_stamp))
    genesis_block = ['0', '0', "Nonce", start_timestamp]
    return genesis_block
6072df4bfdffde3ef97aa69759349208645fad5b
17,406
def get_smiles_of_user_entity_ids(cursor, unification_table, user_entity_ids):
    """Get the smiles using their BIANA user entity ids"""
    query_smiles = ("""SELECT SM.value, DB.databaseName
                       FROM externalEntitySMILES SM, {} U, externalEntity E,
                            externalDatabase DB
                       WHERE U.externalEntityID = SM.externalEntityID
                         AND SM.externalEntityID = E.externalEntityID
                         AND E.externalDatabaseID = DB.externalDatabaseID
                         AND U.userEntityID = %s
                    """.format(unification_table))
    print('\nRETRIEVING SMILES IDS ASSOCIATED TO USER ENTITY IDS...\n')
    ueid_to_smiles_to_databases = {}
    for ueid in user_entity_ids:
        cursor.execute(query_smiles, (ueid,))
        for row in cursor:
            smiles, database = row
            #print(ueid, smiles)
            ueid_to_smiles_to_databases.setdefault(ueid, {})
            ueid_to_smiles_to_databases[ueid].setdefault(smiles, set()).add(database)
    print('NUMBER OF USER ENTITIES ASSOCIATED WITH SMILES IDS: {}'.format(
        len(ueid_to_smiles_to_databases)))
    return ueid_to_smiles_to_databases
d59175807dde1ad19bb14952eed360363f262992
17,408
def anti_join(df1, df2, **kwargs):
    """
    Anti-joins two dataframes.

    :param df1: dataframe
    :param df2: dataframe
    :param kwargs: keyword arguments as passed to pd.DataFrame.merge
        (except for 'how'). Specifically, need join keys.
    :return: dataframe
    """
    return df1.merge(df2, how='left', indicator=True, **kwargs) \
        .query('_merge != "both"') \
        .drop('_merge', axis=1)
6fdc7481da6728b51549c072879204a6c3d2bcd6
17,410
def _print_final_result_of_plugin(
        plugin, compute_ids, results, out_plugins, out_plugin):
    """Print final results of plug-in.

    Keyword arguments:
    plugin -- plug-in name
    compute_ids -- list of compute node IDs
    results -- results list
    out_plugins -- list of out plug-ins
    out_plugin -- used out plug-in
    """
    # The four supported out plug-ins share identical pass/fail/skip logic,
    # so the per-plug-in branches are collapsed into one membership test.
    print_line = ''
    for id in compute_ids:
        if out_plugin in ('Gnocchi', 'AODH', 'SNMP', 'CSV'):
            if (id, out_plugin, plugin, True) in results:
                print_line += ' PASS   |'
            elif (id, out_plugin, plugin, False) in results:
                print_line += ' FAIL   |'
            else:
                print_line += ' SKIP   |'
        else:
            print_line += ' SKIP   |'
    return print_line
6ce8f6385f13c51f4775a1e2a91c2dfe72ef8723
17,411
import functools
import inspect

import six


def map_arg(**maps):
    """
    Apply a mapping on certain argument before calling the original function.

    Args:
        maps (dict): {argument_name: map_func}
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if six.PY2:
                argmap = inspect.getcallargs(func, *args, **kwargs)
            else:
                # getcallargs was deprecated since 3.5
                sig = inspect.signature(func)
                argmap = sig.bind_partial(*args, **kwargs).arguments
            for k, map_func in six.iteritems(maps):
                if k in argmap:
                    argmap[k] = map_func(argmap[k])
            return func(**argmap)
        return wrapper
    return deco
11f74f966c60c7baeeb516da5709f1dfdac041e7
17,412
import math


def get_n(k, e, n_max):
    """
    TS 38.212 section 5.3.1
    """
    cl2e = math.ceil(math.log2(e))
    if (e <= (9 / 8) * 2 ** (cl2e - 1)) and (k / e < 9 / 16):
        n1 = cl2e - 1
    else:
        n1 = cl2e

    r_min = 1 / 8
    n2 = math.ceil(math.log2(k / r_min))

    n_min = 5
    n = max(min(n1, n2, n_max), n_min)
    return 2 ** n
4129821ac4c89c47d8c5b4391a77da83cfee2505
17,414
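A worked example for get_n with illustrative numbers: K=140 information bits into E=400 coded bits with n_max=9 selects a mother polar code of length 512 (n1=9, n2=11, so n=min(9,11,9)=9):

assert get_n(k=140, e=400, n_max=9) == 512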
def architecture_is_64bit(arch):
    """
    Check if the architecture specified in *arch* is 64-bit.

    :param str arch: The value to check.
    :rtype: bool
    """
    return bool(arch.lower() in ('amd64', 'x86_64'))
4303a53c3d1c8c1e844593aed3203dbedb448d61
17,415
import re


def match(text, pattern, limit=-1):
    """
    Matches all or the first "limit" number of occurrences of the specified
    pattern in the provided text.

    :param text: A String of characters. This is the text in which we want
        to find the pattern.
    :param pattern: The pattern to look for. Expected to be a regular
        expression.
    :param limit: The number of occurrences of the pattern to be returned.
        If specified, the method returns the first "limit" number of
        occurrences of the pattern in the text (or all occurrences,
        whichever is lesser).
    :return: A list of matching strings and their starting and ending index
        values in the format (matching_text, start_index, end_index)
    """
    matcher = re.compile(pattern)
    matches = []
    count = 0  # renamed from `iter`, which shadowed the builtin
    for m in matcher.finditer(text):
        entry = (m.group(), m.start(), m.end())
        matches.append(entry)
        count += 1
        if limit != -1 and count == limit:
            break
    return matches
656c49ebd197e538acd1c57eb4bc1daf98e5639a
17,416
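Illustrative use of match, with and without a limit:

text = "cat catalog cattle"
assert match(text, r"cat") == [("cat", 0, 3), ("cat", 4, 7), ("cat", 12, 15)]
assert match(text, r"cat", limit=2) == [("cat", 0, 3), ("cat", 4, 7)]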
import os


def rootdir() -> str:
    """The root directory of the test suite."""
    return os.path.dirname(os.path.abspath(__file__))
953df3fca8b2a277045f283484546f36e932419f
17,418
import logging


def filter_data(dfs, patient_ids):
    """
    Filters the given data frames to the given patient IDs.

    :param List[pd.DataFrame] dfs
    :param pd.Series[int] patient_ids
    :rtype List[pd.DataFrame]
    """
    logging.info('Filtering visit data')
    results = [df.loc[df.index.intersection(patient_ids)] for df in dfs]
    logging.info('Results are tables with shapes {}'.format(
        [x.shape for x in results]))
    return results
8ed5ee453958304cd67e17fc9a03442be6bfdec5
17,420
def _getVersionTuple(v):
    """Convert version string into a version tuple for easier comparison."""
    return tuple(map(int, (v.split("."))))
57d6c1edbfb2ec66bfcc875fe511357266ffc816
17,421
def eqn9_rule(model, g, g1, d, tm, s):
    """Distribution units balance in each grid cell."""
    if tm > 1:
        return model.num_dist[g, g1, d, tm, s] == model.num_dist[g, g1, d, tm - 1, s] \
            + model.num_dist_invest[g, g1, d, tm, s]
    else:
        return model.num_dist[g, g1, d, tm, s] == model.num_dist_invest[g, g1, d, tm, s] \
            + model.INIT_DIST[g, g1, d, tm]
09cf2bb865a69f3712712974fdd8944c762537dc
17,422
def to_ssml(text):
    """Adds SSML headers to string."""
    return '<speak>' + text + '</speak>'
0f2fda09507e09c5fdf64d5df2e50630b26629ec
17,424
def remove_headers(markdown):
    """Remove MAINTAINER and AUTHOR headers from md files."""
    for header in ['# AUTHOR', '# MAINTAINER']:
        if header in markdown:
            markdown = markdown.replace(header, '')
    return markdown
5e25c86c72819e42d09459d77e57fa3267748797
17,425
import logging
import time


def timed_func(process_name):
    """
    Adds printed time output for a function.

    Will print out the time the function took as well as label this output
    with process_name.

    :param process_name: human name of the process being timed
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            start = time.time()
            ret = f(*args, **kwargs)
            logging.getLogger("").info(
                "Elapsed time for {}: {}".format(process_name, time.time() - start))
            return ret
        return wrapper
    return decorator
e07c9b9a72df8b06308de0812d07428cf18af325
17,426
def is_start_byte(b: int) -> bool:
    """Check if b is a start character byte in utf8.

    See https://en.wikipedia.org/wiki/UTF-8 for encoding details

    Args:
        b (int): a utf8 byte

    Returns:
        bool: whether or not b is a valid starting byte
    """
    # a non-start (continuation) byte has encoding 10xxxxxx
    return (b >> 6) != 2
8d27198671436c4e9accd80198dfe934c43f679b
17,427
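A usage sketch for is_start_byte: counting start bytes in a UTF-8 encoding counts code points, since continuation bytes are the only ones matching 10xxxxxx:

s = "héllo"  # 'é' encodes as two bytes in UTF-8
assert sum(is_start_byte(b) for b in s.encode("utf-8")) == len(s)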
def get_s3item_md5(item):
    """
    A remote item's md5 may or may not be available, depending on whether
    or not it has been downloaded. If a download hasn't occurred, the
    checksum can be fetched using the file's HTTP ETag.
    """
    if item.md5 is not None:
        return item.md5
    else:
        # Return the ETag with any quotes removed:
        return item.etag.replace('"', '').replace("'", "")
121c2f3b5d2159e23f37a0770b54e818a7d712bd
17,429
from typing import List


def list_params(pop: list, gen: int, lamarck: bool, multicore: bool,
                **extra_params: dict) -> List:
    """
    Internal function to list execution parameters. For advanced users only.

    Parameters
    ----------
    pop : list
        List of individuals.
    gen : int
        Number of generations.
    lamarck : bool
        If Lamarckian Evolution is used.
    multicore : bool
        If parallelism is used.
    **extra_params : dict
        Extra parameters. For details, please check:
        https://github.com/PonyGE/PonyGE2/wiki/Evolutionary-Parameters.

    Returns
    -------
    param_list : List
        List of parameters.
    """
    param_list = []
    if 'population_size' not in extra_params.keys():
        param_list.append('--population_size={0}'.format(str(pop)))
    if 'generations' not in extra_params.keys():
        param_list.append('--generations={0}'.format(str(gen)))
    if multicore and 'multicore' not in extra_params.keys():
        param_list.append("--multicore")
    if lamarck and 'lamarck' not in extra_params.keys():
        param_list.append("--lamarck")
    for (key, val) in extra_params.items():
        if val == "True":
            param_list.append("--" + key)
        elif val == "False" or val == "":
            continue
        else:
            param_list.append("--{0}={1}".format(key, val))
    return param_list
d08629029f24a85df1adaeeb3db865c2b4a9507f
17,430
import re


def isdatauri(value):
    """
    Return whether or not given value is base64 encoded data URI such as an
    image.

    If the value is base64 encoded data URI, this function returns ``True``,
    otherwise ``False``.

    Examples::

        >>> isdatauri('data:text/plain;base64,Vml2YW11cyBmZXJtZW50dW0gc2VtcGVyIHBvcnRhLg==')
        True

        >>> isdatauri('dataxbase64data:HelloWorld')
        False

    :param value: string to validate base64 encoded data URI
    """
    data_uri = re.compile(
        r"\s*data:([a-zA-Z]+/[a-zA-Z0-9\-+]+(;[a-zA-Z\-]+=[a-zA-Z0-9\-]+)?)?"
        r"(;base64)?,[a-zA-Z0-9!$&',()*+,;=\-._~:@/?%\s]*\s*$")
    return bool(data_uri.match(value))
43a50554c17b1fd180a9234f2a4631689f14c823
17,431
def load_inspection_page(filename):
    """Opens the output of a previously saved query result."""
    if filename == 'file':
        filename = 'inspection_page.html'
    with open(filename) as file:
        content = file.read()
    return content
86c729703dfe62570531947a31c0ad0fbf9ba297
17,432
def list_all_items_inside(_list, *items):
    """Are ALL of these items in the list?"""
    return all([x in _list for x in items])
5f7f2b93d966a7e7fffeede3924a7c86309ef90f
17,433
import re
from datetime import datetime


def convert_to_datetime(date_str):
    """Receives a date str and convert it into a datetime object"""
    date_str = re.findall(r"\s(.+)\s", date_str)[0]
    return datetime.strptime(date_str, "%d %b %Y %X")
4d5d7f54ae54c8e9b26d446755739e562d7ff9a8
17,434
def outer(x, y):
    """Compute the outer product of two one-dimensional lists and return a
    two-dimensional list with the shape (length of `x`, length of `y`).

    Args:
        x (list): First list, treated as a column vector. 1 dimensional.
        y (list): Second list, treated as a row vector. 1 dimensional.

    Returns:
        list: Outer product between x and y.
    """
    return [[x_i * y_i for y_i in y] for x_i in x]
24f912d4a3152be96d8656f5a387fa526320fd19
17,435
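An illustrative outer product; the result has shape (len(x), len(y)):

assert outer([1, 2], [3, 4, 5]) == [[3, 4, 5], [6, 8, 10]]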
def _get_step_inout(step):
    """Retrieve set of inputs and outputs connecting steps."""
    inputs = []
    outputs = []
    assert step.inputs_record_schema["type"] == "record"
    for inp in step.inputs_record_schema["fields"]:
        source = inp["source"].split("#")[-1].replace("/", ".")
        # Check if we're unpacking from a record, and unpack from our object
        if "valueFrom" in inp:
            attr_access = "['%s']" % inp["name"]
            if inp["valueFrom"].find(attr_access) > 0:
                source += ".%s" % inp["name"]
        inputs.append({"id": inp["name"], "value": source})
    assert step.outputs_record_schema["type"] == "record"
    for outp in step.outputs_record_schema["fields"]:
        outputs.append({"id": outp["name"]})
    return inputs, outputs
d2cc2002792b83d01bcdb7566332c8f37c0aae99
17,436
def get_intersect_list(lst1, lst2):
    """
    Find the intersection of two lists.

    :param lst1: List One
    :param lst2: List Two
    :return: A list of intersect elements in the two lists
    """
    lst3 = [value for value in lst1 if value in lst2]
    return lst3
c7895c948b3a5132bd769e9320e40139c8aac3ad
17,437
def _choose_mtx_rep(adata, use_raw=False, layer=None):
    """Get gene expression from anndata depending on use_raw and layer"""
    is_layer = layer is not None
    if use_raw and is_layer:
        raise ValueError(
            "Cannot use expression from both layer and raw. You provided: "
            f"'use_raw={use_raw}' and 'layer={layer}'"
        )
    if is_layer:
        return adata.layers[layer]
    elif use_raw:
        return adata.raw.X
    else:
        return adata.X
0998c0c74b0d9e97f4cf2485d6039f0475f9541c
17,438
def count_unauth_purchases(request):
    """
    Count number of recipes in a shoplist of an unauthorized user.
    """
    user_ip = request.META['REMOTE_ADDR']
    if request.session.get(user_ip):
        return len(set(request.session[user_ip]))
    return 0
ec590cd823188699d1dad212ae343647e98c1494
17,439
import uuid


def is_uuid(u):
    """Validator for plumbum prompt"""
    if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
        return u
    return False
e71392315625f081172c47102415fbdabe6429cd
17,440
import re


def get_urls(filename):
    """
    Returns a sorted list of puzzle URLs from the given log file.

    The hostname comes from the filename and puzzle URLs contain the word
    puzzle. If a URL is duplicated, then we only include it once.
    """
    under_index = filename.find("_")
    log_index = filename.find(".log")
    prefix = "http://" + filename[under_index + 1:log_index]
    with open(filename, "r") as f:
        lines = f.read().split("\n")
    pattern = r"GET (\S*/puzzle/\S*\w+-(\w+).jpg) HTTP"
    regex = re.compile(pattern)
    url_dict = {}
    for line in lines:
        match = regex.search(line)
        if match:
            url_dict[prefix + match.group(1)] = match.group(2)
    return sorted(url_dict, key=lambda u: url_dict[u])
fd3da48638b398777c60af51db2fb46ea6dffcd5
17,441
def distribute(x, values, level):
    """Distribute elements in values on nth level of nested lists.

    This creates an additional nested level of lists.

    Example:
        distribute([[[1,2], [1,3]], [[2,2]]], ['a', 'b'], 2)
        --> [[ [[1,2,'a'], [1,2,'b']],
               [[1,3,'a'], [1,3,'b']] ],
             [ [[2,2,'a'], [2,2,'b']] ]]
    """
    if level == 0:
        return [x + [v] for v in values]
    else:
        return [distribute(y, values, level - 1) for y in x]
0584af83a7c928552e6b560dbfab11115064e7ac
17,444
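A minimal illustration of distribute at level 1: each innermost list is fanned out once per value:

out = distribute([[1], [2]], ['a', 'b'], 1)
assert out == [[[1, 'a'], [1, 'b']], [[2, 'a'], [2, 'b']]]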
import re


def normalize_layout_string(layout_string):
    """Normalize a layout string such that it is characterwise identical to
    the output of the 'dump' command."""
    layout_string = layout_string.replace('\n', ' ')
    # drop multiple whitespace
    layout_string = re.sub(r'[ ]+', ' ', layout_string)
    # drop whitespace after opening parenthesis
    layout_string = re.sub(r'[(] ', r'(', layout_string)
    # drop whitespace before closing parenthesis
    layout_string = re.sub(r' [)]', r')', layout_string)
    return layout_string.strip()
e333603a21edcc52a5a648c1850d976210869d12
17,445