content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import operator


def get_max_queue(queues):
    """Return the (queue, message_count) pair with the most messages.

    :param queues: mapping of queue name -> message count
    :return: tuple of (queue name, count) for the busiest queue
    """
    busiest = max(queues, key=queues.get)
    return (busiest, queues[busiest])
69b52a6bac89cc61e84f639fed06cffe7a0d697f
22,910
def set_gpu(gpu_mon, args):
    """Set GPU visibility from parsed arguments.

    Uses args.force_GPU when given, otherwise waits for args.num_GPUs free
    GPUs on the system. Stops the GPUMonitor process once GPUs are set.
    Does nothing when gpu_mon is None.

    Args:
        gpu_mon: An initialized GPUMonitor object or None
        args: argparse arguments

    Returns:
        The number of GPUs actually set (differs from args.num_GPUs when
        args.force_GPU lists more than one GPU)
    """
    num_GPUs = args.num_GPUs
    if gpu_mon is None:
        return num_GPUs
    if args.force_GPU:
        gpu_mon.set_GPUs = args.force_GPU
        num_GPUs = len(args.force_GPU.split(","))
    else:
        gpu_mon.await_and_set_free_GPU(N=num_GPUs, sleep_seconds=120)
    gpu_mon.stop()
    return num_GPUs
09c7b4a9956bd0f82666046c890b3e86cfa9d6a9
22,911
def format_output_fname(current_theme):
    """Build an output file name: capitalize each word and join with '_'."""
    words = current_theme.split(" ")
    return "_".join(word.capitalize() for word in words)
ce915566acb74f0c89d100647f3cf20ab0696347
22,912
import warnings
import ctypes


def pack(ctypes_obj):
    """Convert a :mod:`ctypes` structure into a Python byte string.

    Args:
        ctypes_obj (ctypes.Structure): The structure to serialize.

    Returns:
        The raw bytes from the memory backing *ctypes_obj*.

    .. deprecated:: 1.5
        Use ``bytes(ctypes_obj)`` instead.
    """
    warnings.warn(
        "This function is deprecated and will be removed, use ``bytes(ctypes_obj)`` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    address = ctypes.addressof(ctypes_obj)
    return ctypes.string_at(address, ctypes.sizeof(ctypes_obj))
7ae5a320e93fbcbcec09b5d5ee587f266fa75f9e
22,913
import re


def norm_string(range_val):
    """Wrap a range value in q() when it contains characters that would
    confuse range parsing, escaping any internal parentheses first.
    """
    re_badchars = re.compile(r'[\s\/]')
    re_minus = re.compile(r'((\S)+ - (\S)+)+')
    re_quoted = re.compile(r'^q\(.*\)$')
    re_int_parens = re.compile(r'[\(\)]')
    re_l_paren = re.compile(r'\(')
    re_r_paren = re.compile(r'\)')

    # Escape internal parens and wrap in q() if any are present
    if re_int_parens.search(range_val) and not re_quoted.match(range_val):
        range_val = re_l_paren.sub(r'\(', range_val)
        range_val = re_r_paren.sub(r'\)', range_val)
        range_val = 'q(%s)' % range_val

    # Wrap values containing spaces or slashes, unless already quoted
    # or shaped like an "a - b" range expression
    if re_badchars.search(range_val) and not re_quoted.match(range_val):
        if not re_minus.match(range_val):
            range_val = 'q(%s)' % range_val
    return range_val
038d9648a2c15e911ad98246dc121cd50204f217
22,914
import torch
import math


def predict_sliding(model, image, num_classes, crop_size,
                    overlap=0.25,  # controls how many crops are inferred
                    ):
    """Sliding-window inference over a large image.

    @param model: network applied to each crop
    @param image: input tensor of shape (B, C, H, W)
    @param num_classes: number of output channels
    @param crop_size: window size; crop_size = model input size
    @param overlap: fractional window overlap
    @return: averaged per-pixel probabilities of shape (B, num_classes, H, W)
    """
    B, _, H, W = image.shape
    # accumulators: summed probabilities and per-pixel prediction counts
    full_probs = torch.zeros((B, num_classes, H, W)).cuda()
    cnt_preds = torch.zeros((B, num_classes, H, W)).cuda()
    # sliding range over rows/cols
    stride = int(math.ceil(crop_size * (1 - overlap)))  # overlap -> stride
    tile_rows = int(math.ceil((H - crop_size) / stride) + 1)
    tile_cols = int(math.ceil((W - crop_size) / stride) + 1)
    num_tiles = tile_rows * tile_cols
    print("Need %i x %i = %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, num_tiles, stride))
    for row in range(tile_rows):
        for col in range(tile_cols):
            # anchor the bottom-right corner so edge tiles stay in-bounds,
            # then derive the top-left corner from it
            x2 = min(col * stride + crop_size, W)
            y2 = min(row * stride + crop_size, H)
            x1 = max(int(x2 - crop_size), 0)
            y1 = max(int(y2 - crop_size), 0)
            crop = image[:, :, y1:y2, x1:x2]
            out = model(crop)
            # softmax-normalized scores average more accurately
            probs = out.softmax(dim=1)
            # map crop position back into the full-image accumulators
            full_probs[:, :, y1:y2, x1:x2] += probs
            cnt_preds[:, :, y1:y2, x1:x2] += 1
    full_probs /= cnt_preds
    return full_probs
0eb34038c9fe9bdee4f3f5881ca09eddf4912011
22,915
def pixel_to_terrain_type(pixel):
    """Convert an RGBA pixel to a terrain type.

    The B channel forms the lower 8 bits; the A channel (when the image
    has one) forms the upper 8 bits.
    """
    terrain = pixel[2] & 0xff
    if len(pixel) > 3:
        terrain |= (pixel[3] & 0xff) << 8
    return terrain
a7a5538756b0566b6a2e978bf3a8fd28cbb0258b
22,916
def get_gain(l_ch, r_ch):
    """Compare left and right channel levels.

    :param l_ch: left channel value
    :param r_ch: right channel value
    :return: 0 when the left channel is louder, otherwise the absolute
        difference between the two channels
    """
    if l_ch > r_ch:
        return 0
    return abs(l_ch - r_ch)
0f7095c565e9d10619889ef0e26a2898be2a82ec
22,917
def armstrong_number(n: int) -> bool:
    """Return True if n is an Armstrong number or False if it is not.

    >>> armstrong_number(153)
    True
    >>> armstrong_number(200)
    False
    >>> armstrong_number(1634)
    True
    >>> armstrong_number(0)
    False
    >>> armstrong_number(-1)
    False
    >>> armstrong_number(1.2)
    False
    """
    if not isinstance(n, int) or n < 1:
        return False

    # Count the digits of n
    digit_count = 0
    remaining = n
    while remaining > 0:
        digit_count += 1
        remaining //= 10

    # Sum each digit raised to the digit count
    total = 0
    remaining = n
    while remaining > 0:
        total += (remaining % 10) ** digit_count
        remaining //= 10
    return n == total
0a88312a08c7d5e6d3df660b164ed28e0a4a40d5
22,919
def remove_transition(net, trans):
    """Remove a transition (and its incident arcs) from a Petri net.

    Parameters
    ----------
    net
        Petri net, modified in place
    trans
        Transition to remove

    Returns
    ----------
    net
        The same Petri net
    """
    if trans not in net.transitions:
        return net
    # detach arcs feeding into the transition
    for arc in trans.in_arcs:
        arc.source.out_arcs.remove(arc)
        net.arcs.remove(arc)
    # detach arcs leaving the transition
    for arc in trans.out_arcs:
        arc.target.in_arcs.remove(arc)
        net.arcs.remove(arc)
    net.transitions.remove(trans)
    return net
85283735ec41e76ff491e562f0b2d2d115fd4114
22,923
def orbstat(orb):
    """Return various status parameters about the orbserver."""
    return orb.stat()
e834f8fd924a9d4ecff4f25c51e6072c59acec10
22,924
def get_full_intent(intent_json):
    """Recover the full intent json from a standardized intent.

    Fields omitted because their value is 'all' (or 2 for
    max_connections) are added back to the intent json, in place.
    """
    defaults = (
        ('departure_time', 'all'),
        ('return_time', 'all'),
        ('class', 'all'),
        ('max_connections', 2),
        ('airline_preference', 'all'),
    )
    for field, default in defaults:
        intent_json.setdefault(field, default)
    return intent_json
3565c36a9bd07f4f84efb4542dc5132ee76b28cb
22,925
def getBestCols(Matrix, numCols):
    # expects matrix of floats, except header
    # no row names
    """Return the indices of the numCols columns with the highest totals.

    Given a matrix of estimated proportions per sample (first row is a
    header), returns the indices of the columns with the highest summed
    values, in descending order of total.

    Bug fix: the original wrapped the selection in a redundant
    ``for line in Matrix`` loop that re-ran the selection on totals it had
    already destroyed (set to -1), so it returned the *last* batch of
    indices instead of the best ones.
    """
    totals = [0.0] * len(Matrix[1])
    for row in Matrix[1:]:
        for i, cell in enumerate(row):
            totals[i] += float(cell)
    # stable sort: ties resolve to the lower index, matching the original
    # repeated list.index() selection
    order = sorted(range(len(totals)), key=lambda i: totals[i], reverse=True)
    return order[:numCols]
f18aa701b16e001bcceedd2a9d1271fbd5902b58
22,928
import platform
import ntpath
import os
import shlex


def path_info(path_str):
    """Retrieve basic information about the given path.

    Splits on the platform's path separator (ntpath on Windows, os.path
    elsewhere), then tokenizes the final component with shlex; the first
    token is the file name, the rest are its arguments.

    Parameters
    ----------
    path_str : str
        The path from which to extract information.

    Returns
    -------
    tuple
        base dir, file name, list of args
    """
    path_mod = ntpath if platform.system() == "Windows" else os.path
    dir_name, tail = path_mod.split(path_str)
    tokens = shlex.split(tail)
    return dir_name, tokens[0], tokens[1:]
fdc38f5ae4b4fca60731716a50104af1f3f0c98a
22,929
def dim_up_down_up_sequence(gamut, idex, frames):
    """Build an up/down/up color-intensity cycle resampled to `frames` steps.

    The compiled cycle is gamut[idex:] + reversed(gamut) + gamut[:idex],
    i.e. twice the gamut length.

    Generalization: the resampling ratio was hard-coded to 512 (assuming a
    256-entry gamut); it is now derived from the actual cycle length, so
    any gamut size works. Behavior is unchanged for 256-entry gamuts.
    """
    cseq = gamut[idex:] + gamut[::-1] + gamut[0:idex]
    # resample the cycle down (or up) to the requested frame count
    reframed = []
    ratio = float(len(cseq)) / frames
    for frameidx in range(frames):
        gamut_pos = int(round(frameidx * ratio))
        reframed.append(cseq[gamut_pos])
    return reframed
9aaf7318a0c339767e9165e34a04a96f776c84d5
22,931
def check_translations(translations, mention_text, base_word="please"):
    """Check whether any translation of `base_word` occurs in the text.

    Does not yet deal with alternate encodings.

    :param translations: translation struct with a "translations" mapping
    :param mention_text: the tweet text (expected lowercase for matching)
    :param base_word: the word being translated; must exist in the struct
    :return: True if any translated word occurs within the text
    """
    known = translations["translations"]
    if base_word not in known:
        raise Exception("Baseword '%s' does not exist in the translation struct" % base_word)
    return any(word.lower() in mention_text for word in known[base_word])
8573356bc0bbecde9daac03795d34b1be2b7b797
22,932
from typing import Iterable
from typing import Any


def gimplies(anteponents: Iterable[Any], postnents: Iterable[Any]) -> bool:
    """Logical implication: (P1 ^ ... ^ PN) => (Q1 ^ ... ^ QM).

    The formula is false only when every antecedent holds and some
    consequent does not.

    :param anteponents: left-hand side, implicitly and-ed together
    :param postnents: right-hand side, implicitly and-ed together
    :return: truth value of the implication
    """
    return not (all(anteponents) and not all(postnents))
81b12972f4004621d711e0423d2c3abe231a784f
22,933
def upper(string):
    """Return `string` converted to upper case (non-strings are str()-ed first)."""
    text = str(string)
    return text.upper()
fa2bfe354e3308aec8ee37635a0594b1d5699d6e
22,935
def quote_sh_string(string):
    """Make things that we print shell safe for copy and paste.

    Each single-quote-free segment is wrapped in single quotes, and the
    original quotes are re-inserted as escaped \\' sequences.
    """
    pieces = ["'" + part + "'" for part in string.split("'")]
    return "\\'".join(pieces)
a6a770fb39ef0e23127ea2c007cacaf4e8a13116
22,937
def take_state(dim2, vet_board, empty, o, x):
    """Encode the board as a base-3 integer state.

    Cell i contributes digit * 3**i, where digit is 0 for `empty`,
    1 for `o`, and 2 for anything else (i.e. `x`).
    """
    total = 0
    for pos in range(dim2):
        cell = vet_board[pos]
        if cell == empty:
            trit = 0
        elif cell == o:
            trit = 1
        else:
            trit = 2
        total += trit * (3 ** pos)
    return total
e6111737632df277febdcdbcdb04eff8be5cb25b
22,938
import glob
import os


def get_stats(ptrn, collect_stats=True):
    """Total size in bytes of the files matched by glob pattern `ptrn`.

    When `collect_stats` is True, also returns a list of
    (mtime, size, name) tuples sorted by descending modification time,
    then ascending size and name. (Note: this is modification time,
    st_mtime, not creation time.)

    :return: (file_stats, total_size)
    """
    stats = []
    total = 0
    for name in glob.glob(ptrn):
        size = os.path.getsize(name)
        if collect_stats:
            stats.append((os.path.getmtime(name), size, name))
        total += size
    if collect_stats:
        stats.sort(key=lambda entry: (-entry[0], entry[1], entry[2]))
    return (stats, total)
0771042443a31d73995010744fc1c71429bf159f
22,939
def get_file_extension(meta_data):
    """Get the file extension for cards on the webapp.

    Reads the doc_type of the first downloadable item.

    :return: string
    """
    first_item = meta_data["downloadable_items"][0]
    return first_item["doc_type"]
31e44510b36d6bac79992e44eb5b8bdc75db3bbe
22,940
def user_session(request):
    """Add user session information to the template context."""
    session = request.session
    return {
        'chaospizza_user': {
            'name': session.get('username', None),
            'is_coordinator': session.get('is_coordinator', False),
            'coordinated_order_slug': session.get('order_slug', None),
        }
    }
6e98c21cc30508db793dc79ef580757d71e9d6c4
22,941
def getEventTime(cursor, event_id=None, event_time=None, order="DESC"):
    """Get the event time.

    If event_time is specified, simply return it. Else if event_id is
    specified, return its time. Else return the most recent event's time.

    Args:
        cursor: Database cursor.
        event_id: Event id.
        event_time: Event time.
        order: ASC or DESC.

    Returns:
        The event time string.
    """
    if event_id is None:
        # NOTE(review): SQL built with %-interpolation; `order` is assumed
        # to be a trusted literal ("ASC"/"DESC") — verify callers.
        cursor.execute("SELECT id FROM Event ORDER BY time %s"%order)
        event_id = int(cursor.fetchone()["id"])
    if event_time is None:
        cursor.execute("SELECT time FROM Event WHERE id=%d"%event_id)
        event_time = str(cursor.fetchone()["time"])
    return event_time
0e662d92f8da79ce154bd4951bb247b083b09345
22,943
import os import subprocess import signal def _is_process_running(pid): """Whether the process with given PID is running.""" if os.name == 'nt': return str(pid) in subprocess.check_output([ 'tasklist', '/fi', f'PID eq {pid}', ]).decode() try: # os.kill throws OSError if the process with PID pid is not running. # signal.SIG_DFL is one of two standard signal handling options, it will # simply perform the default function for the signal. os.kill(pid, signal.SIG_DFL) except OSError: return False return True
c234bf97b8a65d83eecb7ae223ae9c229adfe073
22,944
def get_simproducts(indice_masterproduct, dfproducts, sim_matrix):
    """Return a dataframe of products similar to product #indice_masterproduct.

    Reads the CSR row for the master product out of `sim_matrix`, selects the
    matching rows of `dfproducts`, and appends their similarity scores as a
    'sim' column.

    Fix: operate on a `.copy()` of the iloc selection — assigning a new
    column to the raw slice triggers pandas' SettingWithCopyWarning and can
    silently fail or mutate the caller's dataframe.
    """
    # indices and similarity scores of products similar to the master product
    start = sim_matrix.indptr[indice_masterproduct]
    stop = sim_matrix.indptr[indice_masterproduct + 1]
    indices_simproducts = sim_matrix.indices[start:stop]
    similarities = sim_matrix.data[start:stop]
    # extract similar products and append the similarity column
    dfsimproducts = dfproducts.iloc[indices_simproducts].copy()
    dfsimproducts['sim'] = similarities
    return dfsimproducts
87e2665952dc6f8e73638aee55b2c637ef06b38a
22,947
def get_indent(line):
    """Count the number of leading space characters in `line`.

    :param line: string to inspect
    :return: indent length as an int
    """
    count = 0
    for ch in line:
        if ch != " ":
            break
        count += 1
    return count
feb8e00bd9c7e0198b088acceb529100438af961
22,948
import re
from datetime import datetime


def extract_date(str_date):
    """Find the first %Y-%m-%d substring and return a legitimate datetime
    together with the remainder of the string.

    If the matched digits do not form a valid date, falls back to today's
    values and applies whichever of year/month/day individually work.
    Returns (None, str_date) when no date-shaped substring is found.
    """
    pattern = re.compile(r'((\d{4})-(\d{2})-(\d{2}))')
    match = pattern.search(str_date)
    if match is None:
        return None, str_date

    year = int(match.group(2))
    month = int(match.group(3))
    day = int(match.group(4))
    # These digits may not give a legitimate Y/M/D combination
    try:
        parsed = datetime(year, month, day)
    except ValueError:
        # Use today's values as defaults; start with day=1 in case the
        # month is Feb and the day 30, etc.
        parsed = datetime.now().replace(day=1, hour=0, minute=0,
                                        second=0, microsecond=0)
        for piece in ({'year': year}, {'month': month}, {'day': day}):
            try:
                parsed = parsed.replace(**piece)
            except ValueError:
                pass

    begin = match.start()
    remainder = str_date[0:begin] + ' ' + str_date[begin + 10:]
    return parsed, remainder
fb7729e8a631a69d0e7eda4f6b62aff0589f39e4
22,949
import gzip


def read_compressed(path):
    """
    Read and return the decompressed contents of the gzipped file at `path`.
    """
    with gzip.open(path, 'rb') as f:
        return f.read()
b5806c8606fa6344a8b00fb3e52042c5f9cb49db
22,950
def __has_value(cell):
    """Check whether a cell value from a pandas dataframe is a valid string.

    The following are treated as invalid:
      * falsy values: empty cell (None), empty string (''), zero (0)
      * non-strings (covers NaN, which is not a str)
      * 'null'/'none'/'nan' in any case, and the string '0'

    Args:
        cell (Any): Value of a cell from a pandas dataframe

    Returns:
        Boolean: Whether or not the value is a valid string
    """
    if not cell:
        return False
    if not isinstance(cell, str):
        return False
    return cell.lower() not in ("none", "null", "nan") and cell != "0"
3c0e406bac02013b4f80787970715c535337b772
22,951
def get_pred_and_suc(value, times):
    """
    Return the predecessor and the successor of the value as well as
    the predecessor of the predecessor of the value in the array times.

    :param value: float
    :param times: SortedSet (assumed to contain `value`'s neighborhood;
        bisect_left gives the insertion point)
    :return: (pred_pred, pred, suc)
    """
    # Degenerate two-element set: the first element doubles as pred_pred
    if len(times) == 2:
        return times[0], times[0], times[1]
    id_before = times.bisect_left(value)
    if value == times[id_before]:
        # value is itself a member: it is its own predecessor
        pred = value
        suc = times[id_before + 1]
        # clamp pred_pred at the first element
        if id_before - 1 < 0:
            pred_pred = times[0]
        else:
            pred_pred = times[id_before - 1]
    else:
        # value falls strictly between two members
        pred = times[id_before - 1]
        suc = times[id_before]
        # clamp pred_pred at the first element
        if id_before - 2 < 0:
            pred_pred = times[0]
        else:
            pred_pred = times[id_before - 2]
    return pred_pred, pred, suc
899f2e7484bcada4b65e17ac23358a558e002e35
22,952
from typing import List


def get_origin_position_factor(matrix: List[List[float]]):
    """Calculate the average distance from the origin stop to all others.

    :matrix: list of lists containing all-to-all distances
    return float

    NOTE: the matrix is scaled to integers for the solver (d * 100), so
    the row total is divided by 100 before averaging.
    """
    origin_row = matrix[0]
    return (sum(origin_row) / 100) / len(origin_row[1:])
daa1ab06ec8a32bf72cf87814edc22e77271cfce
22,953
def map_to_filtered(rev):
    """Get the hash of rev after filtering, or None if not yet filtered.

    Equivalent to the `map` function exposed by git-filter-branch, except
    that function returns rev itself when the revision hasn't been
    filtered, and this one raises when rev maps to multiple commits.
    """
    mapfile = '../map/%s' % rev
    try:
        with open(mapfile, 'r') as fh:
            lines = fh.read().strip().split('\n')
    except IOError:
        return None
    if len(lines) != 1:
        raise RuntimeError("mapfile %s doesn't contain a single line: %s" %
                           (mapfile, str(lines)))
    return lines[0]
0ebbcb04881435c2a0a1ca217b52155ef6e0ad7f
22,954
from pathlib import Path
import os


def load_noimage_user():
    """Return the bytes of the user "no image" placeholder PNG."""
    noimage_user_path = (
        Path(os.path.dirname(__file__)) / "app/static/noimage_user.png"
    )
    return noimage_user_path.read_bytes()
37912b907bb67d81059bafa43da31813a37b7ff3
22,956
def get_hyperparameters(args):
    """Collect hyperparameters from parsed `main.py` arguments.

    Drops every `SM_CHANNEL`-style directory argument (names ending in
    '_dir') and the 'model' argument itself.

    return: Dictionary of the remaining arguments passed to `main.py`
    """
    return {
        name: value
        for name, value in vars(args).items()
        if not name.endswith('_dir') and name != 'model'
    }
daeb40eaed3dca172226d29ebd1afcd8345b8802
22,957
import base64


def decode_attachment_payload(message):
    """Decode a message payload from Base64; fall back to str(payload).

    Fix: in Python 3, invalid base64 raises ``binascii.Error`` (a subclass
    of ``ValueError``), not ``TypeError``, so the original except clause
    never caught decode failures. Both are now handled.
    """
    msg = message.get_payload()
    try:
        # The body content may be empty or not valid base64.
        msg_info = base64.b64decode(msg)
    except (TypeError, ValueError):
        msg_info = str(msg)
    return msg_info
633ab2a9572ba481bc5a348ca6717d948b4ffa06
22,958
import torch


def species_split(dataset, train_valid_species_id_list=[3702, 6239, 511145,
                                                        7227, 10090, 4932, 7955],
                  test_species_id_list=[9606]):
    """Split a dataset based on its species_id attribute.

    :param dataset:
    :param train_valid_species_id_list:
    :param test_species_id_list:
    :return: train_valid dataset, test dataset
    """
    # NB: pytorch geometric dataset objects can be indexed with slices or
    # byte tensors; byte masks are used here.
    train_valid_mask = torch.zeros(len(dataset), dtype=torch.uint8)
    for species in train_valid_species_id_list:
        train_valid_mask += (dataset.data.species_id == species)
    test_mask = torch.zeros(len(dataset), dtype=torch.uint8)
    for species in test_species_id_list:
        test_mask += (dataset.data.species_id == species)
    # every sample must belong to exactly one split
    assert ((train_valid_mask + test_mask) == 1).all()
    return dataset[train_valid_mask], dataset[test_mask]
3693eb9122baf79e41b404755fb500fcbaed7c6c
22,959
import subprocess


def branch_has_commit(upstream: str, branch: str, commit: str) -> bool:
    """Return True if `commit` is actually present in `upstream`/`branch`.

    Uses ``git merge-base --is-ancestor``, which exits 0 when the commit
    is an ancestor of the branch tip.
    """
    result = subprocess.call(
        ['git', 'merge-base', '--is-ancestor', commit,
         upstream + '/' + branch],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result == 0
d8b0dc4f22201d7a084047af4bd61da3d3622fad
22,960
def spawn(school: list[int], cycles: int, memo: dict[int, int]):
    """Count the total fish descended from `school` within `cycles` days.

    Each fish spawns 9 days after its timestamp and then every 7 days;
    `memo` caches the offspring count per spawn day so repeated timestamps
    are not re-simulated.

    Returns (total population size, memo).
    """
    school_size = len(school)
    for fish in school:
        if fish not in memo:
            # days on which this fish produces offspring
            offspring_days = list(range(fish + 9, cycles + 1, 7))
            memo[fish], memo = spawn(offspring_days, cycles, memo)
        school_size += memo[fish]
    return school_size, memo
5e13e8b139db201adddafeb66695ef0fd7583bcc
22,961
def merge(struct_a, struct_b):
    """
    Merge two struct files.

    Parameters
    ----------
    struct_a, struct_b : pandas.DataFrame
        Dataframes containing 4 columns of metabolite ID, formula,
        SMILES, and InChI.

    Returns
    -------
    struct : pandas.DataFrame
        Concatenation of both inputs with a fresh RangeIndex.

    Fix: ``DataFrame.append`` was deprecated in pandas 1.4 and removed in
    pandas 2.0; ``pandas.concat`` is the supported equivalent.
    """
    import pandas as pd  # local import: the module header is not visible in this chunk
    return pd.concat([struct_a, struct_b], ignore_index=True)
31b42c2507dddc65294fb7138e05a26e4b239e04
22,964
def skip_punishment(player, table, lied_card=None, turns_to_wait=0):
    """Punish a player with turns to skip.

    :param player: Player object
    :param table: list with cards on table
    :param lied_card: tuple with last lied card
    :param turns_to_wait: integer value of take card punishment
    :return: tuple with last lied card, integer value of turns to skip
    """
    player.turns_to_skip = turns_to_wait - 1
    player.print_foo(f'{player.name} will have to skip this and next {player.turns_to_skip} turns.')
    # the punishment has been consumed; move any lied card onto the table
    if lied_card:
        table.append(lied_card)
    return None, 0
a3f141243d78eb1919536c414c278fa1ea92637f
22,965
import torch


def mask_iou(lhs_mask, rhs_mask):
    """Compute an IoU loss between two segmentation masks.

    Args:
        lhs_mask (torch.FloatTensor): A segmentation mask of shape (B, H, W)
        rhs_mask (torch.FloatTensor): A segmentation mask of shape (B, H, W)

    Returns:
        (torch.FloatTensor): The IoU loss (1 - mean IoU), as a torch scalar.
    """
    batch_size = lhs_mask.shape[0]
    assert rhs_mask.shape == lhs_mask.shape
    intersection = lhs_mask * rhs_mask
    union = lhs_mask + rhs_mask - intersection
    iou_num = intersection.reshape(batch_size, -1).sum(dim=1)
    iou_den = union.reshape(batch_size, -1).sum(dim=1)
    iou = iou_num / (iou_den + 1e-10)  # epsilon guards against empty masks
    return 1.0 - iou.mean()
d9d9a79176117bab0caa5198b20b304eb111a141
22,966
def get_facetview_link(facetview_url, objectid, system_version=None):
    """Return a link to `objectid` in the FacetView interface.

    Fix: the query string must actually be base64-encoded for the
    ``?base64=`` parameter. The original only utf-8 encoded it, so the
    URL embedded a bytes repr like ``b'{"query"...}'``.
    """
    import base64  # local import: the module header is not visible in this chunk
    if system_version is None:
        q = '{"query":{"query_string":{"query":"_id:%s"}}}' % objectid
    else:
        q = '{"query":{"query_string":{"query":"_id:%s AND system_version:%s"}}}' % (objectid, system_version)
    b64 = base64.b64encode(q.encode('utf-8')).decode('ascii')
    if facetview_url.endswith('/'):
        facetview_url = facetview_url[:-1]
    return '%s/?base64=%s' % (facetview_url, b64)
3a4e4255cf6c05279be0acab73c97728a751bb73
22,967
from numpy import where, concatenate, repeat from numpy.random import choice, permutation def balance_samples(X, Y): """ Balances the number of positive and negative samples by duplicating samples from the smaller one. """ n_pos = Y.sum() # number of positive samples n_neg = len(Y) - n_pos # number of negative samples if n_pos == n_neg: return X, Y # Add either n_pos-n_neg negative samples or n_neg-n_pos positive samples Y = concatenate((Y, repeat(n_neg>n_pos, abs(n_pos-n_neg)))) i,(d,r) = (where(~Y),divmod(n_pos, n_neg)) if n_neg < n_pos else where(Y),divmod(n_neg, n_pos) i = concatenate((repeat(i, d-1), choice(i, r, False))) X = concatenate((X, X.take(i, 0)), 0) # Shuffle the data so all of the new samples aren't at the end ii = permutation(len(Y)) return X.take(ii, 0), Y.take(ii)
38848890b75b9b70fd53129a99c92782c9aa8d42
22,968
def calc_f1(precision: float, recall: float) -> float:
    """Compute the F1 score (harmonic mean of precision and recall).

    Fix: returns 0.0 when both precision and recall are zero instead of
    raising ZeroDivisionError.
    """
    if precision + recall == 0:
        return 0.0
    return 2 * (precision * recall) / (precision + recall)
4a20816b0f5b2457826c146da52e352c39b88b16
22,969
def selectivity(weights, thresh=0.1):
    """Compute the normalized selectivity ("peakiness") of `weights`.

    Currently measured as the fraction of weights strictly above
    `thresh`, normalized by the total number of weights.
    """
    above = sum(1 for w in weights if w > thresh)
    return above / float(len(weights))
b526070770d42f5923c410f6cc4087f858d7c265
22,970
import argparse


def parse_args(input_args=None):
    """Parse command-line arguments.

    Parameters
    ----------
    input_args : list, optional
        Input arguments. If not provided, defaults to sys.argv[1:].
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', help='Input filename.')
    parser.add_argument('-r', '--receptor', action='store_true',
                        help='Treat input as a receptor.')
    parser.add_argument('-s', '--split', action='store_true',
                        help='Split output files into one file per molecule.')
    return parser.parse_args(input_args)
1a21ac03affa8f5901490abfe465042a61e31631
22,971
def _format_size(size):
    """Format a byte count as a human-readable string.

    Copied from tracemalloc.py. Walks up the binary units, dividing by
    1024 each step, and chooses the precision by magnitude.
    """
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
        if abs(size) < 100 and unit != 'B':
            # 3 digits (xx.x UNIT)
            return '%.1f %s' % (size, unit)
        if abs(size) < 10 * 1024 or unit == 'TiB':
            # 4 or 5 digits (xxxx UNIT)
            return '%.0f %s' % (size, unit)
        size /= 1024
fce40546aa6576c4f91f32b85f3aa260e0e6129d
22,973
def subone(bv):
    """
    Subtract one bit from a bit vector (wraps around at zero).
    Mutates *bv* in place and also returns it.

    >>> print subone(BitVector(bitstring='1111'))
    1110
    >>> print subone(BitVector(bitstring='0010'))
    0001
    >>> print subone(BitVector(bitstring='0000'))
    1111

    @param bv: Bit vector to decrement
    @type bv: BitVector
    @rtype: BitVector
    """
    result = bv
    # scan from the least-significant bit: flip trailing 0s to 1,
    # then clear the first 1 encountered (classic borrow chain)
    for offset in range(1, len(bv) + 1):
        pos = len(bv) - offset
        if bv[pos] == 1:
            result[pos] = 0
            break
        result[pos] = 1
    return result
6f30489efb76dd27f8e7ca09003a97278d497c90
22,974
import requests
import json


def set_record(
    airtable_key: str, base_id: str, data: dict, table_name: str = "submissions"
):
    """POST `data` to an Airtable table and return the JSON response.

    Adds an event to Airtable using the key/base specified in .env.
    See https://airtable.com/api for details about the Airtable APIs.
    The target table is expected to have fields such as fullname, date,
    speaker, institution, and id (submission_id); `data` follows the
    Airtable "records" payload shape, e.g.::

        {"records": [{"fields": {"title": "...", "id": "..."}}]}
    """
    post_url = f"https://api.airtable.com/v0/{base_id}/{table_name}"
    headers = {
        "Authorization": f"Bearer {airtable_key}",
        "Content-Type": "application/json",
    }
    response = requests.post(post_url, data=json.dumps(data), headers=headers)
    return response.json()
013037338eb94a02563c4e6eff398985cf1082cc
22,975
def ceiling_root(num, pwr):
    """
    Return the integer ``num ** (1. / pwr)`` when num is a perfect
    square/cube/etc, or the integer ceiling of that value otherwise.

    NOTE(review): relies on float exponentiation, so very large `num`
    may hit precision limits — verify before using beyond ~2**50.
    """
    approx = num ** (1. / pwr)
    rounded = int(round(approx))
    if rounded ** pwr == num:
        return rounded
    return int(approx + 1)
3f36275e834ae32ef1bcae0cecc7b733d2e54a69
22,977
def _parse_fingerprint_terraform(line, host=None): """Parse SSH host fingerprint from terraform output line""" fingerprint = None if line.find('(remote-exec)') > 0: host = line.split(' ')[0].split('.')[-1] fingerprint = line.split(': ', 2)[1] return host, fingerprint
ef57e8c0a505af88e583eb913c3049448bc5077e
22,979
def convertToObjs(headers, entries):
    """Zip each entry row with `headers` to build a list of dicts."""
    result = []
    for entry in entries:
        result.append(dict(zip(headers, entry)))
    return result
3ace4e0054c9a19aaedf5b7dd6cb36163b9b100c
22,980
def _get_grounded_string(name, args): """ We use the lisp notation (e.g. "(unstack c e)"). """ args_string = ' ' + ' '.join(args) if args else '' return '(%s%s)' % (name, args_string)
391e0aa5eea12d4085efa570262d5b406d493cc0
22,981
def split_host_port(host_port):
    """Split a string possibly containing both into (host, port).

    Port is None when absent. Supports hostnames, IPv4 addresses, and
    IPv6 addresses (bracketed or not), with or without ports. No
    validation of either part is performed.

    NOTE(review): for backward compatibility the single-colon case
    returns a two-element *list* (the raw split), not a tuple.
    """
    colons = host_port.count(':')
    if colons == 0:
        # hostname or IPv4 address without a port
        return host_port, None
    if colons == 1:
        # hostname or IPv4 address with a port
        return host_port.split(':', 1)
    # two or more colons: IPv6. With a port it must be bracketed: [ADDR]:PORT
    if ']:' in host_port:
        host, port = host_port.split(']:', 1)
        if host.startswith('['):  # true for any valid address
            host = host[1:]
        return host, port
    # no port; the address may still be bracketed
    host = host_port
    if host.startswith('['):
        host = host[1:]
    if host.endswith(']'):
        host = host[:-1]
    return host, None
89fd98aee3a07406c478eca82922bdecf5cb7078
22,982
def import_class(cl: str):
    """Import and return a class (or attribute) given its dotted path."""
    dot = cl.rfind(".")
    module_path = cl[:dot]
    attr_name = cl[dot + 1:]
    module = __import__(module_path, globals(), locals(), [attr_name])
    return getattr(module, attr_name)
4a0929dee0c44ad1d30d91373db813d605c5fbb4
22,983
import os


def is_root():
    """Get whether the current user is root (or admin on Windows)."""
    if os.name == 'nt':
        # NOTE(review): `winapi` is not imported in this chunk — presumably
        # provided elsewhere in the module; verify before relying on the
        # Windows path.
        return winapi.is_user_admin()
    return os.geteuid() == 0
0393a56bf0a87f1cbcc6bd532c6a8afa8ce5e3e7
22,984
import json
from typing import List


def load_encoding(path: str) -> List[str]:
    """
    Load the character table from an OCR engine configuration file.

    :param path: Path to the OCR engine config (JSON) file.
    :return: list containing the characters of the encoding
    """
    with open(path, "r") as config_file:
        engine = json.load(config_file)
    return engine['characters']
03d4836f1b4d792f3e7dae17e658df908feb24e3
22,986
def flatten_gear(gear_array):
    """Flatten nested gear/process-type request data.

    The project-wizard request data nests process types under each gear
    too deeply to match the other data elements; this produces one flat
    {"gear", "process_type"} dict per (gear, process type) pair.
    """
    flattened = []
    for entry in gear_array:
        for ptype in entry.get("process_types"):
            flattened.append({
                "gear": entry["gear"],
                "process_type": ptype["process_type"],
            })
    return flattened
88d851076a2bb5561b124016106ba143edeca7a8
22,987
import subprocess


def vocal_eq(infile, outfile):
    """
    Try to EQ a song with sox so that vocals are more prominent.
    Doesn't work very well.
    """
    subprocess.call([
        'sox', infile, outfile,
        'bass', '-32', '200',
        'vad', 'norm',
    ])
    return outfile
5fb34b72ae3ee3141781d59420b0656f83ef92cf
22,988
def _get_mode(steps, mode): """ Gets the correct mode step list by rotating the list """ mode = mode - 1 res = steps[mode:] + steps[:mode] return res
920f416e0420ced6a12033c0cefb6ba1ccdbf986
22,989
def get_standard_action(params):
    """Return the action that needs to be executed.

    The decision process for the action implied by the module parameters
    is verbose, so it is externalized here to keep module code readable:
    'delete' when state == absent, 'status' when a status is given,
    'list' when nothing deterministic is set, 'get' when only an id is
    set, 'update' when an id plus other fields are set, else 'create'.
    """
    non_determistic_params = ['dci_login', 'dci_password', 'dci_cs_url',
                              'dci_client_id', 'dci_api_secret', 'embed',
                              'mime', 'state', 'where', 'active']
    deterministic = {
        key: value for key, value in params.items()
        if key not in non_determistic_params
    }
    provided = [key for key in deterministic if deterministic[key] is not None]

    if 'state' in params and params['state'] == 'absent':
        return 'delete'
    if 'status' in provided:
        return 'status'
    if not provided:
        return 'list'
    if provided == ['id']:
        return 'get'
    if 'id' in provided:
        return 'update'
    return 'create'
f8d972ee9305d030bdc52cc8bb6f9210e8dac595
22,990
import subprocess


def cmd_exists(cmd):
    """Check whether *cmd* can be executed in a shell.

    Returns a (rc, msg) tuple: rc == 0 means the command ran
    successfully and msg is None; otherwise rc is the command (or
    shell) return code and msg holds the captured combined output.
    """
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    rc = proc.returncode
    # Only report output when the command failed; success yields no message.
    msg = None if rc == 0 else output.strip()
    return rc, msg
2acdc289a72d512c3a1a3cf6655ac6a5643a28eb
22,991
def countKey(theDict, name):
    """Count how many times key *name* occurs anywhere in this dict-tree.

    The same key name may be used in different sections/sub-sections,
    so every nesting level is searched; a key whose value is itself a
    dict is descended into rather than counted.
    """
    total = 0
    for key, val in theDict.items():
        if isinstance(val, dict):
            # Descend into the sub-section; sibling keys may also match.
            total += countKey(val, name)
        elif key == name:
            # Can't stop early: other child dicts may hold further hits.
            total += 1
    return total
6542b6ebe2e9cf22c5204c7b4819a1958a029a44
22,993
def clear_data(path, start="M107", end='M82', cut=None): """ :param path: Pfad zur G-Code-Datei :param start: Teilstring, der den Beginn der Daten markiert (wird übersprungen) :param end: Teilstring, der das Ende der Daten markiert :param cut: String, der aus den Daten gelöscht werden soll :return: Stringliste, die nur G-Code Anweisungen enthält """ # öffne die Datei zeilenweise with open(path) as file: # entferne die Zeilenumbrüche raw_lines = [f.strip('\n') for f in file.readlines()] # finde den Startpunkt des G-Codes start_idx = [raw_lines.index(i) for i in raw_lines if start in i][0] # finde das Ende des G-Codes end_idx = [raw_lines.index(i, start_idx) for i in raw_lines if end in i][0] # trimme die Daten cut_lines = raw_lines[start_idx+1: end_idx] skirts = [i for i, x in enumerate(cut_lines) if x == ';TYPE:SKIRT'] outer_walls = [i for i, x in enumerate(cut_lines) if x == ';TYPE:WALL-OUTER'] if skirts: # falls es mehrere Skirts gibt, müsste man die Routine hier anpassen del cut_lines[skirts[0]:(outer_walls[0]-1)] # lösche die Typenbezeichnungen if cut is None: uncommented_lines = cut_lines else: uncommented_lines = [i for i in cut_lines if all(c not in i for c in cut)] cleared_lines = [l for l in uncommented_lines if l != ''] return cleared_lines
8c1b8e1e9583a0619f12cae8c988f07ba7156f56
22,994
def _format_lazy(format_string, *args, **kwargs):
    """
    Apply str.format() on 'format_string' where format_string, args,
    and/or kwargs might be lazy.
    """
    formatter = format_string.format
    return formatter(*args, **kwargs)
2ae51537ee38af02bcbd1c952d92a702192d5866
22,995
def isiterable(obj):
    """Return whether *obj* is iterable.

    Uses ``iter()`` rather than an isinstance check so that objects
    implementing only ``__getitem__`` also count as iterable.
    """
    # see https://docs.python.org/3/library/collections.abc.html#collections.abc.Iterable
    try:
        iter(obj)
    except TypeError:
        # iter() raises TypeError for non-iterables; the previous bare
        # `except:` also swallowed unrelated errors such as KeyboardInterrupt.
        return False
    return True
6c6cc1af2eccaf8e10809da964271471720abdf4
22,997
def kel2c(tempK):
    """Convert a temperature in Kelvin to degrees Celsius."""
    zero_celsius_in_kelvin = 273.15
    return tempK - zero_celsius_in_kelvin
91604c71fc5d7aaceea1f435ae1e5781d0dce169
22,998
from bs4 import BeautifulSoup


def convert_to_beautifulsoup(data):
    """Wrap raw HTML content in a BeautifulSoup object.

    :param data: HTML content of the page
    :return: BeautifulSoup object built from *data* using the
        built-in "html.parser" backend
    """
    return BeautifulSoup(data, "html.parser")
9e44b97e6ecff48e7365f450a8ce1005dd0dc42b
22,999
import subprocess


def run_command(cmd: list[str]) -> subprocess.CompletedProcess:
    """Run *cmd*, capturing its output decoded as text.

    Args:
        cmd (list[str]): list describing the command to be run

    Returns:
        subprocess.CompletedProcess: result of the finished process
    """
    completed = subprocess.run(cmd, encoding='utf-8', capture_output=True)
    return completed
fc41f251d14d92205a516549675c0ab23c92e0b3
23,000
def aslist(value):
    """
    Return a list of strings, separating the input based on newlines.
    """
    stripped = (line.strip() for line in value.splitlines())
    return [line for line in stripped if line]
249419c02d92d22d4c7cf0a7788ed09dc3bd19ce
23,002
import random


def uusi_pakka(pakkojen_lkm=1):
    """Return a deck of cards in random order.

    A single card is e.g. (12, '♥').

    Parameters
    ----------
    pakkojen_lkm : int
        How many 52-card decks to shuffle together.  Default 1.

    Returns
    -------
    list of (int, str)
    """
    maat = ['♠', '♥', '♦', '♣']
    pakka = [(arvo, maa) for maa in maat for arvo in range(1, 14)]
    pakka = pakka * pakkojen_lkm   # cards are still in order here
    random.shuffle(pakka)          # randomise the order
    return pakka
4be7987e0e4fe156bfa66305b1a607ce3328e6bd
23,004
def opening_exceptions_cleaned():
    """Prepare data that should match with the response."""
    def closed_period(start, end):
        # Helper building one closed-date exception entry.
        return {
            "title": "Future holidays",
            "is_open": False,
            "start_date": start,
            "end_date": end,
        }

    return [
        closed_period("2100-02-11", "2100-02-12"),
        closed_period("2100-03-01", "2100-03-06"),
    ]
0369f160a335507c4349c6e859a27225ce2895b5
23,005
def get_apigateway_profile_groups_from_header(groups_header: str) -> list:
    """
    Extracts apigateway consumer groups from header
    :param groups_header: comma-separated group names, possibly None/empty
    :return: list of stripped group names (empty list for None/empty input)
    """
    if groups_header is None or groups_header == '':
        return []
    return [group.strip() for group in groups_header.split(',')]
bc3f1b49892df78574d471e58b2c8ffb47b2584f
23,006
import os


def path_coutwildrnp_shp(data_dir):
    """Path to ``coutwildrnp.shp``"""
    shapefile_name = 'coutwildrnp.shp'
    return os.path.join(data_dir, shapefile_name)
7463c17af58acb365801388015fa4d4329a31509
23,007
import os


def verify_dir_has_perm(path, perm, levels=0):
    """
    Verify that home directory has `perm` access for current user.
    If at least one of them fails to have it the result will be False.

    :param path: Path to test
    :param perm: Access rights. Possible values are os' R_OK, W_OK and X_OK
        or the result of a bitwise "|" operator applied a combination of them.
    :param levels: Depth levels to test
    :raises RuntimeError: if ``path`` does not exist.
    """
    if not os.path.exists(path):
        raise RuntimeError('%s does NOT exist!' % path)
    path = os.path.normpath(path)
    # Depth of the starting path, used to compute the relative walk depth.
    pdepth = len(path.split(os.path.sep))
    pathaccess = os.access(path, perm)
    # 0th level: with levels == 0, or if the root itself lacks access,
    # report the root's accessibility directly.
    if not levels or not pathaccess:
        return pathaccess
    # From 1st to `levels`th
    for root, dirs, files in os.walk(path):
        currentlevel = len(root.split(os.path.sep)) - pdepth
        if currentlevel > levels:
            # NOTE(review): `break` aborts the WHOLE walk as soon as one
            # directory exceeds the depth limit; unvisited siblings at an
            # acceptable depth are then skipped.  Pruning with
            # `dirs[:] = []` would visit them — confirm the intent.
            break
        elif ".git" in dirs:
            # Skip version-control internals from the walk.
            dirs.remove(".git")
        for file_path in (os.path.join(root, f) for f in dirs + files):
            if os.path.exists(file_path):
                if not os.access(file_path, perm):
                    #print('No permissions for "%s".' % file_path)
                    return False
    return True
cb1d5c901d7c58ad04c4270d6043d09fe42a6cef
23,008
def format_dict(d, dict_define):
    """Left-pad values of *d* in place according to a format definition.

    *dict_define* maps keys to format specs: ``'C<n>'`` pads the string
    value with spaces to *n* characters; ``'N<i>'`` or ``'N<i>.<f>'``
    pads the stringified value to *i* characters (the fractional width
    is not used for padding).  Keys whose spec is None are left untouched.
    Returns *d* (mutated in place).
    """
    for key, spec in dict_define.items():
        if spec is None:
            continue
        value = d.get(key, '')
        if 'C' in spec:
            width = int(spec.replace('C', ''))
            padded = '{}{}'.format(' ' * (width - len(value)), value)
        elif 'N' in spec:
            num_spec = spec.replace('N', '')
            # 'N10.2' style: only the integer width matters for padding.
            if '.' in num_spec:
                int_width = int(num_spec.split('.')[0])
            else:
                int_width = int(num_spec)
            text = str(value)
            padded = '{}{}'.format(' ' * (int_width - len(text)), text)
        else:
            continue
        d.update({key: padded})
    return d
3b302a55662314288487edd549c6006e08585912
23,009
import json


def _bad_401_return_code_hack(response):
    """Check if a 401 response includes a valid JSON error payload.

    Under certain circumstances the SCM may incorrectly return a 401;
    returns True when the 401 looks bogus (no JSON error body).
    """
    if response.status_code != 401:
        return False
    content_type = response.headers.get("Content-Type", "")
    if content_type.startswith("application/json"):
        try:
            body = response.json()
        except json.JSONDecodeError:
            # Unparseable body: treat as a genuine 401.
            return False
        if isinstance(body, dict) and "error" in body:
            # A proper error payload means the 401 is legitimate.
            return False
    return True
5da54b8e4947ac0b71009d5050a5fc9e11358aba
23,010
import os


def resolve_full_path(relative_file_path):
    """Return the path of *relative_file_path* resolved next to this module."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, relative_file_path)
bc58f365a9429b7fa65ba70c516489ba443d2653
23,012
def copy_dict(source_dict, diffs):
    """Returns a copy of source_dict, updated with the new key-value pairs in diffs."""
    return {**source_dict, **diffs}
971ea9e79d5a3b279d69d578464d767988891494
23,013
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
    """A transformation that resamples a dataset to achieve a target distribution.

    **NOTE** Resampling is performed via rejection sampling; some fraction
    of the input values will be dropped.

    Args:
      class_func: A function mapping an element of the input dataset to a
        scalar `tf.int32` tensor. Values should be in `[0, num_classes)`.
      target_dist: A floating point type tensor, shaped `[num_classes]`.
      initial_dist: (Optional.)  A floating point type tensor, shaped
        `[num_classes]`.  If not provided, the true class distribution is
        estimated live in a streaming fashion.
      seed: (Optional.) Python integer seed for the resampler.

    Returns:
      A `Dataset` transformation function, which can be passed to
      `tf.data.Dataset.apply`.
    """
    def _apply_fn(dataset):
        # Forward the captured arguments to the dataset's own resampler.
        return dataset.rejection_resample(class_func=class_func,
                                          target_dist=target_dist,
                                          initial_dist=initial_dist,
                                          seed=seed)

    return _apply_fn
392eac7e4aecdc50a9155b4b1d3a0a96ff6b5866
23,015
def get_dict_from_df(
    df,
    key_col: str = 'Key',
    val_col: str = 'Value'
) -> dict:
    """Build a ``{key: value}`` dict from two columns of a dataframe.

    Iterates the column values directly instead of positional
    ``df.loc[i, ...]`` lookups, so it also works when the dataframe's
    index is not a default RangeIndex (e.g. after filtering).  Later
    duplicate keys overwrite earlier ones, as before.

    :param df: dataframe holding the keys and values
    :param key_col: name of the key column (default ``'Key'``)
    :param val_col: name of the value column (default ``'Value'``)
    :return: mapping from key-column entries to value-column entries
    """
    return dict(zip(df[key_col], df[val_col]))
d3c83fc444d264bcbe7f036db820f0b278c9e7b2
23,017
import math


def get_normalized_pv_value(t):
    r"""
    Expects a value between 0 and 1 (0 -> 00:00:00 || 1 -> 24:00:00)
    and returns the normalized simulated photovoltaic output at that time,
    according to the graph on the challenge paper.
    The formula can be examinated on https://www.desmos.com/calculator

    The formula used is this one:
    \max\left(\max\left(-\frac{\left(x-7\right)^{2}}{10}+1,\ -\frac{\left(x-6.3\right)^{2}}{100}+.2\right),\ 0\right)

    It tries to mimic the Graph as normalized values.
    f(0) = photovoltaic output at 00:00:00
    f(PI*4) = photovoltaic output at 24:00:00
    """
    x = t * math.pi * 4
    # Narrow peak centred at x = 7 reaching 1.0 ...
    peak = -((x - 7) ** 2) / 10 + 1
    # ... plus a wide shoulder centred at x = 6.3 capped at 0.2.
    # Bug fix: the docstring formula specifies +.2, but the code used +2,
    # which let the "normalized" output exceed 1.
    shoulder = -((x - 6.3) ** 2) / 100 + 0.2
    # Clamp at zero: no negative photovoltaic output.
    return max(peak, shoulder, 0)
c5b619719fbe5c0ab16d5c1e8d66464c2609d619
23,018
def extend_rsltdict(D, key, val, Extend=False):
    """Helper for form_delta() ---

    Given a result dictionary D, extend it, depending on the setting of
    Boolean Extend.  For some dictionaries, duplicates mean "extend";
    for others, that means error.  This is a crucial helper used by
    "form_delta".
    """
    if Extend:
        # Duplicates accumulate into a set at this key.
        existing = D.get(key, set())
        D[key] = existing | {val}
    else:
        assert key not in D, ("Error: Duplicate map at key " + str(key) +
                              "; duplicates: " + str(D.get(key)) +
                              " and " + str(val))
        D[key] = val  # don't make a set
    return D
44eeac575351b24db25c1c117abf545d189fe25d
23,019
import setuptools


def find_packages_under(path):
    """Recursive list all of the packages under a specific package."""
    discovered = setuptools.find_packages()
    return [pkg for pkg in discovered if pkg.split(".")[0] == path]
5a6f1bfe757559c75341cc15d1a7f6dd44019163
23,020
def get_marker(request, name):
    """Fetch a pytest marker from the requesting test node.

    Keeps compatibility across pytest versions: older versions expose
    ``get_marker``, newer ones ``get_closest_marker``.

    Parameters
    ----------
    request: _pytest.fixtures.SubRequest
    name: str

    Returns
    -------
    Optional[_pytest.mark.structures.MarkInfo | _pytest.mark.structures.Mark]
    """
    node = request.node
    try:
        return node.get_marker(name)
    except AttributeError:
        # Newer pytest removed get_marker; fall back to the modern API.
        return node.get_closest_marker(name)
8f93a90d886911ac4c66889988b8e1a07e86f931
23,021
import math


def fnCalculate_MinTargetHeight(target_range, Re, H):
    """
    Calculate the minimum target height that is visible by the radar.
    Ref: Wikipedia
    Objects below this height are in the radar shadow.
    """
    # Distance to the radar horizon for antenna height H over radius Re.
    horizon_distance = math.sqrt(2. * H * Re)
    return (target_range - horizon_distance) ** 2 / (2. * Re)
3c3b247b979f25af9cdfb6e0fdd2079f7744fe7a
23,022
def mostDerivedLast(classList):
    """Sort *classList* in place so derived classes appear after their bases.

    Uses a key wrapper whose ``<`` means "is a base of the other", so the
    stable sort pushes subclasses behind their ancestors.
    """
    class _SubclassKey(object):
        __slots__ = ('cls',)

        def __init__(self, cls):
            self.cls = cls

        def __lt__(self, other):
            # self < other  iff  other derives from self.
            return issubclass(other.cls, self.cls)

    classList.sort(key=_SubclassKey)
da4983fa2c6cf6d5b7d5cd1fde3eb4559569f10e
23,025
def peek_indentation(stream):
    """
    Counts but doesn't actually read indentation level on new line,
    returning the count or None if line is blank
    """
    count = 0
    text = stream.text
    while True:
        ch = text[stream.ptr + count]
        if ch == "\n":
            # Blank line: nothing but whitespace up to the newline.
            return None
        if not ch.isspace():
            return count
        count += 1
7f123ab76491eab9bcdfb289e1f88575cd79f400
23,026
def __veja(soup):
    """
    Gets the most read news from the Veja page
    :param soup: the BeautifulSoup object
    :return: a list with the most read news from the Veja Page
    """
    news = []
    # The most-read list lives in <section class="block most-read dark">;
    # each entry is an <h2> whose parent anchor carries the link.
    headers = soup.find('section', class_='block most-read dark').find_all('h2')
    for h2 in headers:
        # NOTE(review): h2.next.next.next walks three nodes into the h2 to
        # reach the title text — this depends on the exact markup of the
        # Veja page and breaks silently if the page structure changes;
        # confirm against the live page.
        news.append(dict(title=h2.next.next.next.string, link=h2.parent['href']))
    return news
224f2b92711bff9baab8a40f89a2f2f0e9b83880
23,027
def get_gene_list(dataframe):
    """Helper fn, function to get gene list from the dataframe.

    Function is extended to return the dictionary of genes and their
    variant type.
    """
    # Include the optional "type" column only when the dataframe has it.
    columns = ["HGNC_ID", "var_type"]
    if "type" in dataframe.columns:
        columns = ["HGNC_ID", "type", "var_type"]
    deduped = dataframe[columns].drop_duplicates()
    gene_list = deduped["HGNC_ID"].tolist()
    gene_dict = deduped.to_dict('records')
    gene_tuple = [tuple(row) for row in deduped.to_numpy()]
    return gene_list, gene_dict, gene_tuple
8a6be23c0a97fe840aeddfe7c54303774c637588
23,028
def green(frame):
    """
    gets the green channel of the frame
    :param frame: the frame
    :return: the green channel only (as a grayscale frame)
    """
    GREEN_CHANNEL = 1
    return frame[:, :, GREEN_CHANNEL]
86a26fc3462c69ff2fbec6a3df50527c1c14a9b9
23,029
def checkTupleAlmostEqualIn(tup, tupList, place):
    """
    check if a tuple is in a list of tuples in which float items only
    need to be almost equal

    :type tup: tuple
    :param tup: tuple to be checked
    :type tupList: list of tuples
    :param tupList: list of tuples that tup needs to be checked against
    :param place: decimal places to round float values to for comparison

    Bug fixes over the previous version: a mismatch on the LAST element
    no longer reports a false match (the old ``i == length - 1`` check
    fired even after a ``break``), and an empty candidate tuple no longer
    raises NameError (``i`` was unbound when the loop never ran).
    """
    def _items_match(a, b):
        # Floats compare after rounding; everything else must be equal.
        if isinstance(a, float):
            return round(a, place) == round(b, place)
        return a == b

    for candidate in tupList:
        if len(candidate) != len(tup):
            continue
        if all(_items_match(a, b) for a, b in zip(tup, candidate)):
            return True
    return False
49dddd9c388243f201ffc003f7d2d275734a46b2
23,030
from json import dumps


def _app_post_json(self, url, params, **kwargs):
    """
    To be injected into TestApp if it doesn't have a post_json method
    available
    """
    body = dumps(params)
    kwargs['content_type'] = 'application/json'
    return self.post(url, params=body, **kwargs)
090c9c93496f20444ef1c155feea896bd914bf43
23,031
def filter_flash_errors(glm_data, LL_coords, UR_coords):
    """Remove GLM flashes falling inside a bounding box of known-false detections.

    There appear to be dense lines of erroneous flashes around 26 N on
    10 Sep & 11 Sep from 1500-2100z; this drops everything inside the box,
    genuine or not.

    Parameters
    ----------
    glm_data : sequence
        (flash longitudes, flash latitudes)
    LL_coords : tuple
        Lower-left (lon, lat) of the bounding box of false flashes
    UR_coords : tuple
        Upper-right (lon, lat) of the bounding box of false flashes

    Returns
    -------
    tuple of lists
        Filtered GLM flash coordinates, (flash_lons, flash_lats)
    """
    flash_lons = glm_data[0]
    flash_lats = glm_data[1]
    west, south = LL_coords
    east, north = UR_coords

    kept_lons = []
    kept_lats = []
    for idx, lon in enumerate(flash_lons):
        lat = flash_lats[idx]
        # Keep the flash only when it lies outside the box (inclusive bounds).
        inside_box = (south <= lat <= north) and (west <= lon <= east)
        if not inside_box:
            kept_lons.append(lon)
            kept_lats.append(lat)
    return (kept_lons, kept_lats)
5ff8ca4dbb82b633e36105527ac7bf18db7b5c94
23,032
def extract_subtypes(mappings, data_type):
    """Extract subtypes of given data types.

    e.g: for data type "alert", possible subtypes are "dlp", "policy" etc.

    :param mappings: mapping dict containing a "taxonomy" section
    :param data_type: data type for which subtypes are to be fetched
    :return: extracted sub types
    """
    taxonomy = mappings["taxonomy"]
    return list(taxonomy.get(data_type, {}))
82db0ad6c9ac679be806678eaf1b5b21cc9d95c4
23,034
def dis_to_feature(x, feature_dict):
    """One-hot encode *x* against a position dictionary.

    Args:
        x: the element to encode
        feature_dict: mapping from element to its one-hot position

    Returns:
        Comma-separated string of 0/1 flags; all zeros when *x* is unknown.
    """
    flags = [0] * len(feature_dict)
    if x in feature_dict:
        flags[feature_dict[x]] = 1
    return ",".join(str(flag) for flag in flags)
05c898657dababbce7c08d5658da80fa1637175f
23,036
def read_label(_label_file):
    """Read and return the entire contents of a label file.

    :param _label_file: path to the label file
    :return: file contents as a single string

    Uses a ``with`` block so the file handle is closed even if the read
    raises (the previous version leaked the handle on error).
    """
    with open(_label_file, "r") as f:
        return f.read()
d860625dd9b81c992c3db66550d069564e3346b0
23,037
def wraa(data, lg_woba, scale):
    """Weighted Runs Above Average (wRAA)

    wRAA = ((wOBA - league wOBA) / wOBA scale) * PA

    :param data: mapping with "woba" and "pa" entries
    :param lg_woba: league-average wOBA
    :param scale: wOBA scale factor
    :returns: runs above average over the given plate appearances
    """
    woba_above_league = data["woba"] - lg_woba
    return woba_above_league / scale * data["pa"]
81568d3bd3ed3b3593e78192a0c3256a23b1efac
23,038
def allannotations(cls: type):
    """
    Get annotations dict for all the MRO of class `cls' in right
    («super-to-sub») order.
    """
    merged = {}
    # Walk the MRO base-first so subclass annotations override base ones.
    for base in reversed(cls.mro()):
        if hasattr(base, '__annotations__'):
            merged.update(base.__annotations__)
    return merged
ecdebdab4dbe836c3d8114778af0e94dec9b8850
23,040