Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def parse_relative_day_value(relative_day: str) -> int:
    """
    Parses a relative month value such as "前月"

    :param relative_day: The value to parse
    :return: An integer representing the relative value, for example -1
    """
    if relative_day == "前月":
        return -1
    if relative_day == "今月":
        return 0
    if relative_day == "来月":
        return 1
    raise ValueError(f"Could not parse the input as a relative month: {relative_day}")
845ee36e996af38fc0066f59ebcedea68c3d0cc6
50,290
import torch


def cos_sim(x, y, epsilon=0.01):
    """
    Calculates the cosine similarity between the last dimension of two tensors.
    """
    numerator = torch.matmul(x, y.transpose(-1, -2))
    xnorm = torch.norm(x, dim=-1).unsqueeze(-1)
    ynorm = torch.norm(y, dim=-1).unsqueeze(-1)
    denominator = torch.matmul(xnorm, ynorm.transpose(-1, -2)) + epsilon
    dists = torch.div(numerator, denominator)
    return dists
84fa92595110680350e0fe5eb7c3ba230e7a0ec1
50,294
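A minimal usage sketch for cos_sim above, assuming the function is in scope; the tensor shapes are arbitrary.

import torch

# Hypothetical check: 3 query vectors against 5 key vectors, both 8-dimensional.
x = torch.randn(3, 8)
y = torch.randn(5, 8)
sims = cos_sim(x, y)  # shape (3, 5): one similarity per (x_i, y_j) pair
assert sims.shape == (3, 5)
# With the epsilon in the denominator, magnitudes stay strictly below 1.
assert torch.all(sims <= 1.0) and torch.all(sims >= -1.0)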
def get_url_by_format(config):
    """Get URL depending on the format."""
    # types: Config -> string
    if config.format == 'gff3':
        return config.sviewer_url
    return config.entrez_url
8dcf6a00b56a8a83773c63d976273f12f0f70bf8
50,295
def get_count(self):
    """
    Return count value with a default of 1
    """
    return self.get("count", 1)
643064b29fff0b65a39f2eefb4f35d7468db09ae
50,298
from typing import List


def suffixes(word) -> List[str]:
    """All non-empty proper suffixes of word, longest first."""
    return [word[i:] for i in range(1, len(word))]
96f2ef102f41f1a058d4195e6840e44a1b01c5c8
50,299
def _compute_preferred_numer_of_labels(
    available_space: int, vertical_direction: bool
) -> int:
    """
    Compute an estimate for the preferred number of labels.
    """
    # For horizontal direction (x axis)
    preferred_number_of_labels = int(available_space / 15)
    if vertical_direction:
        # for y axis
        preferred_number_of_labels = int(available_space / 5)
    return max(2, min(20, preferred_number_of_labels))
4209498eda4fe8b35535ec05ad6d368ea6dba736
50,302
from typing import Dict
from typing import List


def make_data_lists(img_pth_to_cap: Dict[str, str], image_paths: List[str]):
    """Make lists of data paths and respective captions

    Args:
        img_pth_to_cap: Dictionary of image paths to captions
        image_paths: List of image paths

    Returns:
        img_list: List of image paths
        cap: List of captions
    """
    cap, img_list = [], []
    for im_pth in image_paths:
        caption_list = img_pth_to_cap[im_pth]
        cap.extend(caption_list)
        img_list.extend([im_pth] * len(caption_list))
    return img_list, cap
dafee60e5a6ebcab9046cfd7b90f01a9eda08d02
50,303
def get_log(user_hash: str) -> dict:
    """
    return the log for current user

    :param user_hash: hash identifying a user
    :type user_hash: str
    :return: dict with all the info. see protocol
    :rtype: dict
    """
    if len(user_hash) != 64:
        return {
            'success': False,
            'error_msg': 'Invalid Hash',
            'log': ''
        }
    with open("server.log", 'rt') as f:
        lns = []
        for ln in f:
            if user_hash in ln and 'CALLED' in ln:
                lns.append(ln.replace('INFO:root:' + user_hash + ' - ', ''))
    return {
        'success': True,
        'error_msg': '',
        'log': "".join(lns)
    }
800d9f14cce0dfbb53a3d1c8d15113eb9a8f9996
50,304
def format_table(table, titles=True):
    """
    Returns a multilined string representing the given table (2d list) as a table,
    with equal-width columns.

    :param titles: if true, the first row in the table is taken as headers for the
        table, adding a separator on the second line
    """
    fmt_simple = "{:{width}}"
    fmt_string = "{!s:{width}}"  # for types that don't accept a ':width' specifier when formatted (NoneType, dict, ...)

    def safe_format(cell, width=1):
        if isinstance(cell, bool):
            # because if we force it to str it becomes an int (True -> 1, False -> 0)
            return fmt_string.format(cell, width=width)
        try:
            return fmt_simple.format(cell, width=width)
        except TypeError:
            return fmt_string.format(cell, width=width)

    widths = [max(map(len, map(safe_format, column))) for column in zip(*table)]
    txt = ''
    for i, row in enumerate(table):
        if titles and i == 1:
            txt += '-'.join('-' * width for width in widths) + '\n'
        txt += '|'.join(safe_format(cell, width=width) for cell, width in zip(row, widths))
        txt += '\n'
    return txt
172892020d8753516b54d204e94e78800a6a8249
50,306
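A small illustrative call to format_table above, assuming the function is in scope; the rows are invented for the example and the expected output shown in the comments is approximate.

rows = [
    ["name", "active", "score"],
    ["alice", True, 12.5],
    ["bob", None, 3],
]
print(format_table(rows))
# Roughly:
# name |active|score
# ------------------
# alice|True  | 12.5
# bob  |None  |    3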
def response_to_json(response):
    """Return API Response as json

    Args:
        response (requests.response): response from API request

    Returns:
        dict: Forbes List data as json
    """
    return response.json()
8770411e27604d95ec8bc18ee09d918157509e7f
50,308
def display(*args, listreturn=1):
    """
    Displays menu items [0] from list/tuple pairs/group to console,
    gets user selection and returns corresponding function/item
    (defaults to list item index[1] from pair/group.

    Args:
        *args (list/tuple): *Expanded list of list/tuple pairs/groups with info
            to display to console, and item or function to return if chosen.
            ex: ('display name', function_to_call)
        listreturn (int, optional): List index of which item to be returned.
            defaults to 1.

    Returns:
        item/function: Item/Function [1] from corresponding selection.
            (list/tuple index [1] by default)
    """
    # Number (enumerate) and display the options [0] from args pair
    # starting at 1.
    for i, arg in enumerate(args, 1):
        print(f' [{i}]: {arg[0]}')
    # Ask for user input and return the corresponding item/function
    # (defaults to index[1])
    # only if the selection can be found in list.
    while True:
        sel = input('\n Selection: ')
        if sel.isdigit() and int(sel) <= len(args) and int(sel):
            return args[int(sel) - 1][listreturn]
        else:
            print('\n Please choose from available selections.')
af99517f12b9d7f5afe8aedbc5ee4e3e84a7885f
50,310
def parse_subdir_file(path_to_subdir_file):
    """Parses a .subdir.txt file by splitting its contents into a list by line

    @retval: list with each element containing a line from the .subdir.txt file.
    """
    files = []
    with open(path_to_subdir_file, 'r') as f:
        while True:
            line = f.readline().strip()
            if not line:
                break
            files.append(line)
    return files
59004a6009ba52ba539bf4a1ed1bd6330717b108
50,312
import json
import pprint


def pformat(value):
    """
    Format given object: Try JSON first and fallback to pformat()
    (JSON dumps are nicer than pprint.pformat() ;)
    """
    try:
        value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)
    except TypeError:
        # Fallback if values are not serializable with JSON:
        value = pprint.pformat(value, width=120)
    return value
8c63bd0bb2f31dcd35ce9d582c26a6b1b808249f
50,313
def sort(x):
    """Return the sample x in increasing order."""
    return sorted(x)
1338520706d75fe649f13c4724a9b4e6a77c266d
50,322
def convert_seconds(seconds: float) -> str:
    """
    Convert time in seconds to days:hours:minutes:seconds.milliseconds with
    leading 0s removed.

    Parameters
    ----------
    seconds : float
        Number of seconds to be converted.

    Returns
    -------
    str
        Converted time.
    """
    mS = int((seconds) * 1000)
    D, mS = divmod(mS, 86400000)
    H, mS = divmod(mS, 3600000)
    M, mS = divmod(mS, 60000)
    S, mS = divmod(mS, 1000)
    H = str(H).zfill(2)
    M = str(M).zfill(2)
    S = str(S).zfill(2)
    mS = str(mS).zfill(3)
    time = f'{D}:{H}:{M}:{S}.{mS}'.lstrip('0:')
    if time.startswith('.'):
        time = '0' + time
    return time
bb467f4e13bdf31db1e4d283b5e47b2ded45d48a
50,325
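A few illustrative conversions with convert_seconds above, assuming the function is in scope; the inputs are arbitrary.

print(convert_seconds(0.25))     # '0.250'
print(convert_seconds(75.5))     # '1:15.500'
print(convert_seconds(90061.0))  # '1:01:01:01.000' (1 day, 1 hour, 1 minute, 1 second)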
def is_question_answer_yes(question: str) -> bool:
    """
    Prompt a yes/no question to the user.

    Parameters
    ----------
    question : str
        The question to print on stdout.

    Returns
    -------
    bool
        True if the user answers "yes", False otherwise.
    """
    answer = input(question + " [y/N] ")
    answer = answer.lower()
    if answer == "y" or answer == "yes":
        return True
    return False
3b8564d0d984a315fcf46acc935db1759f148af5
50,328
def _G(x, y):
    """Helper function. Returns True when the timestamps of x and y are within 5 seconds."""
    return abs((x.timestamp - y.timestamp).total_seconds()) <= 5
8fc6bc08b8fd70438031878d6906e6e487166af9
50,331
def toStringDuration(duration):
    """Returns a description of the given duration in the most appropriate
    units (e.g. seconds, ms, us, or ns).
    """
    table = (
        ('%dms', 1e-3, 1e3),
        (u'%d\u03BCs', 1e-6, 1e6),
        ('%dns', 1e-9, 1e9)
    )

    if duration > 1:
        return '%fs' % duration

    for format, threshold, factor in table:
        if duration > threshold:
            return format % int(duration * factor)

    return '%fs' % duration
2b000767563df5addaa4c2d7f98f44841d81130a
50,332
def vararg_callback(option, opt_str, value, parser):
    """Callback for an option with variable arguments.

    Manually collect arguments right of a callback-action option (ie. with
    action="callback"), and add the resulting list to the destination var.

    Usage:
        parser.add_option("-c", "--callback", dest="vararg_attr",
                          action="callback", callback=vararg_callback)

    Details:
    http://docs.python.org/2/library/optparse.html#callback-example-6-variable-arguments
    """
    value = [value]

    def floatable(str):
        try:
            float(str)
            return True
        except ValueError:
            return False

    for arg in parser.rargs:
        # stop on --foo like options
        if arg[:2] == "--" and len(arg) > 2:
            break
        # stop on -a, but not on -3 or -3.0
        if arg[:1] == "-" and len(arg) > 1 and not floatable(arg):
            break
        value.append(arg)

    del parser.rargs[:len(value) - 1]
    setattr(parser.values, option.dest, value)
0d25d97e4702a83b46a20842e9dea1100de575da
50,336
def mrmmult(temp, covmat):
    """Matrix multiplication (MRM' or m'Rm)."""
    return temp @ covmat @ temp.T
65d7da0f4303a8414c884ed172d1123ca9033f34
50,339
import math


def calibrated_fps(calibrate):
    """Calibration of the dynamic frames per second engine.

    I've started with the equation y = log10(x + m) * k + n, where:
        y is the desired fps, m and n are horizontal and vertical translation,
        k is a calibration factor, computed from some user input c (see readme for details).

    Considering minfps and maxfps as given constants, I came to:
        fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c,
        so the factor k = (maxfps - minfps) / log10(c + 1), and
        fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps

    Neat! ;)

    Args:
        calibrate (float): user provided

    Returns:
        a callable to calculate the fps
    """
    min_fps, max_fps = 2., 60.
    calibrate = max(1e-6, calibrate)
    adjust_log_curve = 100. / min(calibrate, 100.)  # adjust the curve for small numbers
    factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)

    def fps(rate):
        if rate <= 0:
            return 10.  # bootstrap speed
        if rate < calibrate:
            return math.log10((rate * adjust_log_curve) + 1.) * factor + min_fps
        return max_fps

    return fps
8f51d14bc3b58a20e3a2e6775233569f65c0f511
50,344
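A small sketch of how the calibrated_fps factory above might be exercised, assuming it is in scope; the calibration value 10 is arbitrary.

fps = calibrated_fps(10.)
print(fps(0))     # 10.0, the bootstrap speed for non-positive rates
print(fps(5))     # somewhere between min_fps (2) and max_fps (60), following the log curve
print(fps(10))    # 60.0: at or above the calibration point the max fps is returned
print(fps(1000))  # 60.0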
def mut_pair_num(table):
    """
    A function that calculates the number of pairs of codons one mutation away
    from each other. Treats mutations with directionality.

    In general, the number of mutational pairs is equal to the number of codons
    in a table multiplied by the number of unique codons within one mutation.
    Let a = alphabet length (generally 4), L = codon length (generally 3)

        n = (a^L) * L(a-1)

    Parameters
    ----------
    dict table: the codon table to analyze

    Returns
    -------
    int mut_num: the number of distinct mutational pairs.
    """
    # get list of all codons in table
    codon_list = list(table)
    # get alphabet size
    alphabet = set()
    for codon in codon_list:
        for nt in codon:
            alphabet.add(nt)
    a = len(alphabet)
    # get codon length
    L = len(codon_list[0])
    # calculate mut_num and return
    return (a ** L) * L * (a - 1)
ce44087d295ac2cf0860c364dbf18b4f2de500b1
50,346
def train_loop(model, optimizer, loss_fn, samples, labels, batch_size, seq_len,
               device='cpu', pre_trained=False):
    """
    Standard pytorch training loop, using our helper loss function above.

    :param model: model to optimize
    :param optimizer: optimizer
    :param loss_fn: loss function
    :param samples: data in
    :param labels: labels out
    :param batch_size: batch size for sequences
    :param seq_len: sequence length
    :param device: device to put tensors on
    :param pre_trained: are we using pre-made embeddings or passing in indices?
    :return: model, loss, and accuracy
    """
    loss_total = 0
    acc_total = 0
    total_samples = 0

    # iterate through all samples, stepping by batch_size * sequence length and using
    # your loss function above to calculate loss. Then, zero gradients, backprop,
    # step optimizer, and repeat.
    # Also, store up the loss total, total number correct, and total number
    # processed by the model so far.

    # Return model, loss, and accuracy
    return model, loss_total, acc_total / total_samples
d97ec345f6e1bb1e1951a699bf171e5accca362e
50,347
import importlib
import json


def create_object(config):
    """
    Creates an object from the specified configuration dictionary. Its format is:

        class: The fully qualified path name.
        args: A list of positional arguments (optional).
        kwargs: A dictionary of keyword arguments (optional).
    """
    try:
        module_name, _, class_name = config['class'].rpartition('.')
        module = importlib.import_module(module_name)
        cls = getattr(module, class_name)
        return cls(*config.get('args', []), **config.get('kwargs', {}))
    except Exception as e:
        raise Exception(
            'Could not create object\n{}'.format(json.dumps(config, indent=4)), e
        )
3f39a1f09a664602b4beeaf35590470dc96a1db2
50,353
def bollinger_band(df, base, upper_target, lower_target, period):
    """
    Function to compute Bollinger Bands (BB)
    This is a lagging indicator

    df - the data frame
    base - on which the indicator has to be calculated eg Close
    upper_target - column name to store upper BB value
    lower_target - column name to store lower BB value
    period - period of the bb
    """
    df['{}MA'.format(period)] = df[base].rolling(window=period).mean()
    df['{}STD'.format(period)] = df[base].rolling(window=period).std()
    df[upper_target] = df['{}MA'.format(period)] + (df['{}STD'.format(period)] * 2)
    df[lower_target] = df['{}MA'.format(period)] - (df['{}STD'.format(period)] * 2)
    df = df.drop(['{}MA'.format(period), '{}STD'.format(period)], axis=1)
    return df
e9daecd68e6a41178a554acefbc460184855bca6
50,356
def decode_topic_name(encoded: str) -> str:
    """
    Reverses ``encode_topic_name``.

    :param encoded: the encoded SNS topic name
    :return: the decoded channel name
    """
    decoded = encoded
    decoded = decoded.replace("_WCD_", "*")
    decoded = decoded.replace("_FWS_", "/")
    decoded = decoded.replace("_DOT_", ".")
    decoded = decoded.replace("_COL_", ":")
    decoded = decoded.replace("_LT_", "<")
    decoded = decoded.replace("_GT_", ">")
    return decoded
d8af5240645b1286bc119fdf162cb7645d439e0c
50,359
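A quick example for decode_topic_name above, assuming it is in scope; the encoded name is made up and the encode_topic_name counterpart is not shown in this snippet.

encoded = "events_FWS_user_DOT_created_WCD_"
print(decode_topic_name(encoded))  # 'events/user.created*'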
import json


def download_options(dataset, node, entityids, api_key=None):
    """
    The use of the download options request is to discover the different download
    options for each scene. Some download options may exist but still be unavailable
    due to disk usage and many other factors. If a download is unavailable it may
    need to be ordered.

    :param dataset:
    :param node:
    :param entityIds:
    :param api_key:
        API key is not required.
    """
    payload = {
        "apiKey": api_key,
        "datasetName": dataset,
        "node": node,
        "entityIds": entityids
    }
    return json.dumps(payload)
2c82faab4f1a74dfa95bc3acc9049919c47be2c2
50,363
import shlex


def shlex_split(s, comments=False, posix=True):
    """
    Splits a string using shell lexer, but returns any incomplete string as the
    last component instead of erroring for unmatched quotations.
    """
    lex = shlex.shlex(s, posix=posix)
    lex.whitespace_split = True
    if not comments:
        lex.commenters = ''
    result = []
    while True:
        try:
            tok = lex.get_token()
        except ValueError as e:
            print(repr(e))
            # Append the current token
            result.append(lex.token)
            break
        else:
            if tok == lex.eof:
                break
            result.append(tok)
    return result
8708c423af6ffa9b69aacec0e05676879f7104c1
50,367
def bib_sublist(bibfile_data, val_type):
    """
    Sublist of bibfile_data whose elements are val_type

    This method examines each bib_dict element of a bibfile_data list and returns
    the subset which can be classified according to val_type.

    :param list bibfile_data: List containing `RefFile`s.
    :param type val_type:
    :rtype: list
    """
    sublist = [bibfile for bibfile in bibfile_data if isinstance(bibfile.bib, val_type)]
    return sublist
1865e5af22b873b5b43a1b1cde7982e92aa77226
50,370
import base64


def convert_image(filename: str) -> str:
    """Converts image to string.

    Args:
        filename: The name of the image to convert.

    Returns:
        The image converted to serializable string representation.
    """
    with open(filename, 'rb') as file:
        converted = base64.b64encode(file.read()).decode()
    return converted
28d0341a76ee2323683225606a8fc4b80205cb28
50,373
def column_to_list(data: list, prop: str):
    """
    Collects the values of one column of the data into a list

    args:
        data (list): A list of data items (list/dict)
        prop (str): A key or index

    return (list): A list of the values mapped as 'prop' in each item of the given list 'data'
    """
    return [item.get(prop, None) for item in data]
18ddae43a15cee920d8f3789dc23fe019ef2b63f
50,374
def GetChunks(data, size=None):
    """
    Get chunks of the data.
    """
    if size is None:
        size = len(data)

    start = 0
    end = size
    chunks = []
    while start < len(data):
        chunks.append(data[start:end])
        start = end
        end += size
        if end > len(data):
            end = len(data)
    return chunks
51bdfe5334292a700c660def7e3306b6fa528398
50,389
def remove_duplicate_words(text: str) -> str:
    """Remove duplicate words.

    It is a general-purpose function which removes repeated words, preserving
    only the first occurrence of each word (including duplicates that appear
    next to each other).

    Args:
        text (str): Accepts only one element (i.e., scalar).

    Returns:
        A text variable of <class 'str'> after removing duplicate words.

    Examples:
        >>> input_text = 'Potter Potter I have a message for you'
        >>> remove_duplicate_words(input_text)
        'Potter I have a message for you'
    """
    result = []
    for word in text.split():
        if word not in result:
            result.append(word)
    return " ".join(result)
2f78193325266b47fd55b340989822c62fb6b6df
50,395
def get_common_elements(element_list):
    """
    :param element_list: list of lists where each internal list contains values
    :return: a sorted list of elements which are common in all the internal lists
    """
    common_element_list = set(element_list[0])
    index = 1
    while index < len(element_list):
        common_element_list = common_element_list.intersection(element_list[index])
        index += 1
    return sorted(list(common_element_list))
fa3233bb2945949837fd70db4d75f5803100e3ee
50,396
def user_file(filename, username="master"):
    """Return json file for the user and given filename."""
    assert username, "user_file: empty username."
    if username == 'master':
        return './library/' + filename + '.json'
    else:
        return './users/' + username + '/' + filename + '.json'
89ec038990eae7b285428ff9e8c7e70609cb9de3
50,397
import torch


def compute_active_units(mu, delta):
    """Computes an estimate of the number of active units in the latent space.

    Args:
        mu(torch.FloatTensor): [n_samples, z_dim]. Batch of posterior means.
        delta(float): variance threshold. Latent dimensions with a variance above
            this threshold are active.

    Returns:
        int: the number of active dimensions.
    """
    outer_expectation = torch.mean(mu, 0) ** 2
    inner_expectation = torch.mean(mu ** 2, 0)
    return torch.sum(inner_expectation - outer_expectation > delta).item()
cdbd24ba9735f48f5c92b3c028106d7824a2e3cf
50,399
def decode_qwikcord(packet, channel=1):
    """Extract the qwikcord current measurements from val (CTavg, CTsum)."""
    val = str(packet.get('val', ''))
    if len(val) != 16:
        return None
    if channel == 1:
        return int(val[6:12], 16)  # CTavg
    return int(val[12:], 16)  # CTsum
d0edf4244b5d62d892e5ce71c966145e82b5dc37
50,400
def has_afg_license(instr):
    """Returns True if the first license includes an AFG license"""
    return "AFG" in instr.query("LIC:ITEM? 0").strip().split('"')[3].split(",")
0b9b2d65b7f910d3a4e412f67c76c5333d4f7d7b
50,403
def resolve_set_to_value(value_set, default_value, error_message):
    """Resolve a set of values to a single value, falling back to a default value if needed.

    If it is unresolvable, produce an error message.
    """
    if len(value_set) == 0:
        return default_value
    elif len(value_set) == 1:
        return list(value_set)[0]  # should be single value
    raise ValueError(error_message)
f8d8cdf9dbbf73d7382fbf0fb37e217c975892f9
50,406
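Illustrative calls to resolve_set_to_value above, assuming it is in scope; the values and message are invented for the example.

print(resolve_set_to_value(set(), "default", "ambiguous value"))     # 'default'
print(resolve_set_to_value({"only"}, "default", "ambiguous value"))  # 'only'
try:
    resolve_set_to_value({"a", "b"}, "default", "ambiguous value")
except ValueError as err:
    print(err)  # ambiguous value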
def pooled_prob(N_A, N_B, X_A, X_B):
    """Returns pooled probability for two samples"""
    return (X_A + X_B) / (N_A + N_B)
55beb8fc549fb0d71db16764738d7cdc9c570825
50,409
def compute_time(sign, FS):
    """Creates the signal correspondent time array."""
    time = range(len(sign))
    time = [float(x) / FS for x in time]
    return time
7d6bcc3a8f54d199a6bec9d46b0fe5bbdfeeb052
50,410
def __max_value_index(list):
    """
    Find the idx of the max value in list

    list -- numeric list
    """
    max_val = max(list)
    max_idx = list.index(max_val)
    return max_idx
f94cc5629711000c6dcffb059ffe0c9bbdab62cf
50,411
def validDate(date: str) -> bool:
    """Return whether a string follows the format of ####-##-##."""
    if len(date) == 10:
        return (date[0:4].isnumeric() and date[5:7].isnumeric()
                and date[8:10].isnumeric() and date[4] == "-" and date[7] == "-")
    return False
311eafdc794a97ff9b65c21b4ee79edd039c3027
50,414
import struct


def enc_float(val):
    """Encode a single float"""
    return struct.pack("!f", val)
f4d6d3fff683c3b64dcebc97c48b4ab8e3815f91
50,417
def fizzbuzz(num):
    """Function returns fizzbuzz if divisible by 3 and 5, buzz if divisible by 5,
    fizz if divisible by 3, and returns num if none of those conditions met."""
    arr = []
    for i in range(1, num + 1):
        if i % 3 == 0 and i % 5 == 0:
            arr.append('FizzBuzz')
        elif i % 5 == 0:
            arr.append('Buzz')
        elif i % 3 == 0:
            arr.append('Fizz')
        else:
            arr.append(i)
    return arr
4759e0d2ffc95dc61c6c0129bba2acb88a775d1c
50,418
def _strip_list(list):
    """
    strips all empty elements from a list, returns the stripped list

    :param list: the list to be stripped
    :return: the stripped list
    """
    return [x for x in list if x]
e000a3335fbcab640981a825a698586e502f89b1
50,420
def join_fields(fields):
    """
    Join a bunch of key/value pairs together.
    """
    return ', '.join('%s="%s"' % pair for pair in fields.items())
147a2add910046f48d403f404ec9333e4532ea56
50,421
def is_prime(n):
    """
    Check if n is a prime number.

    Sample usage:

    >>> is_prime(0)
    False
    >>> is_prime(1)
    False
    >>> is_prime(2)
    True
    >>> is_prime(3)
    True
    >>> is_prime(4)
    False
    >>> is_prime(5)
    True
    """
    if n <= 1:
        # 0, 1 and negative numbers are not prime.
        return False
    for x in range(2, n):
        if n % x == 0:
            return False
    return True
031948206a9b38ac12d6a0262eb749b8c5e18ca8
50,422
import random


def random_mac_address(local_admin=True):
    """
    Generates and returns a random MAC address.
    """
    # By default use a random address in VMWare's MAC address
    # range used by VMWare VMs, which has a very slim chance of colliding
    # with existing devices.
    mac = [
        0x00,
        0x05,
        0x69,
        random.randint(0x00, 0x7f),
        random.randint(0x00, 0xff),
        random.randint(0x00, 0xff)
    ]

    if local_admin:
        # Universally administered and locally administered addresses are
        # distinguished by setting the second least significant bit of the
        # most significant byte of the address. If the bit is 0, the address
        # is universally administered. If it is 1, the address is locally
        # administered. In the example address 02-00-00-00-00-01 the most
        # significant byte is 02h. The binary is 00000010 and the second
        # least significant bit is 1. Therefore, it is a locally administered
        # address.[3] The bit is 0 in all OUIs.
        mac[0] |= 2

    return ':'.join('{0:02X}'.format(o) for o in mac)
d72a702887c3e51f7917991af596a8dbdd1c3ab3
50,425
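A short sanity-check sketch for random_mac_address above, assuming it is in scope.

mac = random_mac_address()
print(mac)  # e.g. '02:05:69:3A:7F:10' (random)
octets = mac.split(':')
assert len(octets) == 6
assert int(octets[0], 16) & 0x02  # locally administered bit is set by default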
def crf_preprocess_candidates(candidates):
    """Receive annotated candidates and return features and labels list"""
    features = []
    labels = []
    for candidate in candidates:
        candidate_features = []
        candidate_labels = []
        for token_features, label in candidate:
            candidate_features.append(token_features)
            candidate_labels.append(label)
        features.append(candidate_features)
        labels.append(candidate_labels)
    return features, labels
91c8f941a9334d26a8ac0623201c13ca560cfeb0
50,428
def _BasenameFromPath(path):
    """Extracts the basename of either a Unix- or Windows-style path,
    assuming it contains either \\ or / but not both.
    """
    short_path = path.split('\\')[-1]
    short_path = short_path.split('/')[-1]
    return short_path
90dd65c95ef61e48132d7f0404a5c4d5bef685c1
50,429
def has_case_updates(case_block_kwargs):
    """
    Returns True if case_block_kwargs contains case changes.

    >>> has_case_updates({"owner_id": "123456", "update": {}})
    True
    >>> has_case_updates({"update": {}})
    False
    """
    if case_block_kwargs.get("update"):
        return True
    if case_block_kwargs.get("index"):
        return True
    return any(k for k in case_block_kwargs if k not in ("update", "index"))
e80ec5d38b7d7b05983d6672df681c2efb4d3d1d
50,434
import logging


def init_logger(
    _logger: logging.RootLogger, log_level: int, log_file: str
) -> logging.RootLogger:
    """Initialise the logger.

    :param logging.RootLogger _logger: Logger instance to initialise.
    :param int log_level: Desired logging level (e.g. ``logging.INFO``).
    :param str log_file: Path to destination file for logging output. If no
        output file is provided (``log_file`` is ``None``) logs will be written
        to standard output.
    :return: The initialised logger object.
    :rtype: logging.RootLogger
    """
    # Initialise the logger
    _logger.setLevel(log_level)

    if log_file is not None:
        handler = logging.FileHandler(filename=log_file, mode='w')
    else:
        handler = logging.StreamHandler()

    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
    )
    handler.setFormatter(formatter)
    _logger.addHandler(handler)
    _logger.info("Logger successfully initialised")

    return _logger
6bceb87729e66c0e037b7667d572d001b0ab981e
50,438
def linux_notify(title: str, message: str) -> str:
    """Display notification for Linux systems"""
    command = f'''notify-send "{title}" "{message}"'''
    return command
ae702eed884e35fccaf974898de9cc0c12b686c2
50,453
from typing import List


def join_words_cat(words: List[str]) -> str:
    """Joins words using string concatenation"""
    sentence = ""
    for word in words:
        sentence += word
    return sentence
f47f2ea2f1e2fa9e53bb586283f6d8f2ba6af3cc
50,460
def array_chunk_slice(array, size):
    """Return an array containing chunks of the specified array with the
    specified size, using "slice"."""
    result = []
    for i in range(0, len(array), size):
        result.append(array[i: i + size])
    return result
1388bfd67bcd1689fb474b7a5b8495680915ed5b
50,463
def create_biomarker_schema(schema: dict) -> dict:
    """
    Factory method for creating a schema object.

    Arguments:
        schema {dict} -- Cerberus schema dictionary.

    Returns:
        dict -- EVE endpoint definition.
    """
    base_dict = {
        "public_methods": [],
        "resource_methods": ["GET", "POST"],
        "allowed_roles": ["user", "uploader", "admin", 'system'],
        "allowed_item_roles": ["user", "uploader", "admin", 'system'],
        "schema": {
            "trial": {"type": "objectid", "required": True},
            "assay": {"type": "objectid", "required": True},
            "record_id": {"type": "objectid", "required": True},
        },
    }
    base_dict["schema"].update(schema)
    return base_dict
6e534a7ecdb54c9c23d811430a89cf98bf3e9bdd
50,469
import re


def compile_terms(terms):
    """
    Compile terms as regular expressions, for better matching.
    """
    return [re.compile(re.escape(term), re.I | re.U) for term in terms]
de614da9e9f35b2dee61b07f649ed7559757cc4c
50,472
def pts_in_cell_numpy(pts, cell):
    """
    get the point indices inside of a given cell (numpy)

    Input:
        pts, a set of points in numpy format
        cell, a list of 6 numbers {x1, y1, z1, x2, y2, z2}
    Output:
        inds, a list of indices for points inside the cell
    """
    N = pts.shape[0]
    inds = [i for i in range(N)
            if pts[i, 0] > cell[0] and pts[i, 0] < cell[3]
            and pts[i, 1] > cell[1] and pts[i, 1] < cell[4]
            and pts[i, 2] > cell[2] and pts[i, 2] < cell[5]]
    return inds
825c2898e22ff8d4f913c78b988b818bb4a6d3b0
50,478
def extract_job_specs(replica_specs):
    """Extract tf job specs from tfReplicaSpecs.

    Args:
        replica_specs: A dictionary having information of tfReplicaSpecs from manifest.

    Returns:
        Dictionary. Key is tf job type and value is number of replicas.
    """
    specs = dict()
    for job_type in replica_specs:
        specs[job_type.encode("ascii").lower()] = int(
            replica_specs.get(job_type, {}).get("replicas", 0))
    return specs
e4f375bdbe87e576225fd4f8bcb4f5348440e654
50,479
def is_valid_exit(exits, chosen_exit):
    """This function checks, given a dictionary "exits" (see map.py) and a player's
    choice "chosen_exit", whether the player has chosen a valid exit. It returns
    True if the exit is valid, and False otherwise.

    Assume that the name of the exit has been normalised by the function
    normalise_input().
    """
    return chosen_exit in exits
e5622cf41c68420b822bcfa450ae4f545d85040e
50,480
def vector_name_iterator(data):
    """
    Produces an iterator that yields 2-tuples of vectors given a dict of fields

    vectors are identified by identifying fields with common prefixes that all
    end with '_x', '_y', or '_z'. The first element of the yielded tuple holds
    the common prefix of the fields related to the vector, while the second
    element holds a list of field names corresponding to the various components
    (ordered as x,y,z). Missing components are replaced by a None
    """
    ax_map = {'x': 0, 'y': 1, 'z': 2}
    candidates = {}

    # identify candidate vectors
    for elem in data.keys():
        temp = elem.split('_')
        if len(temp) != 2 or (temp[1] not in ['x', 'y', 'z']):
            continue
        prefix, dim = temp
        if prefix not in candidates:
            candidates[prefix] = [None, None, None]
        candidates[prefix][ax_map[dim]] = elem
    return candidates.items()
dc987b589f5aeed94fcc57e2c51fe307ebe5f20f
50,485
def GetOwnerIds(hotlist):
    """Returns the list of ids for the given hotlist's owners."""
    return hotlist.owner_ids
a1fff7ecdfb8c1d8a344c261fad3d94b1a81bdc2
50,486
def time_diff(t0, t1):
    """
    Args:
        :t0: start time in seconds
        :t1: end time in seconds

    Returns:
        string with time difference (i.e. t1-t0)
    """
    minutes, seconds = divmod(t1 - t0, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d hours, %d minutes, %d seconds" % (hours, minutes, seconds)
9b1c179fbec8fa0b9dc5143cf3316b061bf5d5c8
50,490
import json def offers(request): """ Create Json response with offers menu :param request: POST request from "Offers" dialogflow intent :return: Json response that contains spoken and display prompt and also list as Dialogflow conversation item """ speech_text_pl = "Która opcja Cię interesuje?" display_text_pl = "Która opcja Cię interesuje?" list_pl = { "intent": "actions.intent.OPTION", "data": { "@type": "type.googleapis.com/google.actions.v2.OptionValueSpec", "listSelect": { "items": [ { "optionInfo": { "key": "Przeglądaj oferty", "synonyms": [ "Przejrzyj oferty" ] }, "title": "Przeglądaj oferty" }, { "optionInfo": { "key": "Wyszukaj oferty", "synonyms": [ "Znajdź oferty", "Znajdź ofertę" ] }, "title": "Wyszukaj oferty" }, { "optionInfo": { "key": "Wyszukaj ofertę po id", "synonyms": [ "Znajdź ofertę po id" ] }, "title": "Wyszukaj ofertę po id" }, { "optionInfo": { "key": "Do kiedy jest ważna oferta", "synonyms": [ "Ważnosć oferty", "Do kiedy oferta będzie aktualna", ] }, "title": "Do kiedy jest ważna oferta" } ] } } } suggestions_pl = [{"title": "Oferty"}, {"title": "Zlecenia"}, {"title": "Zapytania"}, {"title": "Konto"}, {"title": "Inne"}] speech_text_en = "Which option are you interested in?" display_text_en = "Which option are you interested in?" list_en = { "intent": "actions.intent.OPTION", "data": { "@type": "type.googleapis.com/google.actions.v2.OptionValueSpec", "listSelect": { "items": [ { "optionInfo": { "key": "Browse offers", "synonyms": [ "View offers", "Display offers" ] }, "title": "Browse offers" }, { "optionInfo": { "key": "Search offers", "synonyms": [ "Search active offers" ] }, "title": "Search offers" }, { "optionInfo": { "key": "Search offer after id", "synonyms": [ "Search offer according to id" ] }, "title": "Search offer after id" }, { "optionInfo": { "key": "Until when is the offer valid", "synonyms": [ "Offer valid", "Until when is the offer valid?", ] }, "title": "Until when is the offer valid" } ] } } } suggestions_en = [{"title": "Offers"}, {"title": "Orders"}, {"title": "Inquiries"}, {"title": "Account"}, {"title": "Others"}] with open('api/response.json') as json_file: offers = json.load(json_file) part_to_modify = offers['payload']['google'] if request.data['queryResult']['languageCode'] == 'pl': part_to_modify['richResponse']['items'][0]['simpleResponse']['textToSpeech'] = speech_text_pl part_to_modify['richResponse']['items'][0]['simpleResponse']['displayText'] = display_text_pl part_to_modify['systemIntent'] = list_pl part_to_modify['richResponse']['suggestions'] = suggestions_pl elif request.data['queryResult']['languageCode'] == 'en': part_to_modify['richResponse']['items'][0]['simpleResponse']['textToSpeech'] = speech_text_en part_to_modify['richResponse']['items'][0]['simpleResponse']['displayText'] = display_text_en part_to_modify['systemIntent'] = list_en part_to_modify['richResponse']['suggestions'] = suggestions_en offers['payload']['google'] = part_to_modify return offers
9d14ba6b962fd6fee2cb25566b3bbe8aea35fdfe
50,491
def normalize_path(path: str) -> str:
    """
    Normalize path. Converts

        # -> root
        ^ -> parent
        ^^ -> parent.parent
    """
    if not path.startswith('$'):
        return path
    path = path[1:]
    if path.startswith('#'):
        return 'root.' + path[1:]
    for i, value in enumerate(path):
        if value != '^':
            return 'parent.' * i + path[i:]
    return ('parent.' * len(path))[:-1]
092b61947dfeecdadc82012f680d02a2fe66463a
50,492
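A few example inputs for normalize_path above, assuming it is in scope; '$' marks a path that gets rewritten, everything else passes through unchanged.

print(normalize_path("items.name"))  # 'items.name' (no '$' prefix: returned unchanged)
print(normalize_path("$#config"))    # 'root.config'
print(normalize_path("$^^value"))    # 'parent.parent.value'
print(normalize_path("$^^^"))        # 'parent.parent.parent'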
def convert_event_name(name: str) -> str:
    """Strips and capitalizes a string.

    This function takes a string input and, if the string is non-empty,
    capitalizes the string and strips leading/trailing whitespaces.

    Args:
        name: Any string of any length.

    Returns:
        str: Capitalized and stripped string.
    """
    if len(name) == 0:
        return "Generic Event"
    return name.title().strip()
fd734b7a178a1ead518c288de87df1ce6060a97d
50,499
def func2() -> list:
    """
    This function has no parameters and a return value.
    """
    return [1, 2, 3]
b07dfb199552d7b3059520466a0b1ecb811b006f
50,502
import zlib
import base64


def decode_base64_and_inflate(string):
    """
    base64 decodes and then inflates according to RFC1951

    :param string: a deflated and encoded string
    :return: the string after decoding and inflating
    """
    return zlib.decompress(base64.b64decode(string), -15)
20f6a219cf40d1ff2baf0a0d1c1beb6bee793b74
50,512
def is_empty(node):
    """Checks whether the :code:`node` is empty."""
    return node == []
08dca99334a08c979df52d48ce9ef1c767f544e6
50,514
import math


def var_y(obliquity_correction):
    """Returns Var Y with Obliquity Correction, obliquity_correction"""
    var_y = math.tan(math.radians(obliquity_correction / 2)) * math.tan(
        math.radians(obliquity_correction / 2)
    )
    return var_y
47c14488da71edcb130e8a64434b6269d06be993
50,518
def assemble( client, file_, dirname=None, generic=None, into_asm=None, path=None, ref_model=None, transform=None, constraints=None, package_assembly=None, walk_children=None, assemble_to_root=None, suppress=None, ): """Assemble a component into an assembly. Args: client (obj): creopyson Client. `file_` (str): File name component. dirname (str, optional): Diretory name. Defaults is Creo's current working directory. generic (str, optional): Generic model name (if file name represents an instance). Defaults is generic model name (if file name represents an instance). into_asm (str, optional): Target assembly. Defaults is currently active model. path (list:int, optional): Path to a component that the new part will be constrained to. Defaults to None. ref_model (str, optional): Reference model that the new part will be constrained to; only used if path is not given. If there are multiple of this model in the assembly, the component will be assembled multiple times, once to each occurrence. Defaults to None. transform (obj:JLTransform, optional): Transform structure for the initial position and orientation of the new component; only used if there are no constraints, or for certain constraint types. Defaults to None. constraints (obj_array:JLConstraint, optional): Assembly constraints. Defaults to None. package_assembly (bool, optional): Whether to package the component to the assembly; only used if there are no constraints specified. Defaults is If there are no constraints, then the user will be prompted to constrain the component through the Creo user interface. walk_children (bool, optional): Whether to walk into subassemblies to find reference models to constrain to. Defaults to None. assemble_to_root (bool, optional): Whether to always assemble to the root assembly, or assemble to the subassembly containing the reference path/model. Defaults to None. suppress (bool, optional): Whether to suppress the components immediately after assembling them. Defaults to None. Returns: (dict): dirname (str): Directory name of component. files (list:str): File name of component. revision (int): Revision of file that was opened; if more than one file was opened, this field is not returned. featureid (int): Last Feature ID of component after assembly. """ data = {"file": file_} if dirname is not None: data["dirname"] = dirname if generic is not None: data["generic"] = generic if into_asm is not None: data["into_asm"] = into_asm if path is not None: data["path"] = path if ref_model is not None: data["ref_model"] = ref_model if transform is not None: data["transform"] = transform if constraints is not None: data["constraints"] = constraints if package_assembly is not None: data["package_assembly"] = package_assembly if walk_children is not None: data["walk_children"] = walk_children if assemble_to_root is not None: data["assemble_to_root"] = assemble_to_root if suppress is not None: data["suppress"] = suppress return client._creoson_post("file", "assemble", data)
c999ce074a4f0a5c5ba2232483a80cacc53e6f89
50,527
def valid_map(file_name):
    """
    Checks if the magic numbers of a given file correspond to a Warcraft III map file
    """
    with open(file_name, "rb") as f:
        map_name_bytes = f.read(4)
        try:
            map_name_bytes = str(map_name_bytes.decode('utf-8'))
        except UnicodeDecodeError:
            return False
        if map_name_bytes == "HM3W":
            return True
        return False
e4a3314aa52badb564283590997c641e0412bd48
50,530
def student_average(grades: list) -> float:
    """Return the weighted average of a student's grades.

    You may ASSUME that:
        - grades consists of exactly three float values
    """
    # Sort the student's grades
    sorted_grades = sorted(grades)

    # These are the weights for the assignment grades
    weights = [0.25, 0.35, 0.4]

    return (
        weights[0] * sorted_grades[0]
        + weights[1] * sorted_grades[1]
        + weights[2] * sorted_grades[2]
    )
064ffc0deac02556a60a5f37ae1a4aa6741904f7
50,533
import re


def is_valid_attr_name(s: str) -> bool:
    """
    Ensure the given string can be used as attribute on an object instance.
    """
    return bool(
        isinstance(s, str)
        and re.search(string=s, pattern=r"^[a-zA-Z_][a-zA-Z0-9_]*$")
    )
64e6ae4105985a4738160f432f441fca19c4c718
50,535
import math


def euclidian(p1, p2):
    """Return euclidian distance between 2 points."""
    return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
82c326077e8a90ed067e7d6cd2d5aabfd9745499
50,546
def extract_column(X, col_name):
    """Extract specified column from dataframe."""
    if col_name is None or col_name not in list(X.columns):
        return X, None
    w = X[col_name].copy()
    X = X.drop(col_name, axis=1)
    return X, w
b5617474644deffb7ae0b25059c3255732bacdc7
50,547
def get_roi(img, top_left, bot_right):
    """
    Returns region of interest of an img given bounding box points
    """
    y = [max(top_left[1], 0), min(bot_right[1], img.shape[0] - 1)]
    x = [max(top_left[0], 0), min(bot_right[0], img.shape[1] - 1)]
    return img[y[0]:y[1], x[0]:x[1]]
3a53c5388424e18d0cdd4d03ad70db8eaeedadb8
50,548
def corpus2sentences(corpus):
    """split corpus into a list of sentences."""
    return corpus.strip().split('\n')
6ec305d00e410adf4b80acf753665d0ee849e98e
50,553
def get_color(matrix):
    """Returns the color of the matrix (excluding black)"""
    for a in matrix:
        for color in a:
            if color != 0:
                return color
a8b41bb3555e89abd7e92d54685d5a82b17813bd
50,554
def draw_truth_table(boolean_fn):
    """
    This function prints a truth table for the given boolean function. It is
    assumed that the supplied function has three arguments.

    ((bool, bool, bool) -> bool) -> None

    If your function is working correctly, your console output should look like this:

    >>> from truth_tables import *
    >>> draw_truth_table(boolean_fn1)
    a     b     c     res
    -----------------------
    True  True  True  False
    True  True  False False
    True  False True  False
    True  False False False
    False True  True  False
    False True  False False
    False False True  True
    False False False True
    """
    # Regularize the print format of each line
    print_format = "%-6s" * 3 + "%s"

    # Line 1 and 2
    print(print_format % ("a", "b", "c", "res"))
    print("-" * 23)

    # Line 3 to 10
    for a in (1, 0):
        for b in (1, 0):
            for c in (1, 0):
                tup = (a, b, c, boolean_fn(a, b, c))
                print(print_format % tuple(map(bool, tup)))
    return None
6ffbd8a8d80c0a0044b547facd50b1861948b9d7
50,562
import jinja2


def render_j2_template(templatefile, searchpath, obj):
    """Render a Jinja2 template and return the rendered string"""
    rendered_data = None
    template_loader = jinja2.FileSystemLoader(searchpath=searchpath)
    env = jinja2.Environment(
        loader=template_loader, trim_blocks=False, lstrip_blocks=False
    )
    template = env.get_template(templatefile)
    rendered_data = template.render(obj)

    return rendered_data
f051fa9b1c50ba39e1e4fc71d894ff50cb0043be
50,563
def unpack_string(value):
    """
    Unpack a string from byte format, to its original form.
    """
    return value.decode('utf-16')
d87dc41225d6f1de3082b8cedbdd9e489d458edb
50,564
def tokenize_function(examples, tokenizer, block_size):
    """
    This function will take the text dataset and complete the steps below:
        1. Tokenize the entire dataset
        2. Concatenate all examples from a 2D list into a 1D list
        3. Create blocks of the concatenated examples with a certain block size
        4. Create labels for the dataset

    @params:
        examples: The dataset to be tokenized
        tokenizer: The tokenizer to be used for tokenizing the dataset
        block_size: The size of the blocks to be created

    @returns:
        Tokenized dataset with labels
    """
    # 1. Tokenize the entire dataset
    tokenized_examples = tokenizer(examples["text"])

    # 2. Concatenate all examples from 2D lists into 1D lists
    # Flattens ['input_ids'], ['attention_mask'], etc. by concatenating the per-example lists
    concatenated_examples = {key: sum(tokenized_examples[key], []) for key in tokenized_examples.keys()}

    # 3. Create blocks of the concatenated examples with a certain block size
    # Getting the total number of tokens
    num_tokens = len(concatenated_examples['input_ids'])
    # Getting the usable length; tokens that cannot fill another full block are dropped
    total_length = (num_tokens // block_size) * block_size

    results = {}
    for key, value in concatenated_examples.items():
        blocks = []
        for i in range(0, total_length, block_size):
            blocks.append(value[i: i + block_size])
        results[key] = blocks

    # 4. Create labels for the dataset
    results['labels'] = results['input_ids'].copy()

    return results
b79777b039a8f6eaaf25559fd939ff72a7dcfc60
50,565
import random


def d6() -> int:
    """Roll a D6"""
    return random.randint(1, 6)
8a56a6bc614a5397d28fb5abafd97df0383276f4
50,569
def is_listing_owner(listing, user_object):
    """returns true if a user is the owner of a given listing"""
    return user_object.id == listing.owner_id
b129ee05eccf1e9e3ca62966e75a8a6051e9b03b
50,570
def merge_bbox(bbox1, bbox2):
    """Merge two pdf blocks' bounding boxes."""
    return (
        min(bbox1[0], bbox2[0]),  # x0
        min(bbox1[1], bbox2[1]),  # y0
        max(bbox1[2], bbox2[2]),  # x1
        max(bbox1[3], bbox2[3]),  # y1
    )
6e5343d1f651755bc2ac9412fac257a0c7dc6170
50,575
def is_abbreviation(nm: str):
    """
    Determine if something is an abbreviation.

    Otherwise if text ends with "." we'll conclude so.

    Examples:
        Ala.    YES
        Ala     NO
        S. Bob  NO -- abbreviated, yes, but this is more like a contraction.
        S. B.   YES

    :param nm: textual name
    :return: True if obj is inferred to be an abbreviation
    """
    return nm.endswith(".")
16415152adad3ba41a11d9f9216fa7e65f5123ff
50,576
import re


def find_timestamp(text_list):
    """
    Find timestamp line and put digit's value

    Parameters
    ----------
    text_list : dataframe
        A dataframe you want to convert

    Returns
    -------
    dataframe
        it has new columns ["start_timestamp", "digit"]
        The digit column helps filling start_timestamp and end_timestamp
    """
    pat = re.compile(r'(\d\d:\d\d:\d\d. *\d\d)')
    matches = pat.search(text_list['speech'])
    if matches is not None:
        text_list['start_timestamp'] = matches.group(1) if matches is not None else None
        text_list['digit'] = 1
    else:
        text_list['digit'] = 0
        text_list['start_timestamp'] = None
    return text_list
867359e5267e421af0595a670b3137c0cd0b8147
50,578
def align_by_root(joints):
    """
    Assumes joints is 24 x 3 in SMPL order.
    Subtracts the location of the root joint from all the other joints
    """
    root = joints[0, :]
    return joints - root
bb1470fc1bce79710a770bc97122a3e2fcd4ab23
50,579
def simple_bytecode() -> str:
    """From C code:

    int B() {
        return 10;
    }

    int A() {
        int x = B();
        if (x == 5) {
            x += 1;
        }
        return x;
    }
    """
    return """
; Function Attrs: noinline nounwind optnone ssp uwtable
define i32 @B() #0 {
  ret i32 10
}

; Function Attrs: noinline nounwind optnone ssp uwtable
define i32 @A() #0 {
  %1 = alloca i32, align 4
  %2 = call i32 @B()
  store i32 %2, i32* %1, align 4
  %3 = load i32, i32* %1, align 4
  %4 = icmp eq i32 %3, 5
  br i1 %4, label %5, label %8

; <label>:5:                                      ; preds = %0
  %6 = load i32, i32* %1, align 4
  %7 = add nsw i32 %6, 1
  store i32 %7, i32* %1, align 4
  br label %8

; <label>:8:                                      ; preds = %5, %0
  %9 = load i32, i32* %1, align 4
  ret i32 %9
}
"""
2ff2f9f11a46733c1e91afbea3610273505a228f
50,580
def predictors_validate(predictors, data=None):
    """Validates the predictors and ensures that they are type list(str)

    Optionally checks that the predictors are columns in the data set. Only
    performs this check if the data parameter is not None.

    Parameters
    ----------
    predictors: list(str) or str
        the predictor(s) to validate
    data : pd.DataFrame or None, optional
        the data set to validate the predictors are in

    Returns
    -------
    list(str)
        validated predictors

    Raises
    ------
    ValueError
        if a predictor is named 'all' or 'none'
        if a predictor is not a column in the data set

    Examples
    --------
    >>> predictors_validate('famhistory')
    ['famhistory']
    >>> predictors_validate(['famhistory', 'marker'])
    ['famhistory', 'marker']
    >>> predictors_validate('all')
    Traceback (most recent call last):
    ...
    ValueError: predictor cannot be named 'all' or 'none'
    """
    if isinstance(predictors, str):  # single predictor
        predictors = [predictors]  # convert to list

    # can't use 'all' or 'none' columns as predictors
    for predictor in predictors:
        if predictor in ['all', 'none']:
            raise ValueError("predictor cannot be named 'all' or 'none'")

    # check that predictors are columns in the data
    if data is not None:
        for predictor in predictors:
            if predictor not in data.columns:
                raise ValueError("predictor must be a column in the dataframe")
    else:
        pass  # skip check

    return predictors
646ee3e0f6e93fe149a02ac05c50cdadef1ec7c2
50,598
def anyendswith(value, ends):
    """Check if `value` ends with one of the possible `ends`"""
    for end in ends:
        if value.endswith(end):
            return True
    return False
f74f3abc7358e71ef116d25d2ead975d3b65de56
50,601
import torch


def quat_to_d6(quats: torch.Tensor) -> torch.Tensor:  # take (...,4) --> (...,6)
    """This code is adapted from
    https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/transforms/rotation_conversions.py"""
    r, i, j, k = torch.unbind(quats, -1)
    two_s = 2.0 / (quats * quats).sum(-1)

    o = torch.stack(
        (
            1 - two_s * (j * j + k * k),
            two_s * (i * j - k * r),
            two_s * (i * k + j * r),
            two_s * (i * j + k * r),
            1 - two_s * (i * i + k * k),
            two_s * (j * k - i * r),
            two_s * (i * k - j * r),
            two_s * (j * k + i * r),
            1 - two_s * (i * i + j * j),
        ),
        -1,
    )
    matrix = o.reshape(quats.shape[:-1] + (3, 3))
    return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
f7c49b4964b29a483962db6afcc51f289b321f0a
50,603
def get_column(length, count):
    """This custom tag takes two integers, the length of an ordered list and
    the count of the current list item. It returns col[1-4] to be used as a
    class to position the item in the correct column.
    """
    col_length = length // 4
    if count <= col_length:
        return 'col1'
    elif count <= 2 * col_length:
        return 'col2'
    elif count <= 3 * col_length:
        return 'col3'
    else:
        return 'col4'
f93b6d6b071bbb2868ddaab144f81c80d8557905
50,605
def try_anafas_float(floatstr):
    """
    Try converting a string into a float. Trims empty space and checks whether
    there is a decimal separator. When the decimal separator is omitted,
    assumes two decimal digits by default (Anafas' default), dividing the
    resulting number by 100.
    """
    try:
        num = float(floatstr.strip())
        # checks if the decimal separator was omitted
        thereIsDot = not (floatstr.find(".") == -1)
        if not thereIsDot:
            num = num / 100.0
    except ValueError:
        num = 0.0
    return num
cad5ca9f3aae58b2417a0cd9786522c762193144
50,609
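Example conversions with try_anafas_float above, assuming it is in scope, showing the implicit two-decimal behaviour for values written without a separator.

print(try_anafas_float(" 12.5 "))  # 12.5  (explicit decimal separator kept as-is)
print(try_anafas_float("1250"))    # 12.5  (no separator: value is divided by 100)
print(try_anafas_float("abc"))     # 0.0   (unparseable input falls back to 0.0)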
def all_pairs(elements):
    """
    Helper function, giving all pairs of a list of elements

    Parameter
    --------
    elements: List[Any]
        list of elements

    Returns
    -------
    List[Tuple[Any, Any]]
        Unique pairings of the elements in the given list.
    """
    return [(elements[i], elements[j]) for i in range(0, len(elements))
            for j in range(i + 1, len(elements))]
b71bb0d3d573cd818c4b946fe517d0f5632b7e4e
50,613
def scan_year(visit, studyid='TON'):
    """
    Retrieve the year in which a scan was collected.

    Parameters
    ----------
    visit : str or int
        Visit number
    studyid : str, optional
        Specifies the study from which files will be retrieved.
        Valid values are 'THD' and 'TON'.

    Returns
    -------
    sc_year : int
        Actual scan year
    """
    if type(visit) is str:
        visit = int(visit[-1:])

    if studyid == 'TON':
        years = [2012, 2013, 2014]
    else:
        years = [2008, 2009, 2010, 2011]

    sc_year = years[visit - 1]

    return sc_year
3128d43b00caff51c1e0329470d36592e2e848a3
50,620
from typing import List


def shell_line_to_o_files_list(line: str) -> List[str]:
    """Return a list of .o files in the files list."""
    return [entry for entry in line.split() if entry.endswith(".o")]
a2e7d9d4c6cd333f32515ed6305e4a2d031f9ae9
50,621
def append_offset(name, offset):
    """
    This function is used for assigning a name with offset if a file with the
    same name exists. It takes a filename and an offset and returns a valid
    equivalent name with offset number.

    Example:
        # assume two variables
        name = 'python.py'
        offset = '2'
        append_offset(name, offset)

        # The above invocation will return string as
        # 'python_2.py'
    """
    fname, extension = name.split('.')
    fname = ''.join([fname, '_', offset, '.', extension])
    return fname
719ae91df2d7af04090d7bcc5f07f32e24f7f8fc
50,622
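Usage matching the docstring example of append_offset above, assuming the function is in scope. Note that the single split('.') means a name containing more than one dot (e.g. 'archive.tar.gz') would raise a ValueError here.

print(append_offset('python.py', '2'))    # 'python_2.py'
print(append_offset('report.txt', '10'))  # 'report_10.txt'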