import re


def clean_title(title):
    """
    Clean title -> remove dates, remove duplicated spaces and strip title.

    Args:
        title (str): Title.

    Returns:
        str: Clean title without dates, duplicated, trailing and leading spaces.
    """
    date_pattern = re.compile(r'\W*'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'(?=\d*)(?:.{4}|.{2})'
                              r'\W*')
    title = date_pattern.sub(' ', title)
    title = re.sub(r'\s{2,}', ' ', title)
    title = title.strip()
    return title
1fecb39449c4bcb9a750ada18d9382c0fb0d95ea
526,546
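A minimal usage sketch for clean_title (added for illustration; the sample title is made up):

# The embedded date and the duplicated spaces should both be stripped.
print(clean_title("Weekly   report 12/31/2021"))  # -> "Weekly report"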
def get_indexes_from_best_path(best_path):
    """Grab the reference and event index of the best path from the
    maximum_expected_accuracy_alignment function.

    :param best_path: output from maximum_expected_accuracy_alignment
    :return: list of events [[ref_pos, event_pos]...]
    """
    path = []
    while best_path[4]:
        ref_pos = best_path[0]
        event_pos = best_path[1]
        path.append([ref_pos, event_pos])
        best_path = best_path[4]
    # gather last event
    ref_pos = best_path[0]
    event_pos = best_path[1]
    path.append([ref_pos, event_pos])
    # flip ordering of path
    return path[::-1]
ffcfa377b48f02356b3ddad75b725baf22a096d8
632,354
def map_generate_tuple(*args):
    """Generate a tuple with the results from the func.

    Used to assist dict(), map() to generate a dictionary.

    Args:
        *args (list): [0]: (key (immutable): key of the generated dict,
            func (function): function to be called,
            arg (tuple): arguments for func)

    Returns:
        tuple: (key, func(*arg))
    """
    key, func, arg = args[0][0], args[0][1], args[0][2]
    return (key, func(*arg))
ba2986a41aa28098aa2c7ecb8781429eaf0419f9
85,460
def GdbChecksum(message):
    """Calculate a remote-GDB style checksum."""
    chksum = sum([ord(x) for x in message])
    return ('%.2x' % chksum)[-2:]
71e62098e2cbea3bc0272f595bd76dd66ebae29f
238,060
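A quick sanity check of the modulo-256 behaviour, i.e. keeping only the last two hex digits of the byte sum (the message is made up):

# 'O' (79) + 'K' (75) = 154 = 0x9a, so the checksum is "9a".
print(GdbChecksum("OK"))  # -> "9a"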
def extract_measured_states(record, measured_list):
    """
    Given the following input:
        record: a list of all final states where the index of the list
            corresponds to the index in the quantum board
        measured_list: a list of indexes for qubits that have been
            measured by the player

    Return a dictionary that maps the position that has been measured to
    the corresponding state from record (this will be used to update the
    classic board).
    """
    result = {}
    for index in measured_list:
        result[str(index)] = record[index]
    return result
5b728579ee98e258dbdefdc3267f4d9dfccc793e
344,479
import collections


def reorder_props(props):
    """
    If "children" is in props, then move it to the front to respect
    dash convention.

    Parameters
    ----------
    props: dict
        Dictionary with {propName: propMetadata} structure

    Returns
    -------
    dict
        Dictionary with {propName: propMetadata} structure
    """
    if 'children' in props:
        props = collections.OrderedDict(
            [('children', props.pop('children'),)] +
            list(zip(list(props.keys()), list(props.values()))))
    return props
13911471c943187f6f022fcfbd22d9e4ad3a66af
641,453
def _deep_tuple(item):
    """
    Convert a nested list with arbitrary structure to a nested _tuple_
    instead.
    """
    if isinstance(item, list):
        # In the recursive case (item is a list), recursively deep_tuple-ify
        # all list items, and store all items in a tuple instead.
        return tuple(_deep_tuple(i) for i in item)
    else:
        # In the base case (any items that are not lists, including some
        # possibly deep objects like dicts, and tuples themselves), change
        # nothing.
        return item
ee0afc8072d3b60d20ab6eb5c1eae0a1ef46804f
416,802
def tidyBBB(BBB: str) -> str:
    """
    Change book codes like SA1 to the conventional 1SA.

    BBB is always three characters starting with an UPPERCASE LETTER.
    """
    return (BBB[2] + BBB[:2]) if BBB[2].isdigit() else BBB
58775330c3508f9fab49e474de945c8c213b240e
89,430
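A short usage sketch (added for illustration; the book codes are examples):

print(tidyBBB("SA1"))  # -> "1SA" (trailing digit moved to the front)
print(tidyBBB("GEN"))  # -> "GEN" (no trailing digit, unchanged)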
def quintic_easein(pos):
    """
    Easing function for animations: Quintic Ease In
    """
    return pos * pos * pos * pos * pos
2a5c722315940d00e5be50e3c597d42a0b627268
626,676
def check_ambiguity(grammar, root, n_max=6):
    """
    Check if the grammar produces ambiguous (repeated) objects

    Args:
        grammar (dict): Grammar to check
        root: Grammar root object
        n_max (int): Max object size to check

    Returns:
        bool: Check result
    """
    for n in range(n_max):
        L = grammar[root].list(n)
        if len(L) != len(set(L)):
            return True
    return False
4d5beedb618b7e34af024c83e7c95136ab65fc87
86,502
def minmax(data):
    """Solution to exercise R-1.3.

    Takes a sequence of one or more numbers, and returns the smallest and
    largest numbers, in the form of a tuple of length two. Do not use the
    built-in functions min or max in implementing the solution.
    """
    min_idx = 0
    max_idx = 0
    for idx, num in enumerate(data):
        if num > data[max_idx]:
            max_idx = idx
        if num < data[min_idx]:
            min_idx = idx
    return (data[min_idx], data[max_idx])
9715bef69c120f6d1afb933bd9030240f556eb20
708,838
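A minimal usage sketch (added for illustration; the input list is made up):

print(minmax([4, -2, 9, 0]))  # -> (-2, 9)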
def define_options(L_opts=[], L_brief=[], L_details=[], L_other=[]):
    """
    Define several groupings with lists of show options.
    """
    L_opts = ['brief', 'all', ]
    L_brief = ['computes', 'servers', 'server_groups', 'migrations',
               'flavors', 'images', ]
    L_details = ['computes', 'servers', 'server_groups', 'libvirt',
                 'migrations', 'flavors', 'images', 'volumes', ]
    L_other = ['aggregates', 'topology', 'topology-long']
    return (L_opts, L_brief, L_details, L_other)
7969632519ede4918588bfa7337d2f9b3a07f5c2
565,391
import base64
import json
import zlib


def decode_event_data(event):
    """
    Decodes the raw Cloudwatch lambda payload into a dictionary
    representing the JSON structure.
    """
    base64_gzipped_data = str(event['awslogs']['data'])
    gzipped_data = base64.b64decode(base64_gzipped_data)
    # wbits=15+32 lets zlib auto-detect the gzip/zlib header.
    json_data = zlib.decompress(gzipped_data, 15 + 32)
    data = json.loads(json_data)
    return data
1b31a79ab7c7fdd12dbbd50210f420b86a14c7d5
255,395
import torch


def backward_step(optimizer, input_tensor, output_tensor, output_tensor_grad):
    """Backward step through passed-in output tensor.

    If last stage, output_tensor_grad is None, otherwise gradient of loss
    with respect to stage's output tensor.

    Returns gradient of loss with respect to input tensor (None if first
    stage).
    """
    # Retain the grad on the input_tensor.
    if input_tensor is not None:
        input_tensor.retain_grad()

    # Backward pass.
    torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)

    # Collect the grad of the input_tensor.
    input_tensor_grad = None
    if input_tensor is not None:
        input_tensor_grad = input_tensor.grad

    return input_tensor_grad
ded6b04eb98d5dd08ec9737d689e138497578518
556,658
import hashlib


def hash_file(filepath):
    """
    Compute the md5 hash for the specified file and return it as a hex
    string.
    """
    h = hashlib.md5()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            h.update(chunk)
    return h.hexdigest()
b9095e6d44181bbc632e879178e40996d6638a49
267,692
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code"""
    if "time" in str(type(obj)):
        return obj.isoformat()
    return str(obj)
3d686ea0af71b07a5d182bd0ce73585bb7073656
417,951
def ppi_params_and_ref(request):
    """
    Fixture that applies the PointsPerIntervalSlicer to the prepared test
    case.

    Parameters
    ----------
    request : tuple
        Prepared test case for PointsPerIntervalSlicer.

    Returns
    -------
    dict
        Characteristics of interval.
    """
    params = request.param[0]
    references = request.param[1]
    intervals = request.param[2]
    boundaries = request.param[3]
    return {
        "params": params,
        "references": references,
        "intervals": intervals,
        "boundaries": boundaries,
    }
5c9200b06ba2adfa2f76e7de1300b74a60ef1c3c
447,903
import re


def parse_date(deadline_date):
    """
    Given a date in the form MM/DD/YY or MM/DD/YYYY, returns the integers
    MM, DD, and YYYY (or YY) in this order.
    """
    deadline_split = re.split('\\/|\\-', deadline_date)
    return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2])
0ded6bccce8437aad61cfa5ff121c5ed0595849b
1,199
def h(x, fx):
    """helper function as in the PLoS article,
    doi:10.1371/journal.pcbi.1005352.t003 pg 16/24"""
    fx = fx % 3
    x = x % 3
    if fx > x:
        return x + 1
    elif fx < x:
        return x - 1
    else:
        return x
bac70fb88b2b34b5b391391031af7674da5aaffd
194,297
def get_tri_category(score):
    """Get a 3 class integer classification label from a score between 0
    and 1."""
    if score >= 0 and score < 0.3333333333:
        return 0
    elif score >= 0.3333333333 and score < 0.6666666666:
        return 1
    else:
        return 2
54306667882448925361bcb12e0bade4ddbcacf2
658,253
def convert_timestamp(timestamp: str) -> int:
    """
    Converts a timestamp (MM:SS) to milliseconds.
    """
    timestamp_list = timestamp.split(":")
    minutes = timestamp_list[0]
    seconds = timestamp_list[1]
    minutes_in_ms = int(minutes) * 60 * 1000
    seconds_in_ms = int(seconds) * 1000
    total_ms = minutes_in_ms + seconds_in_ms
    # The seconds field must be exactly two digits and neither part negative.
    if len(seconds) != 2 or minutes_in_ms < 0 or seconds_in_ms < 0:
        raise ValueError("Invalid format. Proper format is MM:SS.")
    return total_ms
7133709d3d5ff76661889bef48301bd0912047d4
678,491
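A short usage sketch (added for illustration):

print(convert_timestamp("03:25"))  # -> 205000 (3*60*1000 + 25*1000)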
import inspect


def view_namespace(view, cls_name=None):
    """
    Create the namespace from the view

    :param view: object (class or instance method)
    :param cls_name: str - To pass the class name associated to the view
        in the case of decorators that may not give the real class name
    :return: string or None
    """
    ns = view.__module__
    if inspect.isclass(view):
        ns += ".%s" % view.__name__
    else:
        # im_class/im_self are Python 2 bound-method attributes.
        if hasattr(view, "im_class") or hasattr(view, "im_self"):
            if view.im_class is not None:
                cls_name = view.im_class.__name__
            elif view.im_self is not None:
                cls_name = view.im_self.__name__
        if cls_name is None:
            return None
        ns += ".%s.%s" % (cls_name, view.__name__)
    return ns
46239aebe05befd5169cbc386d0cd4922f84c3c1
586,520
def flatten_to_polygons(geometry):
    """
    Return a list of all polygons of this (multi)`geometry`.
    """
    if geometry.type == 'Polygon':
        return [geometry]
    if geometry.type == 'MultiPolygon':
        return list(geometry)
    if hasattr(geometry, 'geoms'):
        # GeometryCollection or MultiLineString? return list of all polygons
        geoms = []
        for part in geometry.geoms:
            if part.type == 'Polygon':
                geoms.append(part)
        if geoms:
            return geoms
    return []
81df86d3bb6767c1016b309537b8bc4b8c9e9190
234,173
import logging


def define_problem(raw_frame):
    """
    Cleans the input, raw data and defines the set of columns that should
    be used for modeling

    :param raw_frame: the raw HMDA dataset
    :return: A dataframe clean and fit for clustering
    """
    selected_columns = [
        'applicant_income_000s',
        'loan_purpose_name',
        'hud_median_family_income',
        'loan_amount_000s',
        'originated'
    ]
    log = logging.getLogger(__name__)
    log.info("Input data size {}".format(raw_frame.shape))
    return raw_frame.assign(
        originated=lambda f: (f['action_taken'] == 1).astype('int'))\
        .loc[:, selected_columns]
1daf7137e9be4ef1e73fb83b87f0472087039eae
142,722
def _get_info_names(profile):
    """Get names of infos of tasks."""
    info_names = sorted(set().union(*(set(val) for val in profile.values())))
    return info_names
b692d6046da9371b73e3bd0ae926b61d8507ef7c
94,035
def map_rows(header_list, row_list):
    """
    :param header_list: list of names ordered to match row data, provides
        names for each row
    :param row_list: list of row tuples/lists with each tuple/list in same
        order as header_list
    :return: list of dicts with named values instead of tuples
    """
    header_map = {v: header_list.index(v) for v in header_list}
    mapped = [{key: item[header_map[key]] for key in header_map}
              for item in row_list]
    return mapped
4f17006106fdb3a5ea38f87deaad372229ee3eb3
507,195
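A minimal usage sketch (added for illustration; the headers and rows are made up):

headers = ["id", "name"]
rows = [(1, "ada"), (2, "bob")]
print(map_rows(headers, rows))
# -> [{'id': 1, 'name': 'ada'}, {'id': 2, 'name': 'bob'}]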
def recvn(sock, n):
    """
    Read n bytes from a socket

    Parameters
    ----------
    sock - socket.socket
        The socket to read from
    n - int
        The number of bytes to read
    """
    ret = b''
    read_length = 0
    while read_length < n:
        tmp = sock.recv(n - read_length)
        if len(tmp) == 0:
            return ret
        ret += tmp
        read_length += len(tmp)
    if read_length != n:
        raise RuntimeError("Low level Network ERROR: short read")
    return ret
07e854ea13d2bf0c74dbbea0348bf6ee15ac1a0c
680,830
def _GetPercentageDiff(value1, value2):
    """Returns the percentage difference between the specified values."""
    difference = value2 - value1
    avg = (value2 + value1) / 2
    return 0 if avg == 0 else difference / avg * 100
8534bc7f55eeb6a191ccfecdf8c69261449f9ad9
430,541
from typing import Dict

import requests


def gdansk_route_names() -> Dict[str, str]:
    """Returns a mapping from route_short_name to route_long_name for
    ZTM Gdańsk, as route_long_names aren't included in the main GTFS."""
    req = requests.get("https://ckan.multimediagdansk.pl/dataset/c24aa637-3619-4dc2-a171-a23eec8f2172/resource/22313c56-5acf-41c7-a5fd-dc5dc72b3851/download/routes.json")  # noqa
    req.raise_for_status()
    all_routes = req.json()
    route_names: Dict[str, str] = {
        "F5": "Żabi Kruk - Westerplatte - Brzeźno",
        "F6": "Targ Rybny - Sobieszewo"}
    for routes in map(lambda i: all_routes[i]["routes"], sorted(all_routes.keys())):
        for route in routes:
            if route["routeShortName"] in route_names:
                continue
            else:
                route_names[route["routeShortName"]] = route["routeLongName"]
    return route_names
b56997f44c7d89cd4d9dbd678405f51141f4fa0f
495,619
def heuristic(state):
    """
    Take in a state (a triple of current place, all places before the
    current place, and the total cost associated with the current place)
    and apply a heuristic to it for A* search.

    state (triple): (current place, all places before the current place,
        the total cost associated with the current place)

    return: integer that approximates distance from the goal
    """
    # Encode the progress to the goal as the length of the list of
    # previous states (the second element of the state triple).
    progress = len(state[1])
    # Goal distance is the number of places left to be added multiplied by
    # the average of the sadness value to the current point
    if progress != 0:
        goal_distance = (7 - progress) * state[2] / progress
    else:
        goal_distance = 7
    return goal_distance
abbb08a15b7133a8eac57bd47733d3f7b372cbd0
366,270
def get_synsets_lemmas(synsets):
    """
    Takes a list of synsets and returns a list of lemmas of all synsets
    """
    lemmas = []
    for synset in synsets:
        for lemma in synset.lemmas():
            word = lemma.name()
            lemmas.append(word)
    return lemmas
b51f9b5858ded4b3d599104e7ff8d6e601477e9a
88,499
def get_account_age_in_days(numpy_time_difference):
    """
    Args
        numpy_time_difference (numpy timedelta): a numpy timedelta object
            that is the difference between the user's account creation
            date and the date of their most recent tweet

    Return
        account_age (int)
    """
    # numpy timedeltas count nanoseconds: ns -> s -> min -> h -> days,
    # then round up by one day.
    return int(numpy_time_difference / 1000000000 / 60 / 60 / 24) + 1
c0c539994b9d051c01f181338c04ad0df4d4a442
224,185
def strip_timezone(dt):
    """Make datetime tz-naive by stripping away any timezone information.

    Args:
        dt : datetime
            - tz-aware: Returned with its timezone information removed.
            - tz-naive: Returned unchanged.

    Returns:
        datetime
            tz-naive datetime.
    """
    return dt.replace(tzinfo=None)
9655bc8f3bf5e5a9d4df7a2ba5a9c94063199e27
337,887
def first_key(dictionary):
    """Get the first key."""
    keys = list(dictionary.keys())
    if len(keys) > 0:
        return keys[0]
    return ""
ddb74a662f74d0ad81910187bf02e3ede3239c4f
106,165
def pad(source: str, pad_len: int, align: str) -> str:
    """Return a padded string.

    :param source: The string to be padded.
    :param pad_len: The total length of the string to be returned.
    :param align: The alignment for the string.
    :return: The padded string.
    """
    return "{s:{a}{n}}".format(s=source, a=align, n=pad_len)
6f4b587e224cfeb33ca2aba3ef2c67ff8eb86d3b
647,301
import importlib


def import_module_by_name(mod_name):
    """
    Executes an "import" statement dynamically.

    Parameters
    ----------
    mod_name : str
        Name of the module to be imported.

    Returns
    -------
    module
        The imported module.
    """
    return importlib.__import__(mod_name)
9320e0bc729983ea6698e89597254fbc66c8f9a2
567,596
def stripper(reply: str, prefix=None, suffix=None) -> str:
    """This is a helper function used to strip off reply prefix and
    terminator. Standard Python str.strip() doesn't work reliably because
    it operates on character-by-character basis, while prefix/terminator
    is usually a group of characters.

    Args:
        reply: String to be stripped.
        prefix: Substring to remove from the beginning of the line.
        suffix: Substring to remove from the end of the line.

    Returns:
        (str): Naked reply.
    """
    if prefix is not None and reply.startswith(prefix):
        reply = reply[len(prefix):]
    # An empty suffix must be skipped: reply[:-0] would wipe the string.
    if suffix and reply.endswith(suffix):
        reply = reply[:-len(suffix)]
    return reply
b48281a0dedd5d7f3d476943f12ac49720e67476
3,442
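A short usage sketch (added for illustration; the frame format is made up):

# Strip a protocol frame's prefix and terminator in one call.
print(stripper("<DATA>42\r\n", prefix="<DATA>", suffix="\r\n"))  # -> "42"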
import hashlib


def validate_transaction_object(txobj, asset_files=None):
    """Validate transaction and its asset

    Args:
        txobj (BBcTransaction): target transaction object
        asset_files (dict): dictionary containing the asset file contents

    Returns:
        bool: True if valid
        tuple: list of valid assets
        tuple: list of invalid assets
    """
    digest = txobj.digest()
    for i, sig in enumerate(txobj.signatures):
        if sig.pubkey is None:
            continue
        try:
            if not sig.verify(digest):
                return False, (), ()
        except Exception:
            return False, (), ()

    if asset_files is None:
        return True, (), ()

    # -- if asset_files is given, check them.
    valid_asset = list()
    invalid_asset = list()
    for idx, evt in enumerate(txobj.events):
        if evt.asset is None:
            continue
        asid = evt.asset.asset_id
        asset_group_id = evt.asset_group_id
        if asid in asset_files:
            if asset_files[asid] is None:
                continue
            if evt.asset.asset_file_digest != hashlib.sha256(bytes(asset_files[asid])).digest():
                invalid_asset.append((asset_group_id, asid))
            else:
                valid_asset.append((asset_group_id, asid))
    for idx, rtn in enumerate(txobj.relations):
        if rtn.asset is None:
            continue
        asid = rtn.asset.asset_id
        asset_group_id = rtn.asset_group_id
        if asid in asset_files:
            if asset_files[asid] is None:
                continue
            if rtn.asset.asset_file_digest != hashlib.sha256(bytes(asset_files[asid])).digest():
                invalid_asset.append((asset_group_id, asid))
            else:
                valid_asset.append((asset_group_id, asid))
    return True, valid_asset, invalid_asset
0562ad06d9fd8f85bf9b8989211aeda5d1e416d9
386,923
def discrete_binary_search(func, lo, hi):
    """
    Locate the first value x s.t. func(x) = True within [lo, hi]
    """
    while lo < hi:
        mi = lo + (hi - lo) // 2
        if func(mi):
            hi = mi
        else:
            lo = mi + 1
    return lo
f8233515a822178898b93a333356c51069493440
642,558
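A minimal usage sketch (added for illustration; the predicate must be monotone):

# First integer whose square is >= 1000.
print(discrete_binary_search(lambda x: x * x >= 1000, 0, 100))  # -> 32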
import random


def get_fixed_embedding(QUBO, complete_embedding, random_state=42):
    """
    Given an input of a QUBO and an embedding, this function maps the
    variables from the qubo onto the embedding.

    Parameters
    ----------
    QUBO : dict
        dictionary where the keys are linear or quadratic terms, and the
        values are real numbers. Represents a Quadratic Unconstrained
        Binary Optimization problem.
    complete_embedding : dict
        all-to-all connectivity embedding for the given QUBO.
    random_state : integer, optional
        random seed parameter. The default is 42.

    Returns
    -------
    QUBO_embedding : dict
        remapped embedding for QUBO.
    """
    random.seed(random_state)
    linear_variables = []
    quadratic_variables = []
    for a in QUBO:
        if a[0] == a[1]:
            linear_variables.append(a[0])
        else:
            quadratic_variables.append(a)
    QUBO_embedding = {}
    complete_vars = list(complete_embedding.keys())
    random.shuffle(complete_vars)
    for i in range(len(linear_variables)):
        QUBO_embedding[linear_variables[i]] = complete_embedding[complete_vars[i]]
    return QUBO_embedding
a559c51b4671853197bfffa96ec910b57003814a
304,930
def sec_to_hms(duration):
    """
    Return hours, minutes and seconds for given duration.

    >>> sec_to_hms('80')
    (0, 1, 20)

    :param int|str duration: Duration in seconds. Can be int or string.
    :return: tuple (hours, minutes, seconds)
    :rtype: (int, int, int)
    """
    s = int(duration)
    h = s // 3600
    s -= (h * 3600)
    m = s // 60
    s -= (m * 60)
    return h, m, s
fd37067ac6698aceff253f9488ad30f7a9053459
275,338
def prune_summary(summary, prefixes_to_remove):
    """Removes keys starting with provided prefixes from the dict."""
    ret = {}
    for key in summary.keys():
        report_key = True
        for prefix in prefixes_to_remove:
            if key.startswith(prefix):
                report_key = False
                break
        if report_key:
            ret[key] = summary[key]
    return ret
2bef7b2a669747314a5cb7eec2cb0007e200597f
340,200
import json


def _make_config_json(host, host_version, settings):
    """Generates a config.json to embed in swarming_bot.zip"""
    # The keys must match ../swarming_bot/config/config.json.
    config = {
        'enable_ts_monitoring': False,
        'isolate_grpc_proxy': "",
        'server': host.rstrip('/'),
        'server_version': host_version,
        'swarming_grpc_proxy': "",
    }
    if settings:
        config['enable_ts_monitoring'] = settings.enable_ts_monitoring
        config['isolate_grpc_proxy'] = settings.bot_isolate_grpc_proxy
        config['swarming_grpc_proxy'] = settings.bot_swarming_grpc_proxy
    return json.dumps(config)
a9620d2d858095ed63f8a94c68efad343a1dec6d
197,691
def parse_military_friendly(parts):
    """
    Parse whether or not the ad indicates "military friendly".

    parts -> The backpage ad's posting_body, separated into substrings
    """
    for part in parts:
        if 'military' in part:
            return 1
    return 0
b1a095289f4ba4982cad9cb96f22e31f820ea128
204,137
def cls_prop(name, data_type):
    """Helper function to define class properties."""
    masked_name = "__" + name

    @property
    def prop(self):
        return getattr(self, masked_name)

    @prop.setter
    def prop(self, value):
        if not isinstance(value, data_type):
            raise TypeError(f"Expected data type for {name} is {data_type}.")
        setattr(self, masked_name, value)

    return prop
254114ab128e9c6827d15740c1b7277c5ec1364d
338,748
def longest_common_prefix(items1, items2):
    """
    Return the longest common prefix.

    >>> longest_common_prefix("abcde", "abcxy")
    'abc'

    :rtype: ``type(items1)``
    """
    n = 0
    for x1, x2 in zip(items1, items2):
        if x1 != x2:
            break
        n += 1
    return items1[:n]
4f4a3724e50e37a632f2f761828d5edcd8c79f00
661,621
import operator  # needed for the doctest examples below


def _cond_helper(func, arguments):
    """
    Helper function for the conditional filters.

    The arguments are converted to float numbers before the comparison
    function is applied.

    >>> _cond_helper(operator.gt, (0, 1))
    False
    >>> _cond_helper(operator.gt, (1, 0))
    True
    >>> _cond_helper(operator.lt, ("1", 2.3))
    True
    >>> _cond_helper(operator.le, ("abc", 0))
    False
    """
    try:
        numbers = [float(tmp) for tmp in arguments]
        return func(*numbers)
    except ValueError:
        return False
3ff5abda9bdf8c7953b62e6774ba71073b52125e
550,650
def is_nested_list(mlist):
    """Is a list nested?

    Args:
        mlist ([list]): A Python list.

    Returns:
        [bool]: 1 if it is a nested list, 0 if it is not and -1 if it is
        not a list.

    Examples:
        >>> from ee_extra import is_nested_list
        >>> is_nested_list([1,2,3])
        >>> # 0
        >>> is_nested_list([1,[2],3])
        >>> # 1
        >>> is_nested_list("Lesly")
        >>> # -1
    """
    if not isinstance(mlist, list):
        return -1  # is not a list
    if not any([isinstance(line, list) for line in mlist]):
        return 0  # is a list but not nested
    else:
        return 1
2053488510a3cfc33ab3257753e59e7e4317ae97
535,333
def get_format_from_ending(file_ending):
    """Returns the corresponding format-name to the given file-ending and
    an empty string if the ending is unknown."""
    if file_ending == "fasta":
        return "fasta-dna"
    if file_ending == "fastq":
        return "fastq"
    # in case of an unknown ending return empty string.
    return ""
cb51027d627250abbd7951907f8ea8f3bcd9d737
477,565
import decimal


def has_number_type(value):
    """
    Is a value a number or a non-number?

    >>> has_number_type(3.5)
    True
    >>> has_number_type(3)
    True
    >>> has_number_type(decimal.Decimal("3.5"))
    True
    >>> has_number_type("3.5")
    False
    >>> has_number_type(True)
    False
    """
    return isinstance(value, (int, float, decimal.Decimal)) and not isinstance(value, bool)
d5db38736244af750ee881ceb83b5433eecd6bb9
41,220
def blocks_slice_to_chunk_slice(blocks_slice: slice) -> slice:
    """
    Converts the supplied blocks slice into chunk slice

    :param blocks_slice: The slice of the blocks
    :return: The resulting chunk slice
    """
    return slice(blocks_slice.start % 16, blocks_slice.stop % 16)
02acf0d3f7da676d2114172b28604657bb5792f9
522,278
def r(o, t):
    """
    Transform back from the Boltzmann variable into `r`.

    Parameters
    ----------
    o : float or numpy.ndarray
        Value(s) of the Boltzmann variable. If an array, it must have a
        shape broadcastable with `t`.
    t : float or numpy.ndarray
        Time(s). If an array, it must have a shape broadcastable with `o`.
        Values must not be negative.

    Returns
    -------
    r : float or numpy.ndarray
        The return is a float if both `o` and `t` are floats. Otherwise it
        is an array of the shape that results from broadcasting `o` and `t`.

    See also
    --------
    o
    t
    """
    return o * t**0.5
c490bbbf2b156ffb21a543c6779a58bef6856394
380,422
import re


def make_XML_identifier(s):
    """
    Given a name, strip invalid characters from XML identifier
    """
    s = re.sub('[^0-9a-zA-Z_]', '', s)
    s = re.sub('^[^a-zA-Z_]+', '', s)
    return s
4569fc2dfa50a09693c88cd55f8aa7f06e227cbd
569,550
def levenshtein_distance(str1, str2):
    """
    Return the minimum number of character deletions, insertions and/or
    substitutions between two strings.

    :param str1: first string
    :param str2: second string
    :type str1: string
    :type str2: string
    :returns: levenshtein distance between the two strings
    :rtype: int
    """
    cols, rows = len(str1), len(str2)
    matrix = [[0 for col in range(cols + 1)] for row in range(rows + 1)]

    # Distances from empty string
    for col in range(1, cols + 1):
        matrix[0][col] = col
    for row in range(1, rows + 1):
        matrix[row][0] = row

    for row in range(1, rows + 1):
        for col in range(1, cols + 1):
            cost = 0 if str1[col - 1] == str2[row - 1] else 1
            matrix[row][col] = min(
                matrix[row][col - 1] + 1,        # deletion
                matrix[row - 1][col] + 1,        # insertion
                matrix[row - 1][col - 1] + cost)  # substitution
    return matrix[rows][cols]
aa001cb482b5bcc603ea7b063e2f0d2caa5fab0a
331,352
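A minimal usage sketch (added for illustration; "kitten"/"sitting" is the classic example):

print(levenshtein_distance("kitten", "sitting"))  # -> 3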
def seq_match(exseq, inseq, allowMismatch):
    """
    Return True if <exseq> and <inseq> are same length and either
    (1) identical OR (2) has at most one mismatch (if allowMismatch is True)

    :return: bool, num_mismatch
    """
    if len(exseq) != len(inseq):
        return False, None
    elif exseq == inseq:
        return True, 0
    elif allowMismatch:
        # allow at most one mismatch
        num_mismatch = 0
        for a, b in zip(exseq, inseq):
            if a != b:
                if num_mismatch == 1:
                    return False, None  # second mismatch, return False!
                else:
                    num_mismatch += 1
        return True, num_mismatch
    else:
        return False, None
9358f4071bbd5d340d2222131b4a79ee33123cfd
118,583
import mpmath


def pdf(x, nu, sigma):
    """
    PDF for the Rice distribution.
    """
    if x <= 0:
        return mpmath.mp.zero
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        nu = mpmath.mpf(nu)
        sigma = mpmath.mpf(sigma)
        sigma2 = sigma**2
        p = ((x / sigma2) * mpmath.exp(-(x**2 + nu**2) / (2 * sigma2)) *
             mpmath.besseli(0, x * nu / sigma2))
    return p
b2d96bc19fb61e5aaf542b916d06c11a0e3dea46
708,002
def search_linears(a, x):
    """
    Returns the index of x in a if present, None otherwise.
    """
    d = a[-1]
    a[-1] = x  # install a sentinel so the scan loop needs no bounds check
    i = 0
    while a[i] != x:
        i += 1
    a[-1] = d  # restore the original last element
    if i == len(a) - 1 and d != x:
        return None
    else:
        return i
f2cff5f65c1f9389be71f64b72866aeaff3cfe4f
388,361
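A short usage sketch of the sentinel search (added for illustration; the list is made up):

print(search_linears([3, 1, 4, 1, 5], 4))  # -> 2
print(search_linears([3, 1, 4, 1, 5], 9))  # -> None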
def contains_duplicates(collection: list):
    """
    Returns whether or not the collection contains duplicates
    """
    current = collection[:]
    while len(current) > 1:
        item, current = current[0], current[1:]
        if any(map(lambda i: i == item, current)):
            return True
    return False
e7a0f250cff718cd7134389efd4957496f7ede67
372,953
import re


def preprocess(x):
    """Combine preprocessing done by CountVectorizer and the
    SimilarityEncoder.

    Different methods exist to compute the number of ngrams in a string:
    - Simply sum the values of a count vector, which is the output of a
      CountVectorizer with analyzer="char", and a specific ngram_range
    - Compute the number of ngrams using a formula (see ``get_ngram_count``)

    However, in the first case, some preprocessing is done by the
    CountVectorizer that may change the length of the string (in
    particular, stripping sequences of 2 or more whitespaces into 1).
    In order for the two methods to output similar results, this
    pre-processing is done upstream, prior to the CountVectorizer.
    """
    # preprocessing step done in ngram_similarity
    x = ' %s ' % x
    # preprocessing step done in the CountVectorizer
    _white_spaces = re.compile(r"\s\s+")
    return _white_spaces.sub(' ', x)
d62adfd43913eb916982776058805b792786be5f
270,263
def pressure_fao(z=1331):
    """
    Calculate the pressure for a certain elevation if no measurements are
    available. FAO 1998

    Args:
        z (float): elevation above sea level [m]

    Returns:
        p (float): atmospheric pressure [hPa]
    """
    p = 101.3 * ((293 - 0.0065 * z) / 293.) ** 5.26
    return p * 1e1
03e4c20e5f00434bbddc4164be582b2cf54953c0
446,566
def extract_tag(tags):
    """
    Takes a list of tags and extracts the first hashtagged item in the
    list, returning it as a string without the hashtag.

    eg. ["@person", "#coffee", "#payment"] -> "coffee"
    """
    for tag in tags:
        if tag[0] == "#":
            return tag[1:]
    return None
a25f5e644a3b673a64e71e288b3df69a1cadaad2
334,781
from datetime import datetime


def get_data_for_daily_statistics_table(df):
    """
    Return data which is ready to be inserted to the daily_statistics
    table in the database.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Pandas Dataframe containing data received from the API.

    Returns
    -------
    df_daily_statistics_data : pandas.core.frame.DataFrame
        Pandas Dataframe containing data to be inserted to the
        daily_statistics table in the database.
    """
    df_daily_statistics_data = (
        df.groupby(["district_name", "min_age_limit", "vaccine"])["available_capacity"]
        .sum()
        .reset_index()
    )
    df_daily_statistics_data["vaccine"] = df_daily_statistics_data[
        "vaccine"
    ].str.upper()
    df_daily_statistics_data["timestamp"] = datetime.utcnow().strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return df_daily_statistics_data
faf7cfb76e88838d049e79bcabfbeef2238dc304
20,625
def define_tf_tensors(detection_graph):
    """
    Define input and output tensors (i.e. data) for the object detection
    classifier
    """
    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors are the detection boxes, scores, and classes.
    # Each box represents a part of the image where a particular object
    # was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')
    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    return image_tensor, [detection_boxes, detection_scores,
                          detection_classes, num_detections]
f5f4c85c5d01f04cf0f2a17cdb2be4671f7f46ff
507,275
def get_dlats_from_case(case: dict):
    """pull list of latitudes from test case"""
    dlats = [geo[0] for geo in case["destinations"]]
    return dlats
8cff11a5e978daa3656003f1c1a4c83aeac55865
449,321
def parse_clip_name(stdout_file):
    """
    Parses the name of the clip from the stdout.txt of the job's run.
    """
    with open(stdout_file, 'r') as f:
        for line in f.readlines():
            if 'FLAGS.clip_name' in line:
                return line.split()[-1]
    raise Exception("Unable to identify clip_name in: '{}'".format(stdout_file))
e665ff605a3042184fc18c691c7483027a939103
416,950
import re


def to_discord_title_safe(text: str) -> str:
    """Convert the given string to one that will be accepted by discord
    as a title for a channel.
    """
    text = text.replace(" ", "-")
    text = text[:96]
    text = text.lower()
    # Eliminate all non alpha numerics and `-`
    text = re.sub(r"[^a-zA-Z0-9\-]", "", text)
    return re.sub(r"(\-)\1{1,}", "-", text)
3085c4483a3e269b5c3fd22c17cab42c0b9582a8
233,378
def _r(x, digits=6):
    """round a number"""
    return round(x, ndigits=digits)
fde3d351f3a9ef376c3b03732415138c4a801d1b
232,405
def cigar_to_end_coord(start, cigar):
    """
    Compute the end coordinate based on the CIGAR string.

    Assumes the coordinate is 1-based.
    """
    # Sum the lengths of all CIGAR operations to get the total offset.
    offset = sum([cigar_part[1] for cigar_part in cigar])
    end = start + offset - 1
    return end
ff03a1aa92a228c62c250c276851a15f4f6d7c41
550,768
def findKeywordExtn(ft, keyword, value=None):
    """
    This function will return the index of the extension in a
    multi-extension FITS file which contains the desired keyword with the
    given value.
    """
    i = 0
    extnum = -1
    # Search through all the extensions in the FITS object
    for chip in ft:
        hdr = chip.header
        # Check to make sure the extension has the given keyword
        if keyword in hdr:
            if value is not None:
                # If it does, then does the value match the desired value
                # MUST use 'str.strip' to match against any input string!
                if hdr[keyword].strip() == value:
                    extnum = i
                    break
            else:
                extnum = i
                break
        i += 1
    # Return the index of the extension which contained the
    # desired EXTNAME value.
    return extnum
07a316dfd10afe9f96c466980f6a9eb0f89e8e0d
564,897
def translate_key_values(adict, translations, default=''):
    """Modify the keys in adict to the ones in translations.

    Be careful, this will modify your input dictionary.
    The keys not present in translations will be left intact.

    Parameters
    ----------
    adict: a dictionary

    translations: iterable of 2-tuples
        Each 2-tuple must have the following format:
        (<adict existing key>, <desired key name for the existing key>)

    Returns
    -------
    Translated adict
    """
    for src_key, dst_key in translations:
        adict[dst_key] = adict.pop(src_key, default)
    return adict
349142d69caf1e391eea2155ddbf74267bbe824b
383,497
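A minimal usage sketch (added for illustration; the dictionary is made up). Note the in-place mutation and the default for missing source keys:

d = {"old_name": "ada", "age": 36}
translate_key_values(d, [("old_name", "name"), ("missing", "extra")])
print(d)  # -> {'age': 36, 'name': 'ada', 'extra': ''}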
def http_fix(url):
    """Try adding an http schema"""
    return "http://" + url
f52411b0a732576b05588474bb60be2f68304d82
189,018
def batchnumber2formidxes(batch_number, batch_size):
    """
    Convert a batch number to its respective form indexes

    Args:
        batch_number (int): index of the batch
        batch_size (int): size of the batch
    """
    start_idx = batch_number * batch_size
    forms_idxes = list(range(start_idx, start_idx + batch_size))
    return forms_idxes
2b2897ab5c96367cea198eaabf6e69803d87c7c4
249,816
def rdiv(denom, result):
    """
    Calculates the inverse of a div operation.

    The *denom* parameter specifies the denominator of the original div
    (//) operation. In this implementation, *denom* must be greater than
    0. The *result* parameter specifies the result of the div operation.
    The function returns the set of potential numerators.
    """
    if denom <= 0:
        raise ValueError('invalid denominator')
    return range(result * denom, result * denom + denom)
31dfbfa0ba32347066c95a75a80b386871684f6e
506,382
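A short usage sketch (added for illustration):

# All numerators x with x // 4 == 5, i.e. 20..23.
print(list(rdiv(4, 5)))  # -> [20, 21, 22, 23]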
def predict_forward_fn(model, batch):
    """Use model and batch data to predict ner tag."""
    word = batch["text_tag"]["data"]
    char = batch["char_tag"]["data"]
    word_masks = batch["text_tag"]["masks"][0]
    output = model.decode(input_word=word, input_char=char, mask=word_masks)
    output = output.numpy()
    return {"output_tag": output}
f748c7ad194227f8ea3e41c0a224b8dc1a90b1b4
398,247
from pathlib import Path
import hashlib


def calculate_hash_fingerprint(file):
    """
    Calculate the sha-256 fingerprint of the attached file

    :param file: the attached file
    :return: the fingerprint
    """
    if file is not None and Path(file.path).exists():
        file.open(mode='rb')
        digest = hashlib.sha256(file.read()).digest()
        file.close()
        return digest
    return hashlib.sha256().digest()
ad2f45d34806fed2cce0e2edfaf9ccc73dbe2675
273,931
def formatError(error):
    """
    Format an error as a string.

    Write the error type as prefix. Eg. "[ValueError] invalid value".
    """
    return "[%s] %s" % (error.__class__.__name__, error)
d37d19ab544e3c19fcfd59ce97732be9e6ba6efd
126,545
from datetime import datetime


def calculate_hrs_after(all_hr_entries, all_timestamps,
                        heart_rate_average_since):
    """Find heart rate entries after indicated time

    This function uses a for loop and if/else statement to index and make
    a list of all the heart rate entries that occur after the
    "heart_rate_average_since" date (from inputted json). If the date is a
    future date, an empty list will be returned.

    :param all_hr_entries: a list of all the indicated patient's heart
        rate entries
    :param all_timestamps: a list of all the indicated patient's heart
        rate entry timestamps
    :param heart_rate_average_since: a string containing the desired date.
        All heart rates entered after this date will be averaged.
    :returns: A list of heart rate values that were entered after the
        indicated date. An empty list will be returned if the date is in
        the future.
    """
    average_since = datetime.strptime(heart_rate_average_since,
                                      "%Y-%m-%d %H:%M:%S")
    hrs_after_time = []
    for entry in range(0, len(all_hr_entries)):
        if datetime.strptime(all_timestamps[entry], "%Y-%m-%d %H:%M:%S") \
                > average_since:
            hrs_after_time.append(all_hr_entries[entry])
    return hrs_after_time
ad0b74344f851d3c36a89309c6c7b3afa0b59587
134,359
from typing import List, Tuple
import re


def get_subphrases(line: str) -> List[Tuple[str, str]]:
    """
    Given a ground truth transcription, extracts all subphrases.

    A subphrase is defined as a set of words with a single timestamp. In
    our ground truth file, it is possible for a single line to contain
    multiple timestamps, corresponding to different phrases. This function
    extracts each individual phrase so we can create the json file with
    precise timestamps.

    :param line: Text from the transcription file.
    :return: List of subphrases, where each subphrase is defined by the
        timestamp and words.
    """
    # Find all timestamps on this line.
    # Finds: `[TIME: MM:SS]:` with or without the leading or ending colon.
    patterns = [
        (r"\[+TIME: \d+:[0-5][0-9]\]", len("[TIME: MM:SS]")),
        (r"\[+TIME: \d+:[0-5][0-9]\]: ", len("[TIME: MM:SS]:")),
        (r"\[+TIME \d+:[0-5][0-9]\]", len("[TIME MM:SS]:")),
        (r"\[+TIME \d+:[0-5][0-9]\]:", len("[TIME MM:SS]:")),
    ]
    meta = []
    for item in patterns:
        p, _ = item
        idxs = [m.span() for m in re.finditer(p, line)]
        meta += idxs
    meta = list(reversed(meta))

    # Only one phrase in this line.
    subphrases = []
    if len(meta) == 1:
        start, end = meta[0]
        ts, text = line[start:end], line[end:]
        item = (ts, text)
        subphrases.append(item)
    elif len(meta) > 1:
        # Extract the text for the subphrase.
        for i in range(len(meta)):
            start, end = meta[i]
            ts = line[start:end]
            text_start = end
            # If this is the last phrase.
            if i == len(meta) - 1:
                text = line[text_start:]
            else:
                next_idx, _ = meta[i + 1]
                text = line[text_start:next_idx]
            item = (ts, text)
            subphrases.append(item)
    return subphrases
de28255ebc012c144a49e7482bee5b721069c9e3
604,261
def node_to_string(sample):
    """
    Given a sample in list format [['0','1','0','2'], uniq_ident] converts
    to string format, 'Ch1|Ch2|....|Chn_Identifier'

    :param sample: Sample in format [['0','1','0','2'], uniq_ident]
    :return: String formatted version of a given sample
    """
    return "|".join(sample[0]) + "_" + str(sample[1])
9e74844d751740be691a2dea6c244627782838ed
421,459
def _to_bool(value):
    """Transform string value to bool.

    Return `True` if input `value` is "yes", "true", "1" or "on", return
    `False` for all other cases.

    For example:

    .. code-block:: python

        >>> _to_bool('yes')
        True
        >>> _to_bool('no')
        False
        >>> _to_bool('')
        False
    """
    return value.lower() in ("yes", "true", "1", "on")
43f8188b115428461a92597ed5d8a560a1fef32f
598,555
from datetime import datetime
from typing import Dict


def create_event_body(
    event_name: str,
    location: str,
    description: str,
    start: datetime,
    end: datetime,
    timezone: str = 'America/Toronto',
) -> Dict:
    """
    Create calendar event body data.

    :param event_name: the name of the calendar event
    :param location: the event location
    :param description: the event description
    :param start: the start date/time
    :param end: the end date/time
    :param timezone: the timezone
    :return: the calendar event body
    """
    return {
        'summary': event_name,
        'location': location,
        'description': description,
        'start': {
            'dateTime': start.isoformat(),
            'timeZone': timezone,
        },
        'end': {
            'dateTime': end.isoformat(),
            'timeZone': timezone,
        },
    }
74227ea8204c9f5d110df4e2b1a58a9f672c4d09
398,803
def get_skincluster_info(skin_node):
    """Get joint influence and skincluster method.

    Result key:
    - joint_list,
    - skin_method,
    - use_max_inf,
    - max_inf_count

    :arg skin_node: Skincluster PyNode that need to get info extracted.
    :type skin_node: pm.nt.SkinCluster
    :return: Skincluster joint influence, Skin method index,
        Use max influence, Max influence count.
    :rtype: dict
    """
    output = {
        'joint_list': [],
        'skin_method': 0,
        'use_max_inf': False,
        'max_inf_count': 4,
    }
    if skin_node:
        output['joint_list'] = skin_node.getInfluence()
        output['skin_method'] = skin_node.getSkinMethod()
        output['use_max_inf'] = skin_node.getObeyMaxInfluences()
        output['max_inf_count'] = skin_node.getMaximumInfluences()
    return output
6c6918f628931669784ab995d67f64cf21f5df74
283,564
def set_floating_point_behaviour_options(opts, inv=True, div0=True,
                                         oflo=True, esr=True, nanoo=True):
    """Set the IPU floating point control behaviour bits

    See the Poplar API documentation for poplar::FloatingPointBehaviour.

    Args:
        inv: If true a floating point invalid operation (defined by
            IEEE 754) will cause an exception.
        div0: If true a floating point divide by zero operation will
            cause an exception.
        oflo: If true a floating point overflow will cause an exception.
        esr: Enable stochastic rounding.
        nanoo: Enable Not-a-Number on overflow mode.
    """
    opts.floating_point_behaviour.inv = inv
    opts.floating_point_behaviour.div0 = div0
    opts.floating_point_behaviour.oflo = oflo
    opts.floating_point_behaviour.esr = esr
    opts.floating_point_behaviour.nanoo = nanoo
    return opts
5de7a94831dc6f5a8b3bb641aebb383c031d342b
588,652
def case_insensitive_header_lookup(headers, lookup_key):
    """Lookup the value of given key in the given headers.

    The key lookup is case insensitive.
    """
    for key in headers:
        if key.lower() == lookup_key.lower():
            return headers.get(key)
32441029ac7bbf0bb5db4afa3a31b7ab067113e1
659,435
def getRep(rep, L, originalWindow=510, W=3):
    """
    Extract a window of the matrix representation rep of size W around a
    mutation at location L

    Arguments:
    - rep : embedding matrix
    - L : The index of the variant about which windows will be taken
    - originalWindow : The size of the window used to generate the subset
      of the protein sequence that is embedded
    - W : the desired size of the window to be taken from embedding
      around the variant

    Example: variant at position 3; what the function is given is
    [1 2 3 4 5]. Given these two you want to return a window of size 1
    around position 3: [2 3 4]

        Original Sequence :        0 1 2 3 4 5 6
        Variant Position  :              L
        subset passed to model :    [1 2 3 4 5]
                                     I       J
        pass to model a window
        of size 1 :                    [2 3 4]
                                        K   M

    Example: L = 2, original window size used for embedding: 3,
    desired window size: 1

        Original Sequence : 0 1 2 3 4 5 6
                            I   L     J
                              K     M
    """
    i, j = max(0, L - originalWindow), min(len(rep), L + originalWindow)
    k, m = max(0, L - W - i), min(len(rep), L + W - i + 1)
    assert k >= 0 and m <= len(rep)
    return rep[k:m]
ee539b4f233ed3178f18b0cda3e9c5f73bdfda70
498,589
from datetime import datetime


def _get_stop_as_datetime(event_json) -> datetime:
    """Reads the stop timestamp of the event and returns it as a datetime
    object.

    Args:
        event_json (json): The event encapsulated as json.

    Returns:
        datetime: Timestamp of the stop of the event.
    """
    name = event_json['info']['name']
    payload_stop = 'meta.raw_payload.' + name + '-stop'
    stop_timestamp_string = event_json['info'][payload_stop]['timestamp']
    stop_date_string, stop_time_string = stop_timestamp_string.split('T')
    stop_time_string, _ = stop_time_string.split('.')
    date_and_time_string = stop_date_string + ' ' + stop_time_string
    return datetime.strptime(date_and_time_string, '%Y-%m-%d %H:%M:%S')
958915a568c66a04da3f44abecf0acca90181f43
705,334
def coNLLDataset_full(filename, processing_word=None, processing_tag=None,
                      max_iter=None, which_tags=-1):
    """
    Same as above but simply processes all datasets and returns full lists
    of X and y in memory (no yield).

    :param filename: path to the file
    :param processing_word: (optional) function that takes a word as input
    :param processing_tag: (optional) function that takes a tag as input
    :param max_iter: (optional) max number of sentences to yield
    :param which_tags: (optional) which tagging scheme to use (-1 -2 -3 or
        3 2 1 for task 1 2 3 respectively)
    :return X,y: lists of words and tags in sequences
    """
    X, y = [], []
    niter = 0
    with open(filename) as f:
        words, tags = [], []
        for line in f:
            line = line.strip()
            if (len(line) == 0 or line.startswith("-DOCSTART-")):
                if len(words) != 0:
                    niter += 1
                    if max_iter is not None and niter > max_iter:
                        break
                    X.append(words)
                    y.append(tags)
                    words, tags = [], []
            else:
                ls = line.split()
                word, tag = ls[0], ls[which_tags]
                if processing_word is not None:
                    word = processing_word(word)
                if processing_tag is not None:
                    tag = processing_tag(tag)
                words += [word]
                tags += [tag]
    return X, y
997e892228ce2d6ef6b1e26623dcd8f6f832195d
264,329
def colors_from_palette(palette, n_colors):
    """
    Params
    ------
    palette: dict, bokeh palette with the number of colors as keys
    n_colors: int, number of colors to return

    Returns
    -------
    colors: list, list of colors of length n_colors. If
        n_colors > len(palette) the same color appears multiple times.
    """
    max_palette_colors = max(palette.keys())
    min_palette_colors = min(palette.keys())
    if n_colors < min_palette_colors:
        colors = palette[min_palette_colors]
        colors = colors[:n_colors]
    elif n_colors <= max_palette_colors:
        colors = palette[n_colors]
    else:
        # the case that n_colors > max_palette_colors
        palette_colors = palette[max_palette_colors]
        indices = range(0, n_colors)
        indices = [i - (i // len(palette_colors)) * len(palette_colors)
                   for i in indices]
        colors = [palette_colors[i] for i in indices]
    return colors
b19677d912f2c63a386ed1c189eb274289373601
529,762
from typing import Union


def invalid_output(
        query: dict,
        db_query: Union[str, dict],
        api_key: str,
        error: str,
        start_record: int,
        page_length: int) -> dict:
    """Create and return the output for a failed request.

    Args:
        query: The query in format as defined in wrapper/input_format.py.
        db_query: The query that was sent to the API in its language.
        api_key: The key used for the request.
        error: The error message returned.
        start_record: The index of the first record requested.
        page_length: The page length requested.

    Returns:
        A dict containing the passed values and "-1" as index where
        necessary to be compliant with wrapper/output_format.
    """
    out = dict()
    out["query"] = query
    out["dbQuery"] = db_query
    out["apiKey"] = api_key
    out["error"] = error
    out["result"] = {
        "total": "-1",
        "start": str(start_record),
        "pageLength": str(page_length),
        "recordsDisplayed": "0",
    }
    out["records"] = list()
    return out
4a97a89f5ce7003d582b23b8e7ca036eff74a3b0
690,304
import copy
import collections


def get_db_data(relation_data, unprefixed):
    """Organize database requests into a collections.OrderedDict

    :param relation_data: shared-db relation data
    :type relation_data: dict
    :param unprefixed: Prefix to use for requests without a prefix. This
        should be unique for each side of the relation to avoid conflicts.
    :type unprefixed: str
    :returns: Order dict of databases and users
    :rtype: collections.OrderedDict
    """
    # Deep copy to avoid unintentionally changing relation data
    settings = copy.deepcopy(relation_data)
    databases = collections.OrderedDict()

    # Clear non-db related elements
    if "egress-subnets" in settings.keys():
        settings.pop("egress-subnets")
    if "ingress-address" in settings.keys():
        settings.pop("ingress-address")
    if "private-address" in settings.keys():
        settings.pop("private-address")

    singleset = {"database", "username", "hostname"}
    if singleset.issubset(settings):
        settings["{}_{}".format(unprefixed, "hostname")] = (
            settings["hostname"])
        settings.pop("hostname")
        settings["{}_{}".format(unprefixed, "database")] = (
            settings["database"])
        settings.pop("database")
        settings["{}_{}".format(unprefixed, "username")] = (
            settings["username"])
        settings.pop("username")

    for k, v in settings.items():
        db = k.split("_")[0]
        x = "_".join(k.split("_")[1:])
        if db not in databases:
            databases[db] = collections.OrderedDict()
        databases[db][x] = v
    return databases
0e2a30624f35f49119ae9bd275153d5e9fdf7503
10,412
def hexString(s):
    """
    Output s' bytes in HEX

    s -- string
    return -- string with hex value
    """
    return ":".join("{:02x}".format(ord(c)) for c in s)
22c1e94f0d54ca3d430e0342aa5b714f28a5815b
6,373
def _get_indices(term, chunk):
    """Get indices where term appears in chunk

    Parameters
    ----------
    term : str
        The token to look for in the `chunk`
    chunk : [str]
        A chunk of text in which to look for instances of `term`

    Returns
    -------
    [int]
        Indices in `chunk` where `term` was found

    Examples
    --------
    >>> term = 'a'
    >>> chunk = ['a', 'a', 'b', 'b', 'a']
    >>> _get_indices(term, chunk)
    [0, 1, 4]
    """
    return [i for i, token in enumerate(chunk) if token == term]
a9ae9f046e1266ec4fc96291d16c64ffb8a2e49a
684,499
import math


def percentile(arr, percent):
    """
    Calculate the given percentile of arr.
    """
    arr = sorted(arr)
    index = (len(arr) - 1) * percent
    floor = math.floor(index)
    ceil = math.ceil(index)
    if floor == ceil:
        return arr[int(index)]
    low_value = arr[int(floor)] * (ceil - index)
    high_value = arr[int(ceil)] * (index - floor)
    return low_value + high_value
48bd98956eb11961351ee6c7fddf84c733c4d7dc
311,870
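A short usage sketch showing the linear interpolation between neighbouring values (added for illustration):

print(percentile([1, 2, 3, 4], 0.5))  # -> 2.5 (interpolated between 2 and 3)
print(percentile([1, 2, 3], 0.5))     # -> 2 (index lands exactly on a value)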
def record_to_xml(name):
    """
    Utility function to translate object names to XML tags.
    """
    if name == 'authors':
        return 'author'
    elif name == 'related_identifiers':
        return 'detail'
    elif name == 'records':
        return 'record'
    else:
        return name
3cd4ec02bb4e98f5bec56f753efa5ea41cf9dc20
333,573
def accepts_gzip(request):
    """
    returns True if the request accepts Content-Encoding: gzip
    """
    return 'gzip' in request.META.get('HTTP_ACCEPT_ENCODING', '')
9f37c306bf0eef49bf9d782b7f9e816ceaee4a99
186,122
def count_unique(items):
    """This takes a list and returns a sorted list of tuples with a count
    of each unique item in the list.

    Example 1:
        count_unique(['a','b','c','a','c','c','a','c','c'])
        returns: [(5,'c'), (3,'a'), (1,'b')]

    Example 2 -- get the most frequent item in a list:
        count_unique(['a','b','c','a','c','c','a','c','c'])[0][1]
        returns: 'c'
    """
    stats = {}
    for i in items:
        if i in stats:
            stats[i] = stats[i] + 1
        else:
            stats[i] = 1
    stats = [(v, k) for k, v in stats.items()]
    stats.sort()
    stats.reverse()
    return stats
2a6ace7633d2e37de51e1d612e4e7f7ed3b9bc66
357,161
def _safe_coerce_to_lowercase(value):
    """Returns strings as lowercase, and any other types of value
    unchanged."""
    if isinstance(value, str):
        return value.lower()
    return value
cee9c62abdee7dc53cf5348b9342f8023fc9c449
321,701
def p(x, xtrue):
    """
    Calculate percentage difference between two values
    """
    return 100.0 * (x - xtrue) / xtrue
b6a400ba41192658f3234814e14b8dd1676b7572
187,120
def subject_destroy(client, subject_id):
    """Destroy the subject or raise if it does not exist."""
    return client.subject_destroy(subject_id=subject_id)
71f531cdf64f0758cad719022e0fac6bc08a0afa
644,401
def username(user: dict):
    """
    Returns the user's first and last name if they've been set, else
    returns the user's email
    """
    if user["first_name"]:
        return user["first_name"] + " " + user["last_name"]
    return user["email"]
19f7a4d47677fa1217c03f61f867c7704194f2bc
668,237