Columns:
  content: string, lengths 39 to 14.9k
  sha1: string, length 40
  id: int64, 0 to 710k
def findIdx(list1, list2):
    """ Return the indices of the elements of list1 that also appear in list2 """
    return [i for i, x in enumerate(list1) if x in list2]
2fb6c27cdc65185675bc4cd45a61d986f4792e07
51,271
from datetime import datetime


def reformat_subway_dates(date):
    """
    The dates in our subway data are formatted in the format month-day-year.
    The dates in our weather underground data are formatted year-month-day.
    In order to join these two data sets together, we'll want the dates
    formatted the same way. Write a function that takes as its input a date
    in the MTA Subway data format, and returns a date in the weather
    underground format.

    Hint: There are a couple of useful functions in the datetime library that
    will help on this assignment, called strptime and strftime.
    More info can be seen here and further in the documentation section:
    http://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
    """
    struct_time = datetime.strptime(date, "%m-%d-%y")
    date_formatted = datetime.strftime(struct_time, "%Y-%m-%d")
    return date_formatted
a40aff606bc790e41b75b4588dbb9b4442510805
51,274
def load_txt(path):
    """
    load txt file
    :param path: the path we save the data.
    :return: data as a list.
    """
    with open(path, 'r') as f:
        data = []
        while True:
            a = f.readline().split()
            if a:
                data.append(a[0])
            else:
                break
        return data
902fdba2f6d3a9d2dbe1aef22fdcecfba1ed2455
51,285
def get_time_string(hour):
    """Returns a string to display given a value 8-22 (8am to 10pm)"""
    if hour < 12:
        return str(hour) + ":00am"
    elif hour == 12:
        return str(hour) + ":00pm"
    elif hour > 12 and hour < 23:
        return str(hour - 12) + ":00pm"
    else:
        return None
bc2d7f84f51ef2198d6cce7fa537f4e048331fcb
51,288
def batch_to_numpy_images_and_labels(data):
    """Returns a list of images and labels from a data batch

    Args:
        data (batch): A data batch with labels and images

    Returns:
        tuple: A tuple of lists of labels and images
    """
    images, labels = data
    numpy_images = images.numpy()
    numpy_labels = labels.numpy()
    return numpy_images, numpy_labels
fe58dd02dfc7128928042ec7e2be65db6c18ce85
51,291
def get_byte_array(integer):
    """Return the variable length bytes corresponding to the given int"""
    # Operate in big endian (unlike most of Telegram API) since:
    # > "...pq is a representation of a natural number
    #    (in binary *big endian* format)..."
    # > "...current value of dh_prime equals
    #    (in *big-endian* byte order)..."
    # Reference: https://core.telegram.org/mtproto/auth_key
    return int.to_bytes(
        integer,
        (integer.bit_length() + 8 - 1) // 8,  # 8 bits per byte,
        byteorder='big',
        signed=False
    )
09f432308ca62ee05273dc278178fde27bc18c40
51,293
def neutronify(name):
    """Adjust the resource name for use with Neutron's API"""
    return name.replace('_', '-')
660b403cbdcf3dea4c16439668a24d04a22acbf5
51,294
def pluralize(num, singular):
    """Return the proper plural version.

    Examples:
        >>> pluralize(2, "meme")
        '2 memes'
        >>> pluralize(1, "thing")
        '1 thing'
        >>> pluralize(1, "class")
        '1 class'
        >>> pluralize(0, "class")
        '0 classes'
    """
    if num == 1:
        return f"{num} {singular}"
    plural_form = singular + ("es" if (singular[-1] == "s") else "s")
    return f"{num} {plural_form}"
458f09e95a9dbb329e719c50fc5c72c27e952057
51,298
def rangeSplit(rangeStr):
    """Return an array of numbers from a specified set of ranges.

    Given a string such as "1 2 4-6 8" will return [1,2,4,5,6,8].
    The numbers and ranges can either be space separated or comma separated
    (but not both).

    Keyword arguments:
    rangeStr -- a string containing ranges such as "1 2 4-6 8"
    """
    result = []
    splitChar = ' '
    if ',' in rangeStr:
        splitChar = ','
    for part in rangeStr.split(splitChar):
        if '-' in part:
            a, b = part.split('-')
            a, b = int(a), int(b)
            result.extend(range(a, b + 1))
        else:
            a = int(part)
            result.append(a)
    return result
60f8d2d7a051e6f7a1819b6de3da8f42a20179dc
51,299
def overlap(v1, v2):
    """Determine whether affected positions of two variants overlap."""
    v1_b = v1.pos + max(len(v1.ref), len(v1.alt))
    v2_b = v2.pos + max(len(v2.ref), len(v2.alt))
    return min(v1_b, v2_b) - max(v1.pos, v2.pos) > 0
79c4e4958d0293896cb00937a8119d2c2bb7e696
51,306
def _get_plot_title(target_name: str,
                    last_observation_date: str,
                    eval_dataset_creation_date: str,
                    forecast_horizon: int) -> str:
    """Gets the title of the plot."""
    return (
        f"Comparison of metrics for predicting {target_name}. Forecast date: "
        f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
        f"evaluation reporting date: {eval_dataset_creation_date}.")
bdd8a9a0d648342192200d7583cb4ed452ab989c
51,310
def list_chunk(target, n):
    """
    Splits a list into chunks of n items.

    **Example**

        simple_utils.array.list_chunk(your_list, 5)

    **Parameters**

    * **target** (list) -- The list to split.
    * **n** (int) -- How many items to put in each chunk.
    """
    return [target[i:i+n] for i in range(0, len(target), n)]
77d9ec17cacb339a7899c05156670ad5164aa9f5
51,312
def _quote_if_str(val):
    """ Helper to quote a string. """
    if isinstance(val, str):
        return f"'{val}'"
    return val
5b216d94c1039c6a95947220badf414e2b6b3a93
51,316
def count_labels_distribution(data_loader):
    """ Count dataset statistics """
    bg_pix_count = 0
    fg_pix_count = 0
    for i, sample_batch in enumerate(data_loader):
        labels = sample_batch['label'][:, :, :, :].numpy()
        bg_pix_count += (labels == 0).sum()
        fg_pix_count += (labels == 1).sum()
    return bg_pix_count, fg_pix_count
daa65a759dca1c88b91bb58ff46b438bd6027142
51,317
def take_out_npools_from_cmdline(cmdline):
    """
    Wipe out any indication about npools from the cmdline settings

    :param cmdline: list of strings with the cmdline options
        (as specified in pw input settings)
    :return: the new cmdline without the options about npools
    """
    return [e for i, e in enumerate(cmdline)
            if (e not in ('-npools', '-npool', '-nk')
                and cmdline[i - 1] not in ('-npools', '-npool', '-nk'))]
6ae18ffb8092cfc9aca8d1016b28faa3c579769d
51,327
def strings_to_services(strings, string_to_service):
    """Convert service strings to SUPPORT_* service bitmask."""
    services = 0
    for string in strings:
        services |= string_to_service[string]
    return services
e863ed240980dd5ed60e665c73a0d999681d0bde
51,335
def hasSheBang(fname):
    """Checks fname is a #! script."""
    with open(fname) as f:
        return f.read(2).startswith('#!')
b5d04b3a28fd09b42e59b862a51afdbbe2e35ae9
51,343
def quote_escape(value, lf='&mjf-lf;', quot='&mjf-quot;'):
    """
    Escape a string so that it can safely be quoted. You should use this if
    the value to be quoted *may* contain line-feeds or both single quotes
    and double quotes.

    If the value contains ``\n`` then it will be escaped using ``lf``. By
    default this is ``&mjf-lf;``.

    If the value contains single quotes *and* double quotes, then all double
    quotes will be escaped using ``quot``. By default this is ``&mjf-quot;``.

    >>> quote_escape('hello')
    'hello'
    >>> quote_escape('hello\\n')
    'hello&mjf-lf;'
    >>> quote_escape('hello"')
    'hello"'
    >>> quote_escape('hello"\\'')
    "hello&mjf-quot;'"
    >>> quote_escape('hello"\\'\\n', '&fish;', '&wobble;')
    "hello&wobble;'&fish;"
    """
    if '\n' in value:
        value = value.replace('\n', lf)
    if '\'' in value and '\"' in value:
        value = value.replace('"', quot)
    return value
b9ad94a91e9fbecb9cef7c2f7fba362f09be25a7
51,345
def load_noise(front_path, end_path):
    """
    Load the front and end noise
    :param front_path:
    :param end_path:
    :return: List: front_list, end_list
    """
    front_list = []
    end_list = []
    with open(front_path, 'r', encoding='utf-8') as front_f:
        while True:
            line = front_f.readline()
            if not line:
                print('Front noise phrase load finished!')
                break
            front_list.append(line.replace('\n', ''))
    with open(end_path, 'r', encoding='utf-8') as end_f:
        while True:
            line = end_f.readline()
            if not line:
                print('End noise phrase load finished!')
                return front_list, end_list
            end_list.append(line.replace('\n', ''))
e1388935afc8437c0e773591d57a01fc79a1560f
51,346
def str_to_array_of_int(value):
    """ Convert a string representing an array of int into a real array of int """
    return [int(v) for v in value.split(",")]
244d8250e42665fd782b7dfbc52d1f5938d1d3d8
51,347
def is_json_response(req):
    """Returns True when the request wants a JSON response, False otherwise"""
    return "Accept" in req.headers and "application/json" in req.accept
d566e03c84ebb6a26254fa14dbd794539caf5728
51,348
def convert_pt_to_in(pts: int) -> float:
    """Converts a length in pts to a length in inches.

    Parameters
    ----------
    pts : int
        A length in pts.

    Returns
    -------
    float
        A length in inches.

    References
    ----------
    - https://www.overleaf.com/learn/latex/Lengths_in_LaTeX
    """
    return 12.0 * 249.0 / 250.0 / 864.0 * pts
396438bd677cf267c3b9a83fab1198f3ab0808a5
51,349
def split_list(input_list):
    """Split input_list into three sub-lists.

    This function splits the input_list into three: one list containing the
    initial non-empty items, one list containing items appearing after the
    string 'Success' in input_list, and the other list containing items
    appearing after the string 'Failure' in input_list.
    """
    initial_flag = 1
    success_flag = 0
    failure_flag = 0
    initial_list = []
    success_list = []
    failure_list = []
    for c in input_list:
        if c == 'Success:':
            success_flag = 1
            failure_flag = 0
        elif c == 'Failure:':
            failure_flag = 1
            success_flag = 0
        elif c != '' and success_flag:
            success_list.append(c)
        elif c != '' and failure_flag:
            failure_list.append(c)
        elif c != '' and initial_flag:
            initial_list.append(c)
    return initial_list, success_list, failure_list
3c4d73b824b56e1f74cf1f5eeea7a86d829f2aed
51,355
def add42(self, a_val):
    """ Add a class constant to a variable. """
    return a_val + self.A42
47f6ddd9b3e6c19a45787e7ec311e3fc2004c662
51,356
def synchronize_iterables(iterables):
    """Synchronize the given iterables in item-wise order.

    Return: the {field: value} dictionary list

    Examples
    --------
    >>> from nipype.pipeline.engine.utils import synchronize_iterables
    >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3, 4])
    >>> synced = synchronize_iterables(iterables)
    >>> synced == [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
    True
    >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3], c=lambda: [4, 5, 6])
    >>> synced = synchronize_iterables(iterables)
    >>> synced == [{'a': 1, 'b': 3, 'c': 4}, {'a': 2, 'c': 5}, {'c': 6}]
    True
    """
    out_list = []
    iterable_items = [(field, iter(fvals()))
                      for field, fvals in sorted(iterables.items())]
    while True:
        cur_dict = {}
        for field, iter_values in iterable_items:
            try:
                cur_dict[field] = next(iter_values)
            except StopIteration:
                pass
        if cur_dict:
            out_list.append(cur_dict)
        else:
            break
    return out_list
4573e9c6a5a2bc04447b54e088ae68650b5f832e
51,357
def get_config(configs, requisition):
    """Get the associated config."""
    for config in configs:
        ref = "{}-{}".format(config["enduser_id"], config["aspsp_id"])
        if requisition["reference"] == ref:
            return config
7f1a3ceaa845954984ef9f3c17c843097f82ad06
51,359
def zipstar(L, lazy=False):
    """
    A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...])

    May return lists or tuples.
    """
    if len(L) == 0:
        return L
    width = len(L[0])
    if width < 100:
        return [[elem[idx] for elem in L] for idx in range(width)]
    L = zip(*L)
    return L if lazy else list(L)
dc458ca547b5bdf7fccf9137f158f3eb67474ff7
51,360
def BuildAdGroupAdOperations(adgroup_operations):
    """Builds the operations adding a TextAd to each AdGroup.

    Args:
        adgroup_operations: a list containing the operations that will add
            AdGroups.

    Returns:
        a list containing the operations that will create a new TextAd for
        each of the provided AdGroups.
    """
    adgroup_ad_operations = [
        {
            # The xsi_type of the operation can usually be guessed by the API
            # because a given service only handles one type of operation.
            # However, batch jobs process operations of different types, so
            # the xsi_type must always be explicitly defined for these
            # operations.
            'xsi_type': 'AdGroupAdOperation',
            'operand': {
                'adGroupId': adgroup_operation['operand']['id'],
                'ad': {
                    'xsi_type': 'TextAd',
                    'headline': 'Luxury Cruise to Mars',
                    'description1': 'Visit the Red Planet in style.',
                    'description2': 'Low-gravity fun for everyone!',
                    'displayUrl': 'www.example.com',
                    'finalUrls': ['http://www.example.com/1']
                }
            },
            'operator': 'ADD'
        }
        for adgroup_operation in adgroup_operations]
    return adgroup_ad_operations
5786705380d9bd870c6ff9f391b0152de0241f75
51,363
def remove_values(the_list, val):
    """ Remove all items with value `val` from `the_list` """
    return [value for value in the_list if value != val]
071f7a2adf88540187b1eea6f1465ea6025bc2d7
51,364
def flatten(list_of_lists):
    """Flatten a list of lists into a single list containing only the items."""
    return [item for inner_list in list_of_lists for item in inner_list]
2ec2ccca79377b9f99417e630a9b7f0ab513c3d4
51,366
def get_state_dict(model):
    """Return model state dictionary whether or not distributed training was used"""
    if hasattr(model, "module"):
        return model.module.state_dict()
    return model.state_dict()
fe1f22c63dac3b7d96b0f0485c9a010e98bae2f5
51,367
def printed(o, **kwargs):
    """Print an object and return it"""
    return print(o, **kwargs) or o
a40f40541999ebb8bd9b9e61b6d2dbd7b77a6722
51,376
from pathlib import Path


def museum_packages_dir(tmpdir, monkeypatch):
    """
    Fixture pointing to a directory containing museum packages
    """
    path = Path(tmpdir) / "MuseumPackages"
    path.mkdir(exist_ok=True)
    return path
0625b8dbb41ec320772d359d47d3b572ad2d7c36
51,381
def is_bool(value):
    """ Checks if the value is a bool """
    return value.lower() in ['true', 'false', 'yes', 'no', 'on', 'off']
c71cac1dbf2ffacc2da420906bc7588bd4f416ec
51,384
def get_slope(x, t):
    """This function calculates the slope of a peak from exceeding the
    threshold to the maximum.

    Args:
        x (list): x values from which the slope is to be determined
        t (list): time section from which the slope is to be determined

    Returns:
        slope (float): slope of the section
    """
    end = 0
    flag = False
    for i in range(len(x) - 1):
        if flag == False:
            if x[i + 1] > x[i]:
                pass
            else:
                end = i
                flag = True
    slope = (x[end] - x[0]) / (t[end] - t[0])
    return slope
5c8b7347d5b3abeb633fd6990afc68a288c0f733
51,386
def _unmangle_name(mangled_name, class_name):
    """Transform *mangled_name* (which is assumed to be a
    "_ClassName__internal" name) into an "__internal" name.

    :arg str mangled_name: a mangled "_ClassName__internal" member name
    :arg str class_name: name of the class where the (unmangled) name is
        defined

    :return: the transformed "__internal" name
    :rtype: str
    """
    return mangled_name.replace("_%s" % class_name.lstrip('_'), "")
681e7b0e05b2feda22764149feafd8da89609ebc
51,388
import math


def get_page_numbers(resources, page):
    """
    Get the pagination information for the request.

    Parameters
    ----------
    resources: array
        List of top level resources
    page: int
        Page number passed through the query parameters

    Returns
    -------
    A dictionary of page numbers
    """
    total_page = 1
    if len(resources) > 10:
        total_page = math.floor(len(resources) / 10)

    # If we are on the first page
    if page == 1:
        previous_page = None
        if total_page == 1:
            next_page = None
        else:
            next_page = page + 1
    # If we are on the last page
    elif page == total_page:
        previous_page = page - 1
        next_page = None
    # If more pages exist
    elif page < total_page:
        previous_page = page - 1
        next_page = page + 1
    # If we are past total pages
    else:  # page > total_page
        previous_page = total_page
        next_page = None

    pages = {
        "first_page": '1',
        "previous_page": previous_page,
        "next_page": next_page,
        "last_page": total_page,
        "total_pages": total_page,
        "per_page": 10
    }
    return pages
4880d846374af9b915bb3cff69adaa4fafde18ce
51,389
import json


def to_json(data):
    """Converts Python data to JSON.
    """
    return json.dumps(data).encode('utf8')
512ba5c2df08029a735b6046ebc977210117dbdc
51,396
def state_derivatives(states):
    """Returns functions of time which represent the time derivatives of the
    states."""
    return [state.diff() for state in states]
c8daf98027e9949be2a12db7acb799ed6c9845c0
51,397
from typing import Callable
from typing import Iterable


def sort_by(key: Callable):
    """Return a new list containing all items from the iterable in ascending
    order, sorted by a key.

    >>> sort_by(len)(["hi!", "my", "name", "is"])
    ['my', 'is', 'hi!', 'name']
    """
    def sort_by(seq: Iterable):
        return sorted(seq, key=key)
    return sort_by
9e37364aa604c170c599286829c60b2f8a7f2ae4
51,398
def obj_ratio(dets):
    """Calculate the ratio of one object detected versus anything not one.

    Parameters
    ----------
    dets : list of lists
        The list of how many objects were detected.

    Returns
    -------
    ratio : float
        The ratio of when one object was detected versus something else.
    """
    ones = 0
    twos = 0
    for i in range(len(dets)):
        chunks = dets[i]
        ones += chunks.count(1)
        twos += chunks.count(2)
    ratio = twos / (twos + ones)
    return ratio
f872c6265a0bf7f43007b1cc687fa2c32eaf64b6
51,401
def human_bytes(num, suffix='B', use_binary_prefix=True):
    """Provide a human readable representation of a specified number of bytes.

    Convert a number of bytes into a higher order representation such as
    megabytes or gigabytes to make it more human readable. Similar to
    specifying `-h` in many command line tools.

    Args:
        num (int): The number of bytes you wish to represent.
        suffix (str, optional): The suffix for the representation. Defaults to 'B'.
        use_binary_prefix (bool, optional): Use binary prefix. Defaults to True;
            if False use decimal prefix.
            https://en.wikipedia.org/wiki/Binary_prefix

    Returns:
        str: The human representation of the bytes provided.

    Examples:
        >>> print(human_bytes(1024))
        1.0KiB
    """
    if use_binary_prefix:
        multiplier = 1024.0
        units = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi']
    else:
        multiplier = 1000.0
        units = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    for unit in units:
        if abs(num) < multiplier:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= multiplier
    return "%.1f%s%s" % (num * multiplier, units[-1], suffix)
4bb21c33fb8cadfde03e0d084b3ed61ea6414970
51,403
def read_binary_metadata(metadata_file):
    """
    Read the metadata of a binary file.

    Arguments:
        metadata_file: str
            Metadata filename.

    Returns:
        metadata: dict
            Metadata.
    """
    fid = open(metadata_file, 'r')
    trans_tab = str.maketrans(dict.fromkeys('\n{}'))
    metadata = dict()
    while 1:
        line = fid.readline()
        if '=' in line:
            key, value = line.split('=', 1)
            if ('{' in value) and ('}' not in value):
                while '}' not in line:
                    line = fid.readline()
                    if line.strip()[0] == ';':
                        continue
                    value += line
            key = key.strip()
            if ('{' in value) and ('}' in value):
                value = value.translate(trans_tab).strip()
                value = list(map(str.strip, value.split(',')))
            else:
                value = value.translate(trans_tab).strip()
            metadata[key] = value
        if line == '':
            break
    fid.close()
    return metadata
074ce259221efb906a305119a8a1eeef744c2031
51,404
def color_tuple_to_hsl(three_tuple):
    """ Converts a (h, s, l) to a valid CSS value.

    h: hue
    s: saturation (in %)
    l: lightness (in %)
    """
    return 'hsl({}, {}%, {}%)'.format(*three_tuple)
5ea546bef92fe2dfa40b5440e8d0004330ee5299
51,405
def process_group(grp):
    """
    Given a list of list of ints, where two ints share a directed edge u-v
    with v > u if v = u + i for some i in (1, 2, 3), compute the total number
    of branches (or equivalently leaves) in this directed tree.

    :param grp: The list of list of ints.
    :return: The count of the number of leaves.
    """
    st = list(sorted(map(int, grp)))
    st = [0] + st + [max(st) + 3]
    exists = set(st)

    def count_leaves(memo, curr_val):
        """
        Given a tree structure with root 0 count the number of leaves
        present in it.

        Notes
        _____
        Recursive Step:
            Given a curr_val, we store in memo[curr_val]:
            'The number of leaves in the subtree rooted at curr_val.'
        """
        if curr_val == st[-1]:
            # Reached a leaf.
            # Leaves have exactly one leaf in the subtree
            # rooted at them.
            memo[curr_val] = 1
            return 1
        elif curr_val in memo:
            # If memoized, don't recompute, save time.
            return memo[curr_val]
        else:
            # Subdivide the problem amongst
            # the current nodes children.
            for i in range(1, 4):
                if curr_val + i in exists:
                    count_leaves(memo, curr_val + i)
            # Assume it is solved for children.
            # Then how to use children's solution
            # to produce current node's?
            # The number of leaves in the subtree rooted
            # at curr_val is:
            # The sum of the number of leaves in the
            # subtrees rooted at its children.
            memo[curr_val] = 0
            for i in range(1, 4):
                if curr_val + i in memo:
                    memo[curr_val] += memo[curr_val + i]
            # Populate memo[curr_val] with the result
            # and trace back to the next node.
            return memo[curr_val]

    mm = dict()
    count_leaves(mm, 0)
    return mm[0]
30192a6773a6d008e3c6b7f47b136ce80680df24
51,406
def fmt_filename(text):
    """File name formatter.

    Remove all file system forbidden characters from text.
    """
    forbidden_char = ["\\", "/", ":", "*", "?", "|", "<", ">", '"']
    for char in forbidden_char:
        text = text.replace(char, "")
    return text
5326a74dfd1887f3d17141824b96e5b2459eea53
51,407
def div(num1, num2):
    """
    Divides two numbers

    >>> div(4,2)
    2.0

    Raises zero division error

    >>> div(4,0)
    Traceback (most recent call last):
        ...
    ZeroDivisionError: division by zero
    """
    return num1 / num2
b575f4addbbc5319a56b9aa5c5e555a40eab9930
51,415
from typing import Optional


def is_google_registry_domain(domain: Optional[str]) -> bool:
    """Returns true if the given Docker image path points to either the
    Google Container Registry or the Artifact Registry."""
    if domain is None:
        return False
    return domain == 'gcr.io' or domain.endswith('docker.pkg.dev')
ecab75b7d70e6c657a030ba33b06287d16e48472
51,418
def is_goal_attained(character: dict) -> bool:
    """
    Check if goal is attained.

    :param character: a dictionary
    :precondition: character must be a dictionary
    :postcondition: returns True if character has a key Artifact, else returns False
    :return: True if goal is attained, otherwise False

    >>> is_goal_attained({"Artifact": "Necronian Servo-Skull"})
    True
    >>> is_goal_attained({"Max wounds": 1000000000})
    False
    """
    return "Artifact" in character.keys()
f39182b52f238acec51f24168cb87ee3ef4ea20e
51,422
def _setup_genome_annotations(g, args, ann_groups):
    """Configure genome annotations to install based on datatarget.
    """
    available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
    anns = []
    for orig_target in args.datatarget:
        if orig_target in ann_groups:
            targets = ann_groups[orig_target]
        else:
            targets = [orig_target]
        for target in targets:
            if target in available_anns:
                anns.append(target)
    g["annotations"] = anns
    if "variation" not in args.datatarget and "validation" in g:
        del g["validation"]
    return g
9c288f7e5dccb4ff4e7876ccacf3b3bdb32b130c
51,430
def get_important_vals(labeled, unlabeled):
    """
    Get values that are important for later

    Args:
        labeled_np (np.array of floats): list of numbers for catastrophe times
        unlabeled_np (np.array of floats): list of numbers for catastrophe times

    Returns:
        labeled_len (int): max indexable number for labeled list
        unlabeled_len: max indexable number for unlabeled list
        high: highest value in either list
    """
    labeled_len = len(labeled) - 1
    unlabeled_len = len(unlabeled) - 1
    high = 0
    if labeled[labeled_len] > unlabeled[unlabeled_len]:
        high = labeled[labeled_len] + 1
    else:
        high = unlabeled[unlabeled_len] + 1
    return [labeled_len, unlabeled_len, int(high)]
4d11f8c364177fc158bcd2735fef53ae4fca8713
51,431
def sequence_pad(sequence, maxlen, dtype='int32', padding='pre', truncating='pre', value=0):
    """Pads sequences to the same length.

    Args:
        sequences: pd.Series or np.array or List of lists, where each element is a sequence.
        maxlen: Int, maximum length of all sequences.
        dtype: Type of the output sequences. To pad sequences with variable length
            strings, you can use `object`.
        padding: String, 'pre' or 'post': pad either before or after each sequence.
        truncating: String, 'pre' or 'post': remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value.

    Returns:
        List of lists with shape `(len(sequences), maxlen)`
    """
    if not isinstance(sequence[0], list):
        sequence = [sequence]
    if padding == 'post' and truncating == 'post':
        t = [i[:maxlen] if len(i) > maxlen else i + [value] * (maxlen - len(i)) for i in sequence]
    elif padding == 'post' and truncating == 'pre':
        t = [i[-maxlen:] if len(i) > maxlen else i + [value] * (maxlen - len(i)) for i in sequence]
    elif padding == 'pre' and truncating == 'post':
        t = [i[:maxlen] if len(i) > maxlen else [value] * (maxlen - len(i)) + i for i in sequence]
    elif padding == 'pre' and truncating == 'pre':
        t = [i[-maxlen:] if len(i) > maxlen else [value] * (maxlen - len(i)) + i for i in sequence]
    else:
        raise ValueError('Padding type "%s" not understood or Truncating type "%s" not understood' % (padding, truncating))
    return t
5e193bc60c1579d0f4cd39bab983cea1a30e11a4
51,436
def select_named_pairs(pair_data):
    """Returns a list of name, base id, target id tuples for the codepoint
    and primary pairs in pair_data.

    Generates a name for each selected pair. If the pair is matched by
    codepoint, use the 'u(ni)XXXX' name of the codepoint. Else use a name
    formed from the glyph id(s). This handles unmatched pairs (where one
    glyph id is -1)."""
    named_pairs = []
    if pair_data.cp_pairs is not None:
        for b, t, cp in pair_data.cp_pairs:
            name = "%s%04X" % ("uni" if cp < 0x10000 else "u", cp)
            named_pairs.append((name, b, t))
    if pair_data.pri_pairs is not None:
        for b, t, _ in pair_data.pri_pairs:
            if b == t:
                name = "g_%05d" % b
            elif t == -1:
                name = "g_b%05d" % b
            elif b == -1:
                name = "g_t%05d" % t
            else:
                name = "g_b%05d_t%05d" % (b, t)
            named_pairs.append((name, b, t))
    return named_pairs
32dd6d32737cb7a9c29c90782557ce1bfec022e4
51,437
def deduplicate(list_object):
    """Rebuild `list_object` removing duplicates and keeping order"""
    new = []
    for item in list_object:
        if item not in new:
            new.append(item)
    return new
69608a67e0a8c466e9e8233f9d8cf61cb553a6cf
51,438
import re
import string


def stripLeadingTrailingWhitespace(text):
    """Given a string, remove any leading or trailing whitespace"""
    text = re.sub("^([" + string.whitespace + "])+", "", text)
    text = re.sub("([" + string.whitespace + "])+$", "", text)
    return text
1933a39b42e4266680ca8b2049b01ded35882aa7
51,448
from typing import List
from typing import Tuple


def char_span_to_token_span(spans: List[Tuple[int, int]],
                            char_start: int,
                            char_end: int) -> Tuple[int, int]:
    """
    Map a character span to the minimal containing token span.

    Args:
        spans: a list of end-exclusive character spans for each token
        char_start: starting character offset
        char_end: ending character offset (exclusive)

    Returns:
        (start, end) token indices, end-exclusive
    """
    # first span ending after target start
    tok_s = min(i for i, s in enumerate(spans) if s[1] >= char_start)
    # last span starting before target end
    tok_e = max(i for i, s in enumerate(spans) if s[0] <= char_end)
    return (tok_s, tok_e + 1)
8a2bd0a6fc3036064fa75127dfa1e6bc09bec92e
51,453
def pre_hash(s):
    """
    Prepends a string with its length.

    EXAMPLES::

        sage: from sage.doctest.parsing import pre_hash
        sage: pre_hash("abc")
        '3:abc'
    """
    return "%s:%s" % (len(s), s)
eedf80648a3cb93a5ec7ac941239834801e7b18d
51,455
import io


def read_file(file):
    """Open and read file input."""
    f = io.open(file, 'r', encoding='utf-8')
    text = f.read()
    f.close()
    return text
8c5e65f59e0475473c29798a8aa10d038628a9b4
51,459
def split_into_integers(coordinate):
    """Get individual parts of a float and transform into integers

    :coordinate: float value
    :returns: list of integers
    """
    return list(map(int, str(coordinate).split('.')))
5ddb05f2a6618a212de2e6f0c65598327f42a514
51,462
import logging


def get_broadcasts(resp):
    """Parses an NHL schedule response to get broadcast information.

    Args:
        resp: JSON response from NHL Schedule API call.

    Returns:
        broadcasts: Dictionary of home & away broadcasts.
    """
    broadcasts = {}

    # Set defaults in case one team doesn't have a broadcast yet
    # broadcasts["home"] = "TBD"
    # broadcasts["away"] = "TBD"

    try:
        resp_broadcasts = resp["broadcasts"]
        for broadcast in resp_broadcasts:
            broadcast_team = broadcast["type"]
            if broadcast_team == "national":
                broadcasts["away"] = broadcast["name"]
                broadcasts["home"] = broadcast["name"]
                break
            else:
                broadcast_channel = broadcast["name"]
                broadcasts[broadcast_team] = broadcast_channel
    except KeyError:
        logging.warning("Broadcasts not available - setting them to TBD.")
        broadcasts["home"] = "TBD"
        broadcasts["away"] = "TBD"

    return broadcasts
f1033ff0477f20439b1e6a22b8807ba207e6f30e
51,466
def get_verified_emails(user):
    """ Get a list of non-primary, verified email addresses. """
    return user.get_emails(is_verified=True, include_primary=False)
0f3093f43cbca503cbaf3afc73b565ffe4d6c255
51,468
def str_input(prompt="", max_length=0):
    """Uses `input(prompt)` to request a str value from the user, retrying if
    the user doesn't enter anything or only enters whitespace.

    @param str prompt: The prompt to display.
    @param int max_length: The maximum length of the string. Defaults to no
        length limit.
    @return str: The entered value.
    """
    while True:
        string = input(prompt)
        if not len(string.strip()):
            continue
        if max_length != 0 and len(string) > max_length:
            print("Your text is too long. It must not exceed a length of %i characters." % max_length)
        return string
30a8579e82220b96e530d5b0142f4e7028b547f4
51,469
def fill_tabs(string: str):
    """Replaces every occurrence of \\t with four spaces"""
    return string.replace("\t", "    ")
5b8849612759d2f7c803c6311414d59d94b49d70
51,473
def calculate_mins(time_string):
    """Convert a time string into minutes."""
    hours, mins = time_string.split()
    hour_num = int(hours.replace('h', ''))
    min_num = int(mins.replace('min', ''))
    return hour_num * 60 + min_num
a991f780296473fd926c470c37664fdf0c59173f
51,474
from typing import Sequence


def remove_reps(
    seq: Sequence,
) -> Sequence:
    """Remove consecutive equal elements from a given sequence"""
    result = [seq[0]]
    for el in seq[1:]:
        if el == result[-1]:
            pass
        else:
            result.append(el)
    return result
a8b89dfe7c36d9bc3f01dd3ccfae0449ec955757
51,483
def linear_trans(u0, u1, t, tc, teps):
    """
    Linear transition from u0 to u1 in interval `tc - teps < t < tc + teps`.
    """
    t0 = tc - teps
    t1 = tc + teps
    return u0 if t <= t0 else u1 if t >= t1 else \
        u0 + (u1 - u0) * (t - t0) / (t1 - t0)
5a8336d59037a208a3746cca50ca2384eeb9ad37
51,484
from typing import List
from typing import Counter


def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
    """Extracts n-grams with order `n` from a list of tokens.

    :param tokens: A list of tokens.
    :param n: The order of n-grams.
    :return: a Counter object with n-grams counts.
    """
    return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
6504e8b40f3cd256c34d2aa0e44660eded49a506
51,485
def terminal_side_effect(state, depth_remaining, time_remaining):
    """ Side effect returns true if we reach a terminal state
    :param state: the state of the game to evaluate
    :param depth_remaining: true if there is depth remaining
    :param time_remaining: true if there is time remaining
    :return: true if we are terminating
    """
    if not depth_remaining:
        return True
    end_state_nodes = []
    for alpha in list(map(chr, range(101, 110))):  # iterate from e-m
        end_state_nodes.append(str(alpha))
    if state in end_state_nodes:
        return True
    return False
231933f2a24559011eaefcd580732c674c9a932c
51,486
def lib2to3_unparse(node):
    """Given a lib2to3 node, return its string representation."""
    code = str(node)
    return code
eda967fd03c7ccb4412cd2ba04cb522bd9db0508
51,488
def build_dag_id(partner_id):
    """Builds the DAG ID for the given Airflow variable.

    Args:
        partner_id: Partner ID to build the dag_id for.

    Returns:
        The DAG ID.
    """
    dag_name = 'algo_readiness_reporting_%s_dag' % partner_id
    return dag_name
08ab98be05ba6812b2a43cf5de8cbad04bb47be6
51,490
def sum_numbers(numbers):
    """
    Sum an array of numbers

    :param list(float) numbers: The array of numbers to sum
    :returns: The sum of the numbers
    """
    return sum(numbers)
e6726e45e0356a4a877e05221a653a48bc237b94
51,491
def _parse_slices(slicing_string):
    """Construct a tuple of slices from the slicing string.

    The string must be a valid slicing string.

    Args:
        slicing_string: (str) Input slicing string to be parsed.

    Returns:
        tuple(slice1, slice2, ...)

    Raises:
        ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
    """
    parsed = []
    for slice_string in slicing_string[1:-1].split(","):
        indices = slice_string.split(":")
        if len(indices) == 1:
            parsed.append(int(indices[0].strip()))
        elif 2 <= len(indices) <= 3:
            parsed.append(
                slice(*[
                    int(index.strip()) if index.strip() else None
                    for index in indices
                ]))
        else:
            raise ValueError("Invalid tensor-slicing string.")
    return tuple(parsed)
0d3e3ce23837dfb847a5f2bf146f26082a0f2b89
51,500
def format_string(console):
    """
    This function accepts a string of video game console data, delimited by
    commas (e.g. an element from <vg_consoles>), and returns a formatted
    string.

    Parameters:
        - console (str): A string containing all of the information on the
          console (i.e. an element from <vg_consoles>).

    Returns:
        - (str): A string that has formatted the information from <console>
          in the following format:
          "<Console name> was produced in <Release year> by <Production company>"
    """
    return f"{console.split(',')[0]} was produced in {console.split(',')[3]} by {console.split(',')[1]}"
ed808588affbf35227297557dcc83b3052ddbf71
51,503
def parse_pmid(medline):
    """Parse PMID from article

    Parameters
    ----------
    medline: Element
        The lxml node pointing to a medline document

    Returns
    -------
    pmid: str
        String version of the PubMed ID
    """
    if medline.find('PMID') is not None:
        pmid = medline.find('PMID').text
    else:
        pmid = ''
    return pmid
2340bbb6f538e1e7a4972f8292372980a80b022b
51,506
from typing import List


def split_strings(subtree: dict) -> List[str]:
    """
    Produce the list of strings from the dictionary with concatenated chars
    and lengths. Opposite to :func:`merge_strings()`.

    :param subtree: The dict with "strings" and "lengths".
    :return: :class:`list` of :class:`str`-s or :class:`bytes`.
    """
    strings = subtree["strings"][0]
    if subtree.get("str", True):
        strings = strings.decode("utf-8")
    lengths = subtree["lengths"]
    result = [None] * lengths.shape[0]
    offset = 0
    for i, l in enumerate(lengths):
        result[i] = strings[offset:offset + l]
        offset += l
    return result
5b1e7fbb8f0eda3df389108595ac19bde716d8b0
51,510
def parseConfStr(confStr):
    """Parse a line of configuration file which is in format of
    semicolon-separated tuples.

    Args:
        confStr (str): String of tuples, each separated by semicolon,
            e.g., "(h1,s1);(h2,s1)".

    Returns:
        list: A list of tuples (key, value pairs).
    """
    pairList = []
    specs = confStr.split(';')
    for spec in specs:
        if not spec:
            continue
        spec = spec.strip()
        splits = spec.split(',')
        splits = [ss.strip("()") for ss in splits]
        splits = tuple(splits)
        pairList.append(splits)
    return pairList
ed93c939bf0844d59b4986ce1966bfc78d0f9c08
51,511
import random


def ran_num(length=1):
    """
    Random string number generator.

    This function generates a string with a custom length that contains
    random digits and characters from a-f.

    Parameters:
        length: Number of places the number should have.

    Returns:
        A string with random digits and characters.
    """
    number = ''
    for z in range(length):
        r = random.randint(0, 15)
        if 0 <= r <= 9:
            number += str(r)
        elif r == 10:
            number += 'a'
        elif r == 11:
            number += 'b'
        elif r == 12:
            number += 'c'
        elif r == 13:
            number += 'd'
        elif r == 14:  # bug fix: original compared the `random` module itself to 14
            number += 'e'
        elif r == 15:
            number += 'f'
    return number
33687faee692a02b0721aed9c2852a09d7183337
51,515
def _should_allow_unhandled(class_reference, key_name):
    """Check if a property is allowed to be unhandled."""
    if not hasattr(class_reference, "__deserialize_allow_unhandled_map__"):
        return False
    return class_reference.__deserialize_allow_unhandled_map__.get(key_name, False)
3475c0eef50a31ad88bcfaaa2d0a6335cffbba24
51,517
from typing import Tuple
from typing import Union
from typing import Dict
from typing import List
from typing import Set


def build_reverse_graph(graph: dict) -> Tuple[Union[list, set], Dict[int, List[int]]]:
    """Take the data from a Tracks layer graph and reverse it.

    Parameters
    ----------
    graph : dict
        A dictionary encoding the graph, taken from the napari.Tracks layer.

    Returns
    -------
    roots : list of int
        A sorted list of integers representing the root node IDs.
    reverse_graph : dict
        A reversed graph representing children of each parent node.
    """
    reverse_graph = {}
    roots: Set[int] = set()

    # iterate over the graph, reverse it and find the root nodes
    for node, parents in graph.items():
        for parent in parents:
            if parent not in reverse_graph:
                reverse_graph[parent] = [node]
            else:
                reverse_graph[parent].append(node)

            if parent not in graph.keys():
                roots.add(parent)

    # sort the roots
    sorted_roots = sorted(list(roots))

    return sorted_roots, reverse_graph
a90f4c4da9395b610999d1272fb0a58b9b983ecd
51,519
import unicodedata
import re


def sanitize_filename(filename):
    """
    Adapted from Django's slugify functions.

    :param filename: The filename.
    """
    try:
        filename = filename.decode()
    except AttributeError:
        pass
    value = unicodedata.normalize('NFKD', filename).encode(
        'ascii', 'ignore').decode('ascii')
    # In contrast to django we allow dots and don't lowercase.
    value = re.sub(r'[^\w\.\s-]', '', value).strip()
    return re.sub(r'[-\s]+', '-', value)
2163a149e484aa5810308ef2c0608a201be2674f
51,524
def strip_plus1(number: str) -> str:
    """
    Strip leading "+1-" if present. NANP numbers on the platform seem to be
    stored as 10D only.

    :param number:
    :return:
    """
    return number and number.startswith('+1-') and number[3:] or number
298872bccd693f3d67df09c9d77c02dce72e5711
51,528
def int_32bit(num):
    """Return `num` as a list of bytes of its 32-bit representation."""
    return list(num.to_bytes(4, "little", signed=True))
2b69f7862a082eb5206d91c637f871ab173598cf
51,530
def _list_union_inter_diff(*lists):
    """Return 3 lists: union, intersection and differences of lists
    """
    union = set(lists[0])
    inter = set(lists[0])
    for l in lists[1:]:
        s = set(l)
        union = union | s
        inter = inter & s
    diff = union - inter
    return list(union), list(inter), list(diff)
09277948cd19d15bff6918b2285fa81d8c8b3a81
51,533
def check_key(key: str, messages: list) -> bool:
    """Checks that criterion has a metadata variable and a numeric separated
    by a comma (",").

    Parameters
    ----------
    key : str
        Current criterion.
    messages : list
        Message to print in case of error.

    Returns
    -------
    boolean : bool
        Whether to keep the key/value or not.
    """
    boolean = False
    if ',' not in key or len(key.split(',')) != 2:
        messages.append('Must have a metadata variable and a numeric separated by a comma (",")')
        boolean = True
    return boolean
69efc0e3d460343fef0136cf6f6f2912df2a366c
51,535
def time_serializer(a):
    """
    Simply rounds a floating point value for its input, as we must store
    dates and times as integers in SQLite.
    """
    return round(float(a))
54a73c0e0e2e1d5db1e871d8a5e03c17c50ea842
51,545
def _makeElementsOneBased(elements_list):
    """
    Take a list of a list of element node indexes and increment the node
    index by one.
    """
    updated_elements = []
    for el in elements_list:
        updated_elements.append([n + 1 for n in el])
    return updated_elements
cd5a13f6e4ff1de27c0fbcda681808fa2306dc17
51,547
def identity(x):
    """Returns its argument unchanged"""
    return x
87bb32661063be3f377f38ff1216a870174d62ac
51,549
def read_commit_message(filename):
    # type: (str) -> str
    """
    Read original commit message from file
    """
    with open(filename, "r+") as f:
        return f.read().strip()
0ca2342743ac319aecf5be1be8fd31a490ebd5c8
51,550
def streams_average(streams):
    """ get average stream distance from a stream list """
    count = 0
    sum = 0
    for i in streams:
        sum += i[1] - i[0]
        count += 1
    return sum / count
811041cd56ca495a28fbde473ccdce09d0a11800
51,555
from typing import Dict


def extract_model_title(data: Dict) -> Dict[str, str]:
    """Extract form name as title, with service name as backup."""
    form_name = data["forms_meta"].get("FormName")
    return (
        {"nb": form_name}
        if form_name
        else {"nb": data["service_meta"].get("ServiceName")}
    )
01218aa89d025f7f51998cd61b7cf662e9a8ad46
51,557
def _interpolation_max_weighted_deg(n, tau, s):
    """Return the maximal weighted degree allowed for an interpolation
    polynomial over `n` points, correcting `tau` errors and with multiplicity
    `s`

    EXAMPLES::

        sage: from sage.coding.guruswami_sudan.interpolation import _interpolation_max_weighted_deg
        sage: _interpolation_max_weighted_deg(10, 3, 5)
        35
    """
    return (n - tau) * s
530ab96f0640ce489c745188dd7865cb14dd1e0f
51,561
def init_feed(feed):
    """
    Initialise a feed

    :param feed: Feed
    :return: tf.data.Iterator.initializer
    :return: tensor slices
    """
    feed = feed.feed
    iterator_tensor = feed.make_initializable_iterator()
    return iterator_tensor.initializer, iterator_tensor.get_next()
3d9148a4bc064883aad22c771fe68cf5551dd60f
51,564
from typing import List
from typing import Dict


def read_header(row: List) -> Dict:
    """Each header entry is a key value pair."""
    return {row[0].replace('\ufeff', ''): row[1]}
891c7c9af5944acea55ce83a1784a124abf58628
51,566
def get_norm(nbins, N):
    """
    Returns normalisation that should be used for a PDF against a dataset of
    N entries, with nbins.

    Args:
        nbins (int): number of bins that data is binned in
        N (int): number of entries in dataset

    Returns:
        norm (float): normalisation
    """
    return N / nbins
c716e3c6c3853976c8830d64a3e0fc9a2b2517d5
51,569
def nodes_with_selfloops(G):
    """Returns an iterator over nodes with self loops.

    A node with a self loop has an edge with both ends adjacent
    to that node.

    Returns
    -------
    nodelist : iterator
        A iterator over nodes with self loops.

    See Also
    --------
    selfloop_edges, number_of_selfloops

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edge(1, 1)
    >>> G.add_edge(1, 2)
    >>> list(nx.nodes_with_selfloops(G))
    [1]
    """
    return (n for n, nbrs in G.adj.items() if n in nbrs)
3de5ed69020d32fe0ec8e48c3ba371ff8dcd386a
51,573
def extract_srcdest(srcdest):
    """
    Extract src/dest ips from _source.

    Args:
        srcdest (dict): Entry from trace_derived_v2

    Returns:
        str, str: ip addresses for source and destination
    """
    return srcdest['src'], srcdest['dest']
ca9944a47c46910189c4f8c14c23e87d6eabc454
51,574
def give_me_proper_embl(cross_refs):
    """
    Filter for references where the first element == 'EMBL', then search for
    the first occurrence where the genome accession is not '-'. This is to
    get both a valid protein accession and genome accession.

    :param cross_refs: The full list of SwissProt.record.cross_references
    :return:
    """
    # Get embl records first
    embl_refs = []
    for ref in cross_refs:
        if ref[0] == 'EMBL':
            embl_refs.append(ref)

    genome_acc, prot_acc = embl_refs[0][1], embl_refs[0][2]
    for ref in embl_refs:
        if ref[2] != '-':
            genome_acc, prot_acc = ref[1], ref[2]
            break
    return genome_acc, prot_acc
e50e8cd7486583beaab7890f63e2dea87047d0ed
51,580
def parse_txt(path):
    """
    parse space separated file into json

    assumes the format
    Date                    Close
    02/01/2018 16:00:00     3.66
    03/01/2018 16:00:00     3.66
    ..                      ..
    """
    dates = []
    values = []
    with open(path, 'r') as f:
        f.readline()  # skip first line
        for line in f:
            split = line.split()
            dates.append(' '.join(split[0:1]))
            values.append(float(split[-1]))
    return {
        "dates": dates,
        "values": values,
    }
e96d2f61efdbb9af901cb6abb8143191194fd1ea
51,581
import logging


def process_covid_json_data(covid_json_data: list) -> tuple:
    """Returns cases in last 7 days, current hospitalisations, and total
    deaths from covid json data"""
    if not covid_json_data:
        logging.error("No json data was received")
        return 0, 0, 0

    # Adds up cases for data entries representing the last 7 days
    cases_last_7 = 0
    for i in range(3, 10):
        if covid_json_data[i]["newCases"] is not None:
            cases_last_7 += covid_json_data[i]["newCases"]

    hospital_cases = 0
    i = 1
    # Searches through data entries until it finds one with the latest hospital cases
    while hospital_cases == 0 and i < len(covid_json_data):
        hospital_cases = covid_json_data[i]["hospitalCases"]
        if hospital_cases is None:
            hospital_cases = 0
        else:
            hospital_cases = int(hospital_cases)
        i += 1

    # Searches through data entries until it finds one with the latest death count
    total_deaths = 0
    i = 1
    while total_deaths == 0 and i < len(covid_json_data):
        total_deaths = covid_json_data[i]["cumDeaths"]
        if total_deaths is None:
            total_deaths = 0
        else:
            total_deaths = int(total_deaths)
        i += 1

    return cases_last_7, hospital_cases, total_deaths
0ad0f871704b3aba8a014f067ea5f9afc602d4dd
51,583
def seg_to_dm_xy(actuator_total, segment):
    """
    Convert single index of DM actuator to x|y DM coordinates. This assumes
    the actuators to be arranged on a square grid.

    actuator_total: int, total number of actuators in each line of the
        (square) DM
    segment: int, single-index actuator number on the DM, to be converted to
        x|y coordinate
    """
    actuator_pair_x = segment % actuator_total
    actuator_pair_y = (segment - actuator_pair_x) / actuator_total
    return actuator_pair_x, int(actuator_pair_y)
b858be2a0de5aab5efc1e8e734aeda4aaa6c53dd
51,584