content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_proportion_selected(val_list, selector, norm=True):
    """Find the proportion of tracks selected with the given selector.

    If there are no tracks in the tracks property value list, returns
    zero.  Can also return the raw number of tracks meeting the
    condition.

    Args:
        val_list: a list of values of a track property, such as tp_pt
            or trk_chi2rphi.
        selector: a predicate these values can satisfy, e.g.
            "lambda trk_eta: trk_eta <= 2.4".
        norm: if True (default), divide the count of passing tracks
            by the total number of tracks.

    Returns:
        Either the number or proportion of tracks meeting the
        condition, depending on norm.
    """
    if not val_list:
        return 0
    passing = sum(selector(value) for value in val_list)
    if norm:
        return float(passing) / len(val_list)
    return passing
0a7f2c8077ef5c25c0f1ef8688c3de9691754a7e
108,817
def binary_search(array, search_term):
    """Binary search for an int in a sorted array.

    Returns the index of the *first* instance of search_term, or None
    if it is not present.  The previous midpoint implementation could
    return the index of any duplicate; this lower-bound variant
    honours the documented "first instance" contract.

    Args:
        array: sorted list of ints.
        search_term: value to locate.

    Returns:
        int index of the first occurrence, or None.
    """
    lo, hi = 0, len(array)
    # Classic lower-bound loop: shrink [lo, hi) to the first index
    # whose value is >= search_term.
    while lo < hi:
        mid = (lo + hi) // 2
        if array[mid] < search_term:
            lo = mid + 1
        else:
            hi = mid
    if lo < len(array) and array[lo] == search_term:
        return lo
    return None
cf4b76f962e82f30d17e3fb259ebcb23498d6f8c
108,819
def geometric_sum(a, r, n=7):
    """Compute a geometric sum of n terms.

    Arguments:
    a --- coefficient
    r --- ratio
    n --- terms to sum (default: 7)
    """
    # Sum of a * r**0 + a * r**1 + ... + a * r**(n-1).
    return sum(a * r ** power for power in range(n))
ef59457082ec6c01cbd0627b71e004e947724e9c
108,822
def biggest_max(d1, d2):
    """ (dict of {object: int}, dict of {object: int}) -> int

    Return -1 if the maximum value in d1 is bigger than the maximum
    value in d2, 1 if it is smaller, and 0 if they are equal.
    """
    left = max(d1.values())
    right = max(d2.values())
    # Three-way comparison expressed as a conditional expression.
    return -1 if left > right else (1 if left < right else 0)
02731786b58afa1e42ca1664cb82e4e6c916f1af
108,831
def key_to_trackingkey(key):
    """Takes a keypair and returns the tracking key."""
    pair_one, pair_two = key
    # Keep the first element of the first pair and the second of the
    # second pair (strict 2-tuple unpacking, as before).
    a, _unused = pair_one
    _unused2, B = pair_two
    return (a, B)
ca180ae9e38265cb7eb74427bccb16eac5a27f1b
108,833
def _insert_newlines(text: str, n=40): """ Inserts a newline into the given text every n characters. :param text: the text to break :param n: :return: """ if not text: return "" lines = [] for i in range(0, len(text), n): lines.append(text[i:i + n]) return '\n'.join(lines)
eeb824b32b479041887e38a0ce2768492702f36f
108,836
def idt(value):
    """
    The identity function: returns its argument unchanged.

    :param value: anything
    :return: its input
    """
    return value
4708ab6c84aa02fc85644db488f5fc97d5d2cbe5
108,850
import pathlib


def volume_bind_args(source, dest):
    """Return a ("-v", "src:dest") tuple for a docker bind mount.

    The source is resolved to a real path first: docker on mac
    struggles to follow symlinks (see
    https://github.com/docker/for-mac/issues/1298), and tempfile
    generates symlinked /var paths.
    """
    resolved = pathlib.Path(source).resolve()
    return "-v", "{}:{}".format(resolved, dest)
2b0bc120f04261ff46ba9ead6168fe3ef3c54c43
108,853
def calc_fock_matrix(mol_, h_core_, er_ints_, Duv_):
    """calc_fock_matrix - Calculates the Fock Matrix of the molecule.

    Removes the leftover assignment-instructions string that the
    original left in as a dead, executed-every-call statement.

    Arguments:
        mol_: the PySCF molecule data structure created from Input
        h_core_: the one electron hamiltonian matrix
        er_ints_: the 2e electron repulsion integrals
        Duv_: the density matrix

    Returns:
        Fuv: The fock matrix
    """
    Fuv = h_core_.copy()  # start from the one-electron (Huv) part
    num_aos = mol_.nao  # number of atomic orbitals = matrix dimension

    # Coulomb term: sum_{l,s} D_ls (uv|ls), added to F.
    for u in range(num_aos):
        for v in range(num_aos):
            Fuv[u, v] += (Duv_ * er_ints_[u, v]).sum()

    # Exchange term: sum_{l,s} D_ls (ul|vs), half of it subtracted.
    for u in range(num_aos):
        for v in range(num_aos):
            Fuv[u, v] -= (0.5 * Duv_ * er_ints_[u, :, v]).sum()

    return Fuv
49daa15ff3dd4aab260f759ee753df5ee5b0a13c
108,856
def concat_all(*args):
    """concat_all(*args) -> list

    Concats all the arguments together, flattening nested lists and
    tuples depth-first.

    Example:
        >>> concat_all(0, [1, (2, 3)], [([[4, 5, 6]])])
        [0, 1, 2, 3, 4, 5, 6]
    """
    flattened = []

    def _walk(node):
        # Recurse into lists/tuples; everything else is a leaf.
        if isinstance(node, (tuple, list)):
            for child in node:
                _walk(child)
        else:
            flattened.append(node)

    _walk(args)
    return flattened
01721d9aca0a1207a6a798407845c81b06d202c7
108,869
from typing import List
from typing import Dict
from typing import Any


def get_cleaned_housing_units_response(housing_units: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Cleans the housing units by removing the fields that cannot be checked
    on equality operation, like uuid.

    The dictionaries are mutated in place; the same list is returned for
    convenience.

    :param housing_units: A list of dictionaries representing the housing units.

    :return: The cleaned list of dictionaries representing the housing units.
    """
    for item in housing_units:
        # Use a default so units that never had a uuid don't raise KeyError.
        item.pop('uuid', None)
    return housing_units
df498e9013387ed9d9d0c3dfa98416c36c6ca11a
108,870
def is_in_range(min, max):
    """Validate a number between min and max.

    >>> is_in_range(10, 20)("1")
    False
    >>> is_in_range(10, 20)("10")
    True
    >>> is_in_range(10, 20)("20")
    True
    >>> is_in_range(10, 20)("21")
    False
    >>> is_in_range(10, 20)("hi")
    False
    """
    def is_valid(value):
        # Only conversion/comparison failures mean "invalid"; the old
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        try:
            return min <= int(value) <= max
        except (TypeError, ValueError):
            return False
    return is_valid
13064e65e9251ed279c2ccc465a2cef73852e6d4
108,872
def revert_uri_escapes(uri: str) -> str:
    """Percent-escape reserved characters in an EPC pure identity URI.

    NOTE(review): despite the name, this *applies* percent-escapes
    (e.g. '"' -> '%22') rather than reverting them; '%' is handled
    first so the '%' characters inserted by the later replacements are
    not double-escaped.  Confirm the intended direction against the
    callers before renaming.

    Args:
        uri (str): EPC pure identity URI

    Returns:
        str: URI with the reserved characters percent-escaped
    """
    return (
        uri.replace("%", "%25")
        .replace('"', "%22")
        .replace("&", "%26")
        .replace("/", "%2F")
        .replace("<", "%3C")
        .replace(">", "%3E")
        .replace("?", "%3F")
    )
fe5e00e6a024da5fef563ef804508ba0e9fdc56e
108,873
def base36decode(number):
    """Converts a base36 string to an integer.

    Delegates to int() with base 36, so it accepts digits 0-9 and
    letters a-z / A-Z, an optional sign, and surrounding whitespace;
    raises ValueError for anything else.
    """
    return int(number, 36)
a908d41a0d5c24c6bcf4c90cbf4fe46c86c6fb81
108,877
def load(filename):
    """Return the entire text content of *filename*."""
    with open(filename, mode='r') as handle:
        contents = handle.read()
    return contents
2dd529fdb3102e39fad28bd02d1d6bed8f146d7a
108,879
def df_to_arrays(df, y_column, columns_to_exclude, return_index=False):
    """Convert a data frame to numpy arrays, splitting X and y.

    Rows with a missing y value are dropped; ``columns_to_exclude``
    (a tuple of column names) and the y column itself are removed
    from X.  Optionally also returns a feature-name -> column-index
    mapping.
    """
    # Drop rows where the target is missing.
    cleaned = df.dropna(subset=[y_column])

    target = cleaned[y_column]
    features = cleaned.drop(columns=list(columns_to_exclude + (y_column,)))

    X_arr = features.values.copy()
    y_arr = target.values.copy()

    if not return_index:
        return X_arr, y_arr

    # Map each remaining feature name to its column position.
    name_to_idx = {feature: features.columns.get_loc(feature)
                   for feature in features.columns}
    return X_arr, y_arr, name_to_idx
ad65146aba500c5c283f24c48dbb22c8996ce1c6
108,884
def split_conf_str(conf_str):
    """Split a multiline string into a list of lines, or [] for None."""
    if conf_str is None:
        return []
    return conf_str.strip().splitlines()
785a9a4f81f671df7cc88950180fa001a283742d
108,885
import re


def validate_afm(afm: str, extended_result: bool = False):
    """Checks if the passed AFM is a valid (Greek tax) AFM number.

    Parameters
    ----------
    afm : str
        A string to be checked as a valid AFM
    extended_result : bool, optional
        Return an extended dict result if True, a single boolean
        otherwise (default is False)

    Returns
    -------
    bool or dict
        A boolean result or a dictionary indicating the validation of
        the number
    """
    def _fail(reason):
        # Extended callers get the failure reason; others just False.
        return {'valid': False, 'error': reason} if extended_result else False

    if len(afm) != 9:
        return _fail("length")
    if not re.match(r"^\d+$", afm):
        return _fail("nan")
    if afm == "0" * 9:
        return _fail("zero")

    # Checksum: each of the first eight digits weighted by 2**(8-i).
    checksum = sum(int(digit) << (8 - position)
                   for position, digit in enumerate(afm[:8]))
    valid = (checksum % 11) % 10 == int(afm[8])

    if extended_result:
        return {'valid': valid} if valid else {'valid': valid, 'error': 'invalid'}
    return valid
9eda891b1c410f55d0af118f4d2b1e35188e9ced
108,900
import math


def r_squared(pts):
    """Calculate R**2 for a set of points.

    If the value returned == 1, these points are on a straight line.

    NOTE(review): the returned quantity is the Pearson correlation
    coefficient r (num/den), not r squared — collinear points with
    negative slope yield -1, not 1.  Confirm which quantity callers
    expect before renaming or squaring.

    A non-positive variance product (zero variance, or float
    round-off) makes the denominator invalid, in which case 0 is
    returned.
    """
    length = len(pts)
    xs = [pt[0] for pt in pts]
    ys = [pt[1] for pt in pts]
    x = sum(xs)
    y = sum(ys)
    # The generator expressions below rebind x/y locally; the sums
    # computed above are unaffected.
    xx = sum(x**2 for x in xs)
    yy = sum(y**2 for y in ys)
    xy = sum(x*y for x, y in zip(xs,ys))
    num = (length * xy - x * y)
    sqt = (length * xx - x * x) * (length * yy - y * y)
    if sqt <= 0:
        corr = 0
    else:
        den = math.sqrt(sqt)
        corr = num / den
    return corr
1bc038b6b57f8b6a7ca8753ea976ebb2868c9e4f
108,901
def get_repo_url(args):
    """Return the fully expanded url for this repo.

    :args: the namespace returned from the argparse parser
    :returns: string url to clone the repo
    """
    template = args.repo_url_format
    return template.format(args.repo_url)
27a6f4b3a37e0a87d6ddabfb4b0b1b8d7270c871
108,902
def int_to_dd(t):
    """
    Takes an iterable of integers and returns a dot-decimal string
    (e.g. "192.168.0.1").
    """
    return '.'.join(map(str, t))
ee821fb497222da9c14f6b9cf96bb5b39c09c4d5
108,903
def deal_with_increment(cards, increment):
    """Deal cards into new positions ``increment`` apart (mod deck size)."""
    deck_size = len(cards)
    dealt = cards.copy()
    # Card i lands at position (i * increment) mod deck_size.
    for offset, card in enumerate(cards):
        dealt[(offset * increment) % deck_size] = card
    return dealt
51517fa654e6b06b96ee4a3f7502608e4adbbad3
108,909
def _get_alpha(mu, t): """Compute the local step size alpha. It will be expressed in terms of the local quadratic fit. A local step size of 1, is equal to `stepping on the other side of the quadratic`, while a step size of 0 means `stepping to the minimum`. If no quadratic approximation could be computed, return None. Args: mu (list): Parameters of the quadratic fit. t (float): Step size (taken). Returns: float: Local effective step size. """ # If we couldn't compute a quadratic approx., return None or -1 if it was # due to a very small step if mu is None: return -1 if t == 0.0 else None elif mu[2] < 0: # Concave setting: Since this means that it is still going downhill # where we stepped to, we will log it as -1. return -1 else: # get alpha_bar (the step size that is "the other side") alpha_bar = -mu[1] / mu[2] # scale everything on a standard quadratic (i.e. step of 0 = minimum) return (2 * t) / alpha_bar - 1
8c29825e3f174c31582ff7bf53d279b1b603b13e
108,911
def format_url(url, city, api):
    """
    Simple Url formatting function.

    :param url: Base Url in constants.py
    :param city: city name substituted into the template
    :param api: api key substituted into the template
    :return: Finished URL ready for querying
    """
    formatted = url.format(city, api)
    return "http://" + formatted
751988e0e1c31cf5f45ebf7f292f44bb5bb1ead1
108,912
def format_result(result, info):
    """
    Formats the given resultset.

    @param result: the result to format.
    @param info: if truthy, return only the header information.
    """
    if not info:
        return str(result)
    header_lines = [
        'Domain: ' + str(result.domain),
        'Request ID: ' + str(result.request_id),
        'State: ' + str(result.state),
        'Comment: ' + str(result.comment),
        'Created: ' + str(result.created),
    ]
    return '\n'.join(header_lines)
f95c7ef1ba2634721e681e1b6ef4949b67c4e825
108,914
def maybe_merge_mappings(mapping_1, mapping_2):
    """
    Merges the two maybe mappings if applicable, returning a new one.

    Parameters
    ----------
    mapping_1 : `None`, `mapping`
        Mapping to merge.
    mapping_2 : `None`, `mapping`
        Mapping to merge.

    Returns
    -------
    merged : `None`, `dict`
    """
    # Treat empty mappings the same as None.
    if not mapping_1:
        mapping_1 = None
    if not mapping_2:
        mapping_2 = None

    if mapping_1 is None and mapping_2 is None:
        return None
    if mapping_1 is None:
        return {**mapping_2}
    if mapping_2 is None:
        return {**mapping_1}
    # mapping_2 entries win on key collisions, as before.
    return {**mapping_1, **mapping_2}
2307edfa3722f0db5fc3979dde97d17556b8853d
108,920
def get_hashtag_list(tweet):
    """
    input: tweet dictionary
    returns: list of all hashtags in both the direct tweet and the retweet
    """
    tags = [entry['text'] for entry in tweet['entities']['hashtags']]
    if 'retweeted_status' in tweet:
        tags.extend(entry['text']
                    for entry in tweet['retweeted_status']['entities']['hashtags'])
    return tags
9e24d6640056cb17f3dcba8a0daaa224422af85c
108,923
import textwrap def _format_dict(opts, indent): """ Formats a dictionary as code. """ rows = [] for k, v in sorted(opts.items()): rows.append('%s=%r' % (k, v)) content = ', '.join(rows) st1 = "\n".join(textwrap.wrap(content)) return textwrap.indent(st1, prefix=' ' * indent)
3dfa913a1afe985b8fa778503694320ce83a7807
108,926
def _h_3ab(P):
    """Define the boundary between Region 3a-3b, h=f(P).

    Appears to be an IAPWS-IF97 supplementary boundary equation —
    verify the coefficients against the released standard before
    editing.

    Parameters
    ----------
    P : float
        Pressure [MPa]

    Returns
    -------
    h : float
        Specific enthalpy [kJ/kg]

    Examples
    --------
    >>> _h_3ab(25)
    2095.936454
    """
    # Cubic polynomial in P with fixed tabulated coefficients.
    return 0.201464004206875e4 + 3.74696550136983*P - \
        0.0219921901054187*P**2+0.875131686009950e-4*P**3
a4592f45c1102ad797cc45e479fef5e035153c08
108,927
def rm_space(string):
    """Remove the first and last character of *string* if they are spaces.

    Unlike a full strip(), at most one leading and one trailing space
    are removed.  Empty (or single-space) input no longer raises
    IndexError.
    """
    if string and string[0] == " ":
        string = string[1:]
    # Re-check truthiness: a lone " " is empty after the first removal.
    if string and string[-1] == " ":
        string = string[:-1]
    return string
9d0c95308b45504e2d877924d0976399de5f6c50
108,929
def sanitize_path(path: str) -> str:
    """Sanitize a path for hash calculation.

    Lowercases the path and normalizes forward slashes to backslashes.

    Args:
        path (str): input path

    Returns:
        str: sanitized path
    """
    lowered = path.lower()
    return lowered.replace("/", "\\")
7152de76dac56d6e1ae64ee31139c3aeff4977b3
108,930
from typing import Union


def valid_choice(
    source: Union[list, dict],
    heading='',
    to_lower=True,
    key_formatter=None,
    prompt='Type name of your chosen item or its index: '
):
    """Display choices to user and ask for an item choice.

    Loops until the input matches an item (after optional formatting /
    lowercasing) or parses as a valid integer index.

    Args:
        source (list | dict): Iterable of items to choose from.
        heading (str) opt: Printed at the top.
        to_lower (bool) opt: If input should be lowercased.
        key_formatter (function) opt: For evaluation, result of
            key_formatter(input) will be used (overrides to_lower).
        prompt (str) opt: Print as input prompt.

    Returns:
        object: Item, or value of key if source is dict.
    """
    if heading:
        print(heading)
    # Pre-lowercase string items once so every loop iteration compares
    # against the same normalised sequence.
    source_to_lower = [
        item.lower() if isinstance(item, str) else item for item in source
    ] if to_lower else source
    while True:
        print(*[f'{item}: {idx}' for idx, item in enumerate(source)], sep='\n')
        selected = input(prompt)
        try:
            # key_formatter wins over plain lowercasing.
            if key_formatter:
                selected = key_formatter(selected)
            elif to_lower:
                selected = selected.lower()
            # Match by item (list) or key (dict) first.
            if selected in source_to_lower:
                return selected if isinstance(source, list) else source[selected]
            # Otherwise interpret the input as a numeric index.
            selected = int(selected)
            return source[selected] if isinstance(source, list) else list(source.values())[selected]
        except (IndexError, KeyError, ValueError):
            print("Input did not match any of the items or their indexes. Please, try again.")
bebeb90cb5c2e6f49c16e803c14821cb90013d41
108,933
def _get_resource_path(path): """Transform the path into a URL.""" return u'/%s' % path
4d8ba3d6f643dd04492ea25b01b6633f0a51e910
108,934
def last_lines(path, n):
    """Return the last ``n`` lines of the file at ``path`` as a list.

    The whole file is loaded into memory (no dependence on system
    commands like tail); if the file has fewer than ``n`` lines, all
    lines are returned.

    Args:
        path: Path to file.
        n: Number of lines to extract from the end of the file.

    Returns:
        The last ``n`` lines as a list (all lines when ``n`` exceeds
        the line count).
    """
    with open(path, "r") as handle:
        all_lines = handle.read().splitlines()
    if n > len(all_lines):
        return all_lines
    return all_lines[-n:]
941f10e1d828d91f4b36a9200e2b6105ed50e8fa
108,944
from typing import List


def axis_list_to_bitmap(axes: List[int]):
    """
    Convert a list of 0-indexed axes, like [0, 2, 3], to the
    equivalent NI bitmap.  NI uses 1-indexing, so [0, 2, 3] maps to
    2 ** 1 + 2 ** 3 + 2 ** 4.

    :param axes: 0-indexed axis numbers
    :return: integer bitmask
    """
    bitmap = 0
    # Accumulate with += (not |=) to keep the original's behaviour for
    # duplicate axis entries.
    for axis in axes:
        bitmap += 2 ** (axis + 1)
    return bitmap
747256b04d456c2b2172a72e8b373018ad7bb557
108,946
def _get_user_repo(url): """ Return the :user/:repo/ from a bitbucket.org url """ if (url.find('://bitbucket.org') > -1) or (url.find('@bitbucket.org') > -1): p = url.split('/') return '%s/%s' % (p[3], p[4]) return None
765ded7a083b7df2283e89fae4dcd7e31e0fef47
108,949
import json def _getMistakesMade(row): """ Parse the notes column of the VHP dataset to extract the mistakes made section. GIVEN: row (str) a str representation of a JSON dictionary RETURN mistakes (str) the mistakes made section """ notes = json.loads(row) mistakes = notes["mistakes"] if type(mistakes) == dict: mistakes = mistakes["answer"] elif type(mistakes) == str: mistakes = mistakes.split("\nanswer: |")[1] if mistakes is None: return "" return mistakes
65459a1d496e85c52e87ce9cf6c8eb2e7cf30ef8
108,958
def pack_index(index: int, length: int) -> int:
    """Transforms `index` into a positive index counting from the
    beginning of the string, capping it at the boundaries [0, len].

    Examples:
        >>> pack_index(-1, 5)
        4
        >>> pack_index(6, 5)
        5
        >>> pack_index(-7, 5)
        0
    """
    if index < 0:
        index += length
    # Clamp to the inclusive [0, length] range.
    return min(max(index, 0), length)
e4fd2ddaf3eaeb0995940da455e8fc4a874e536d
108,960
def return_element(data, k):
    """Tells you the equivalent negative index.

    Args:
        data (list): Simple array
        k (int): index whose equivalent negative index you want

    Returns:
        tuple or bool: ``(value, negative_index)`` for the element at
        position ``k``, or ``False`` when ``data`` is empty.

    Examples:
        >>> l = [2,3,4,5,6,7,8,9,10,11,10,9,8,7,6,5,4,3,2,1]
        >>> return_element(l, 0)
        (2, -20)
        >>> return_element(l, 1)
        (3, -19)
    """
    # Bug fix: the original `return data[idx], idx if data else False`
    # parsed as `(data[idx], idx if data else False)`, so an empty list
    # raised IndexError instead of returning False.
    if not data:
        return False
    idx = k - len(data)
    return data[idx], idx
f15912416f4a20fc4aea52a4b135eed2545ae896
108,961
def convert_rational_to_float(rational):
    """
    Convert a rational number in the form of a 2-tuple to a float.

    Args:
        rational (2-sized sequence of int): (numerator, denominator)

    Returns:
        float: The conversion.
    """
    assert len(rational) == 2
    numerator, denominator = rational
    return numerator / denominator
5713c24c461726d49abee8b9df020b501fcb0ac2
108,964
from typing import Optional


def parse_bool(s: str) -> Optional[bool]:
    """Parse string as boolean.

    Values ``1``, ``true``, ``t``, ``yes``, ``y``, ``on`` parse as
    ``True``.  Values ``0``, ``false``, ``f``, ``no``, ``n``, ``off``
    and the empty string parse as ``False``.  Anything else returns
    ``None``.  Matching is case-insensitive; surrounding whitespace is
    stripped first.
    """
    truthy = {"1", "true", "t", "yes", "y", "on"}
    falsy = {"0", "false", "f", "no", "n", "off", ""}
    normalized = s.strip().lower()
    if normalized in truthy:
        return True
    return False if normalized in falsy else None
4548002526108351614684c7b2fe2f9a107c11e9
108,965
import re


def readExpFile(fileName, type):
    """
    Read all data from a file and split it into datasets.

    Each dataset in the returned list is a dict with a "name" and its
    "values".  For type == "exp" the value rows are 4-column
    [delay, data, err_up, err_down]; for type == "fit" they are
    2-column [delay, data].

    NOTE(review): the local names ``str`` and ``type`` shadow the
    builtins of the same name inside this function.
    """
    data = []
    file = open(fileName, 'r', encoding='utf8')
    allDataFromFile = file.read()
    allRes = allDataFromFile.split("\n\n")  # Each dataset is seperated by two linebreaks
    file.close()
    for res in allRes:
        currentRes = ""
        values = []
        dataset = {}
        for l in res.split("\n"):
            str = l.strip()
            # "[name-something]" header line marks the dataset name.
            r = re.match(r"\[([A-Za-z0-9_]+)-([A-Za-z0-9_]+)\]", str)
            if(r):
                dataset["name"] = r.group(1).strip()
            elif(str.startswith("#")):
                # Comment line: skip.
                continue
            elif type == "fit":
                dataset["dataexist"] = True
                strList = str.split()
                # Fit rows: keep delay and data columns only.
                if len(strList) == 3:
                    values.append([strList[0], strList[2]])
            elif type == "exp":
                strList = str.split()
                # Exp rows: delay, data, err up, err down.
                if len(strList) == 5:
                    values.append([strList[0], strList[2], strList[3], strList[4]])
        dataset["values"] = values
        # Drop datasets that produced no value rows.
        if len(dataset["values"]) > 0:
            data.append(dataset)
    data_sorted = sorted(data, key=lambda x: x["name"][1:])
    return data_sorted
3bfa16a9778f791dc2d634668418b25d9d2698d4
108,966
def move_to_origin(ink):
    """
    Move ink so that the lower left corner of its bounding box is the
    origin afterwards.  The third column is left untouched.
    """
    offset = [min(ink[:, 0]), min(ink[:, 1]), 0]
    return ink - offset
adb73f4e7212acac65290296af00774c96aad436
108,967
def stdDevOfLengths(L):
    """
    L: a list of strings
    returns: float, the population standard deviation of the lengths
    of the strings, or NaN if L is empty.
    """
    if not L:
        return float('NaN')
    lengths = [len(t) for t in L]
    mean = sum(lengths) / float(len(lengths))
    variance = sum((length - mean) ** 2 for length in lengths) / len(lengths)
    return variance ** 0.5
62621be9ae0ad79ae523dc014a0b3e8bdbfbb19e
108,974
def last(n, foo):
    """Gets the last n items in foo."""
    # foo[-0:] would return everything, so n == 0 needs its own branch.
    if n == 0:
        return foo[len(foo):]
    return foo[-n:]
c0a1e824ec44b41e7a7d4554398af0522d5ce00b
108,982
def unique(enumerable):
    """ Returns a list without duplicate items (order not preserved). """
    deduplicated = set(enumerable)
    return list(deduplicated)
b7a9cdb589ae56325fef4e6dffb19c17d6f3fd03
108,984
import string
import random


def rand_str(length, chars=string.ascii_uppercase + string.digits):
    """
    Utility function which generates a random string from a given
    character sequence.

    Not cryptographically secure (uses ``random``); use the
    ``secrets`` module for tokens.

    Args:
        length (int): length of the generated string
        chars (str): characters to draw from (default: uppercase
            ASCII letters plus digits)

    Returns:
        string: randomly generated string
    """
    return ''.join(random.choice(chars) for _ in range(length))
44d24fa8e8ba24c1e316a1f66a6b3255451462b2
108,995
import re


def remove_tag(string):
    """
    Strip html-like tags (anything between '<' and the next '>') from
    web-scraped text.

    Parameters
    ----------
    string: str
        Text selected to apply transformation.

    Examples:
    ---------
    ```python
    sentence="Markdown sentences can use <br> for breaks and <i></i> for italics"
    remove_tag(sentence)
    >>> 'Markdown sentences can use  for breaks and  for italics'
    ```
    """
    return re.sub('<.*?>', '', string)
284f07aed99a403d1a99efce6e37b5564e9f8401
108,997
def frame_to_tc(edit_count, edit_rate):
    """
    Convert sample count to timecode.

    Args:
        edit_count(int): number of samples.
        edit_rate (int): number of samples per second.

    Returns:
        Timecode string (format HH:MM:SS:FF).

    >>> frame_to_tc(48, 24)
    '00:00:02:00'
    """
    # Guard both the zero-rate (would divide by zero) and zero-count
    # cases with the canonical zero timecode.
    if edit_rate == 0 or edit_count == 0:
        return "00:00:00:00"
    total_seconds, frames = divmod(edit_count, edit_rate)
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%02d:%02d" % (hours, minutes, seconds, frames)
272eac90107ab48c8b6e9aed8a7c6396f149b677
109,002
from typing import Dict


def id_dict_is_empty(id_dict: Dict[str, str]) -> bool:
    """
    This predicate can be used to know if the ID dictionary extracted
    from an OCDM entity is effectively empty (no entry has both a
    non-empty key and a non-empty value).

    :param id_dict: The ID dictionary to be checked
    :return: True if the ID dictionary is effectively empty, False otherwise
    """
    if not id_dict:
        return True
    return not any(
        key is not None and key != '' and value is not None and value != ''
        for key, value in id_dict.items()
    )
7b0fa028c4b79ddd92618ab5152cca88aa7cf88d
109,005
import hmac
import hashlib
import base64


def raw_sign(message, secret):
    """Return the base64-encoded HMAC-SHA256 signature of *message*."""
    mac = hmac.new(secret, message, hashlib.sha256)
    return base64.b64encode(mac.digest())
22f884a5bb1cfe0294e74108e05baaad577e6e19
109,008
import torch


def get_parameter(traced, target: str):
    """
    Returns the parameter given by ``target`` if it exists, otherwise
    throws an error.

    See the docstring for ``get_submodule`` for a more detailed
    explanation of this method's functionality as well as how to
    correctly specify ``target``.

    Args:
        traced: module-like object exposing ``get_submodule``.
        target: The fully-qualified string name of the Parameter to
            look for.  (See ``get_submodule`` for how to specify a
            fully-qualified string.)

    Returns:
        torch.nn.Parameter: The Parameter referenced by ``target``

    Raises:
        AttributeError: If the target string references an invalid
            path or resolves to something that is not an
            ``nn.Parameter``

    NOTE(review): only the attribute's *existence* is checked here —
    a non-Parameter attribute of that name is returned as-is despite
    the documented AttributeError; confirm whether an isinstance
    check was intended.
    """
    # Split "a.b.weight" into the owning module path and the leaf name.
    module_path, _, param_name = target.rpartition(".")
    mod: torch.nn.Module = traced.get_submodule(module_path)
    if not hasattr(mod, param_name):
        raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")
    param: torch.nn.Parameter = getattr(mod, param_name)
    return param
ed8ba26c56706bbfce6015ab22fbcb3f34ac6905
109,011
import torch


def convert_onehot_to_dense(onehot):
    """
    Collapse a one-hot tensor to dense class indices.

    Args:
        onehot: [batch_size, seq_len, vocab_size]

    Returns:
        [batch_size, seq_len] tensor of argmax indices
    """
    return onehot.argmax(dim=2)
9fec7d5dbce7f4fc21999d5ced66b1d392f7c25e
109,015
import hashlib


def get_md5sum(file_path, chunksize=8192):
    """
    Stream a file through md5 and return its hex digest.

    The file might be rather large, so it is read in ``chunksize``
    pieces instead of being loaded whole.  The file name is not part
    of the hash.

    Parameters
    ----------
    file_path : str
        Location of the file
    chunksize : int
        Size of each read

    Returns
    -------
    String with md5sum hex hash
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as handle:
        # iter() with a b"" sentinel stops at end-of-file.
        for fragment in iter(lambda: handle.read(chunksize), b""):
            digest.update(fragment)
    return digest.hexdigest()
718643dff367b3db747c58c143bbfedab58eda73
109,018
def countComponents3(n: int, edges: list[list[int]]) -> int:
    """
    Count connected components using union-find with union by rank.

    Args:
        n (int): number of nodes, labelled 0..n-1
        edges (list[list[int]]): undirected edges [u, v]

    Returns:
        int: number of connected components
    """
    parent = list(range(n))
    rank = [1] * n

    def find(node: int) -> int:
        # Walk up to the root, halving the path to keep trees shallow.
        while node != parent[node]:
            parent[node] = parent[parent[node]]
            node = parent[node]
        return node

    def union(u: int, v: int) -> None:
        root_u, root_v = find(u), find(v)
        if root_u == root_v:
            return
        # Bug fix: the original attached root_v under root_u in every
        # branch and bumped the rank of the *node* v rather than the
        # root, so "union by rank" never actually balanced the trees.
        if rank[root_u] < rank[root_v]:
            root_u, root_v = root_v, root_u
        parent[root_v] = root_u
        if rank[root_u] == rank[root_v]:
            rank[root_u] += 1

    for u, v in edges:
        union(u, v)

    return len({find(node) for node in range(n)})
4c3b870088b30bed85349cd211c7f3154600ac4c
109,024
def is_palindrome_permutation_v2(phrase):
    """Check whether a string is a permutation of a palindrome.

    A string can be rearranged into a palindrome iff at most one
    character has an odd occurrence count.

    Bug fix: the original set a character's entry to 1 on first sight
    and to 0 on *every* later sight, so it tracked "seen more than
    once" instead of odd/even parity — e.g. "aaab" (two characters
    with odd counts) was wrongly accepted.
    """
    parity = {}
    for ch in phrase:
        parity[ch] = parity.get(ch, 0) ^ 1  # toggle odd/even
    return sum(parity.values()) <= 1
ce2c02342f775c2189329158caa9c1a4754216b8
109,025
def get_location_no_json(i, soup):
    """Gets location info for courses that do not use a JSON timetable.

    Args:
        i: panel index; looks up the element with id "panel{i}".
        soup: BeautifulSoup document for the course page.

    Returns:
        str: classroom name, with " - <location>" appended when a
        nested location div is present.
    """
    # First div under the panel: contents[0] is the name text node,
    # contents[1] (if it wraps a div) carries the location details.
    location_data = soup.find("div", id="panel{}".format(i)).find("div").contents
    classroom_name = location_data[0].lstrip().rstrip()
    classroom_info = location_data[1].find("div")
    if classroom_info is not None:
        classroom_loc = classroom_info.contents[0].lstrip().rstrip()
        classroom_name += " - " + classroom_loc
    return classroom_name
106744648aa8c15e16244cc9b62b854afc60058e
109,026
def _CreateYumSettings(args, messages):
    """Creates a YumSettings message from input arguments.

    Args:
        args: argparse namespace carrying the yum_* flags.
        messages: generated API messages module providing YumSettings.

    Returns:
        A messages.YumSettings, or None when no yum flag was supplied.
    """
    # No yum-related flag set at all -> omit the settings entirely.
    if not any([
        args.yum_excludes, args.yum_minimal, args.yum_security,
        args.yum_exclusive_packages
    ]):
        return None
    return messages.YumSettings(
        excludes=args.yum_excludes if args.yum_excludes else [],
        minimal=args.yum_minimal,
        security=args.yum_security,
        exclusivePackages=args.yum_exclusive_packages
        if args.yum_exclusive_packages else [],
    )
67a9aa88e9c1d66193a5108a76a07099b08a5ea6
109,027
def get_latencies(data):
    """Get the list of frame latencies and draw latencies for a benchmark.

    Args:
        data: benchmark dict with a 'messages' list; each message
            carries 'latencies' and 'draw_latencies' lists (plus 'ms'
            and 'frames' totals).

    Returns:
        (all_latencies, all_draw_latencies), concatenated across all
        messages except the first.

    Raises:
        AssertionError: if the two lists differ in length or a draw
            latency exceeds its overall latency.
    """
    msgs = data['messages'][1:]  # Skip the first message as a "warmup."
    all_latencies = []
    all_draw_latencies = []
    for msg in msgs:
        # As a sanity check, we can get an average frame latency for the
        # entire message with:
        #   avg_latency = msg['ms'] / msg['frames']
        all_latencies += msg['latencies']
        all_draw_latencies += msg['draw_latencies']
    # More sanity checking: we should have the same number of overall and
    # draw-call latencies, and the draw latency should always be less than
    # the overall latency.
    assert len(all_latencies) == len(all_draw_latencies)
    # print(data['fn'], file=sys.stderr)
    for l, dl in zip(all_latencies, all_draw_latencies):
        # print(l, dl, file=sys.stderr)
        assert dl <= l
    return all_latencies, all_draw_latencies
6b5ba999fe52ab371e2b8baf7e32cbf9c57d190f
109,028
def squaredError(label, prediction):
    """Calculates the squared error for a single prediction.

    Args:
        label (float): The correct value for this observation.
        prediction (float): The predicted value for this observation.

    Returns:
        float: The squared difference between `label` and `prediction`.
    """
    residual = label - prediction
    return residual ** 2
1370b0b0dc64bda3d046f8bcf910feb372492568
109,029
def decode_predictions(threshed, pred):
    """
    Converts a thresholded label and raw probability to an output string.

    NOTE(review): confidence is rounded to 1 decimal in the malignant
    branch but to an int in the non-malignant branch, and any
    `threshed` value other than 0/1 falls through returning None —
    confirm whether both are intended before changing.
    """
    if int(threshed) == 0:
        return f"Malignant; Confidence: {round((1-float(pred))*100, 1)}%"
    elif int(threshed) == 1:
        return f"Non-Malignant; Confidence: {round(float(pred)*100)}%"
3bb9fd29f53da2942be5a2b5d3cbfc7aabfe1a8a
109,035
def wrap(func, keys):
    """
    Primitive to wrap the output of a function into dictionary keys.

    Args:
        func: The function to enable output wrapping on.
        keys: The name of the keys to use in the output dictionary
            (a tuple of names, or a single name).
    """
    # Exact `type(...) is tuple` check kept to preserve behaviour for
    # tuple subclasses.
    if type(keys) is not tuple:
        keys = [keys]

    def _wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        if type(result) is not tuple:
            result = [result]
        return dict(zip(keys, result))

    return _wrapped
f3fcdfb2a2796fc09ff48386ac5e4131118cd041
109,040
import re


def sanitize_unicode(value):
    """Removes characters incompatible with XML 1.0.

    Following the W3C recommendation:
    https://www.w3.org/TR/REC-xml/#charsets
    Based on https://lsimons.wordpress.com/2011/03/17/stripping-illegal-characters-out-of-xml-in-python/  # noqa
    """
    illegal = re.compile(u'[\x00-\x08\x0B\x0C\x0E-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
    return illegal.sub('', value)
e4a3d9138393b569b3bba0dec05bd8cf5ab328a1
109,044
from datetime import datetime, timezone


def check_new_day(folder_date):
    """Return True when the current UTC date differs from *folder_date*,
    i.e. a new dated folder is required.

    Uses timezone-aware ``datetime.now(timezone.utc)`` instead of the
    deprecated ``datetime.utcnow()``; the resulting date is identical.
    """
    return datetime.now(timezone.utc).date() != folder_date
6a18d59884aded17cdf150246529e88bac292454
109,045
from typing import Callable
from typing import Any


def test_lambda_function_object() -> None:
    """Lambda returned by a function."""
    def power(base: int) -> Callable[[int], Any]:
        """Lambda expression to calculate power."""
        # Power (** and pow()) return type is Any
        # See https://github.com/python/typeshed/issues/285
        return lambda exponent: base ** exponent

    cube_of_two = power(2)
    assert cube_of_two(3) == 8
662df7bcc7e0ad371479d5ee57e58a071cdbfc75
109,049
def rescue_default(callback, default=""):
    """Call callback, returning default if it raises.

    Only IndexError, AttributeError, and ValueError are rescued.
    Wrapping the expression in a lambda is a convenient way to build
    the callback.
    """
    try:
        result = callback()
    except (IndexError, AttributeError, ValueError):
        return default
    return result
805bfdc5c15fef83d4bc4901e8d71654354a522e
109,052
def add_favourites_msg(bus_stop_code):
    """Message sent when a user adds a bus stop code to their favourites."""
    return ('Bus Stop Code /{} has been added to your favourites! \n'
            'To view all your favourites, '
            'type: /favourites').format(bus_stop_code)
225a7a992152652784602335cdd900e96ddab1ce
109,060
def train_step_pipe(neox_args, timers, model, data_iterator):
    """Single training step with DeepSpeed's pipeline parallel engine.

    Args:
        neox_args: run configuration (deepspeed must be enabled).
        timers: Megatron timers registry (callable by name).
        model: DeepSpeed pipeline engine exposing train_batch.
        data_iterator: iterator feeding train_batch.

    Returns:
        (loss_dict, skipped_iter): the language-model loss and 1/0 for
        whether the step was skipped due to fp16 overflow.
    """
    assert neox_args.deepspeed
    loss = model.train_batch(data_iter=data_iterator)
    loss_dict = {'lm loss': loss}
    # fp16 overflow means the optimizer skipped this step.
    if neox_args.precision == "fp16" and model.optimizer.overflow:
        skipped_iter = 1
    else:
        skipped_iter = 0
    # Don't break Megatron's timers because we changed code paths.
    for t in ['forward', 'backward', 'allreduce', 'optimizer',
              'batch generator', 'data loader']:
        timers(t).reset()
    return loss_dict, skipped_iter
2c8272babbc7b8fd99f0f56f47e03b1351e546c8
109,064
import json


def get_pip_packages(session, which_pip, restriction=None):
    """Return a generator over installed pip package names.

    Parameters
    ----------
    session : Session instance
        Session in which to execute the command.
    which_pip : str
        Name of the pip executable.
    restriction : {None, 'local', 'editable'}, optional
        'local' excludes globally installed packages (visible when the
        virtualenv was created with "--system-site-packages"); 'editable'
        keeps only editable installs.

    Returns
    -------
    A generator that yields package names.
    """
    # 'pip list' is preferred over 'pip freeze': for editable packages it
    # shows the source directory rather than a "-e git+https://..." URL.
    command = [which_pip, "list", "--format=json"]
    if restriction in ("local", "editable"):
        command.append("--{}".format(restriction))
    stdout, _ = session.execute_command(command)
    return (entry["name"] for entry in json.loads(stdout))
8087dfc3ade9095c319a4154475243c4be5ce565
109,065
def make_type(ttype):
    """Wrap *ttype* in a one-key dict describing a custom type."""
    return dict(type=ttype)
f7911d01df04551bf1a840839eee3abebac22133
109,067
def get_chunk_ids(list_of_sets):
    """Extract all unique ids from list_of_sets.

    Args:
        list_of_sets (list): each set contains sample ids

    Returns:
        all_ids (list): all unique ids in list_of_sets
    """
    unique_ids = set()
    for sample_ids in list_of_sets:
        unique_ids.update(sample_ids)
    return list(unique_ids)
8adb1a64a49ab29df6e3c1fa9016f30d2e6160ef
109,070
def get_job_name(flags):
    """Generate the output-file name for a training run from its config.

    Source setup:
        <objective>_<dataset>_<test_env>_H<hparams_seed>_T<trial_seed>.json
    Time setup:
        <objective>_<dataset>_<test_env>_H<hparams_seed>_T<trial_seed>_S<test_step>.json

    Args:
        flags (dict): dictionary of the config for a training run

    Returns:
        str: name of the output json file of the training run
    """
    return (
        f"{flags['objective']}_{flags['dataset']}_{flags['test_env']}"
        f"_H{flags['hparams_seed']}_T{flags['trial_seed']}"
    )
1178b76c34a897f8a5a6525b2fcf9463fcbd0cc8
109,071
def most_minutes_asleep(naps):
    """Return the ID of the guard who naps the most.

    Each value in *naps* is a list of (start, end) minute pairs; the guard
    with the largest total nap time wins (first such guard on ties).
    Returns None when no guard napped at all.
    """
    best_total = 0
    sleepiest = None
    for guard, recorded in naps.items():
        minutes = sum(end - start for start, end in recorded)
        if minutes > best_total:
            best_total = minutes
            sleepiest = guard
    return sleepiest
ab9214200b6b8abb3eb9320e43b393a981e009a1
109,078
from typing import Tuple
from typing import List


def clamp_global_span_to_tokenized_spans(
        global_span: Tuple[int, int],
        tokenized_spans: List[Tuple[int, int]]) -> Tuple[int, Tuple[int, int]]:
    """
    Assume the following scenario: You are given a paragraph of text and a character span (start, end) inside this
    paragraph. You sentence-tokenize the paragraph. Now, to which sentence does the character span belong and what are
    its character offsets inside the sentence? This method gives you the answer. In case the character span covers
    multiple sentence spans, this method will assign the character span to the first affected sentence, with the end
    of the span coinciding with the end of that sentence.
    Note: This method bears similarity to
    `allennlp.data.dataset_readers.reading_comprehension.util.char_span_to_token_span`.
    :param global_span: the start and end offsets of the span, with global offsets w.r.t. its surrounding paragraph
        (end is exclusive!)
    :param tokenized_spans: the start and end offsets of the sentences (end offsets are exclusive!)
    :return: `(i, (start, end))` where `i` is the index of the sentence in which the start of the character span is
        located and `start`, `end` are the offsets of the span inside the sentence; clamped to the sentence boundary
        if necessary
    """
    start_global, end_global = global_span
    # i_sent is the sentence containing the start of the character span which we want to find.
    # It starts at 1 because the loop below inspects the *previous* sentence (i_sent - 1).
    i_sent = 1
    # Proceed as long as the currently examined sentence ends before the character span begins, or until there are
    # no more sentences to examine.
    while i_sent < len(tokenized_spans) and start_global >= tokenized_spans[i_sent - 1][1]:
        i_sent += 1
    # this happens if the span is somewhere inside the previous sentence
    # (either we ran off the end of the list, or the span starts before sentence i_sent begins)
    if i_sent == len(tokenized_spans) or start_global < tokenized_spans[i_sent][0]:
        i_sent -= 1
    start_sentence, end_sentence = tokenized_spans[i_sent]
    # There is a special case where the start of global_span points to (typically whitespace) characters which lie
    # in between the end and start of the following tokenized span. In this case, we want to clamp the start of the
    # sentence to the beginning of the tokenized span which follows the in-between characters, i.e. 0.
    start_in_span = 0 if start_global < start_sentence else start_global - start_sentence
    # clamp the end of the span to the end of the sentence we assigned the global span to
    end_global_clamped = min(end_global, end_sentence)
    end_in_span = end_global_clamped - start_sentence
    return i_sent, (start_in_span, end_in_span)
591f445c1fd4b761d982a1759b77b7dad001bf00
109,083
def orientation(p, q, r):
    """
    Returns the orientation of the ordered triplet (p, q, r).

    The function returns the following values:
    0 --> p, q, r, are colinear
    1 --> Clockwise
    2 --> Counterclockwise
    """
    # Sign of the cross product of vectors (q - p) and (r - q).
    # Taken from:
    # http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
    cross = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)
    if cross > 0:
        return 1  # clockwise turn
    if cross < 0:
        return 2  # counterclockwise turn
    return 0  # colinear
87677abbfa1499d3b7dd6510cd30a7a656f97466
109,084
def maxi(lst):
    """Return the largest item in *lst*.

    Keeps the original quirk of returning the (empty) list itself when
    *lst* is empty, so callers relying on that falsy return still work.
    """
    if lst:
        # max() is O(n); the original sorted the whole list (O(n log n))
        # just to read its last element.
        return max(lst)
    return lst
c20592b0d8e5548d87d2be3666eb047accacb65c
109,086
import torch


def from_unit_cube(x, lb, ub):
    """Project points from [0, 1]^d onto the hypercube with bounds lb and ub."""
    assert torch.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2
    # Affine map: scale by the box width, then shift to the lower bound.
    return lb + (ub - lb) * x
10cb7687000b7d372d89910bb56d1d36abc38b9c
109,090
def matchDictionaries(first, second):
    """
    Count the key/value pairs of *first* that also appear in *second*.

    A key missing from *second* counts as a mismatch; keys present only in
    *second* are ignored.
    """
    return sum(
        1
        for key, value in first.items()
        if key in second and second[key] == value
    )
62eb94aa26a0ff9838ef54d7d734ec19e23f0004
109,092
import struct


def float_to_bin(num):
    """Return the 32-bit IEEE-754 single-precision bit pattern of *num*."""
    # Pack as big-endian float, reinterpret as an unsigned 32-bit int.
    bits, = struct.unpack('!I', struct.pack('!f', num))
    return format(bits, '032b')
7e8cb8952931d01365aa41eccfe76fb1e13c971e
109,093
def sum_3_multiples(y):
    """Calculate the sum of the first *y* multiples of 3 (3 + 6 + ... + 3*y).

    Returns 0 when y < 1, matching the original loop's empty range.
    """
    if y < 1:
        return 0
    # Arithmetic series in O(1) instead of the original O(y) loop:
    # 3 * (1 + 2 + ... + y) = 3 * y * (y + 1) / 2.
    return 3 * y * (y + 1) // 2
9f04eb2c85d2528a4addf3332181eabf8ec66800
109,103
def get_html_attrs_tuple_attr(attrs, attrname):
    """Returns the value of an attribute from an attributes tuple.

    Given a list of tuples returned by an ``attrs`` argument value from
    :py:meth:`html.parser.HTMLParser.handle_starttag` method, and an
    attribute name, returns the value of that attribute.

    Args:
        attrs (list): List of tuples returned by
            :py:meth:`html.parser.HTMLParser.handle_starttag` method
            ``attrs`` argument value.
        attrname (str): Name of the attribute whose value will be returned.

    Returns:
        str: Value of the attribute, if found, otherwise ``None``.
    """
    # First match wins, just like the original early-break loop.
    return next((value for name, value in attrs if name == attrname), None)
82eddc0749cf306e758060d31f9fbb772c644044
109,106
def get_path(parents, end):
    """
    Return the path from the start node implied by *parents* to *end*.

    Parameters:
        parents - a dictionary rooted at an arbitrary graph node, "start".
            parents[start] = None; parents[other] = preceding node on the
            shortest path from start to other.
        end - the node we want the path to.

    Returns [end] alone when *end* has no entry in *parents*.
    """
    if end not in parents:
        return [end]
    path = [end]
    predecessor = parents[end]
    while predecessor is not None:
        path.append(predecessor)
        predecessor = parents[predecessor]
    path.reverse()
    return path
a31b2d2f9a0658aa946687a03e7b799bc8edab03
109,107
import hashlib


def generate_hash(file_name, hash_algo="sha256"):
    """
    Generate a hash for the provided file path.

    :param file_name: The path to the file for which to generate a hash.
    :param hash_algo: The hash algorithm to use (a hashlib constructor name).
    :return: The generated hex digest.
    :rtype: str
    :raises ValueError: If *hash_algo* names no hashlib algorithm.
    """
    hasher_factory = getattr(hashlib, hash_algo, None)
    if hasher_factory is None:
        raise ValueError("Unknown hash algorithm '{}'.".format(hash_algo))
    # BUG FIX: the original called .update() on the constructor itself
    # (it never instantiated it), which raised AttributeError.
    hasher = hasher_factory()
    # Feed the file in chunks so large files need not fit in memory.
    with open(file_name, "rb") as file:
        for chunk in iter(lambda: file.read(65536), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
8a25dd8bb049279a7dd200ba8d5e3c04c93ae976
109,109
def by_title(title, tiddlers):
    """
    Return those tiddlers that match title.
    """
    matching = []
    for tiddler in tiddlers:
        if tiddler.title == title:
            matching.append(tiddler)
    return matching
82d49ee5e1a2c7501e392c4039dc24b043466df0
109,110
def revSentence(sentence):
    """Reverse the order of the words in a sentence.

    Parameters:
        sentence (str): the sentence to be reversed; words are separated
            by single spaces.

    Returns:
        str: the reversed version of *sentence*, or None (after printing
        an error message) when *sentence* is not a string.
    """
    try:
        words = sentence.split(" ")
    except AttributeError:
        # Non-string input (int, float, list, ...): keep the original
        # best-effort behaviour of reporting the problem and returning None
        # instead of raising, but no longer swallow unrelated exceptions.
        print("Error in reversing sentence : Please try again with a sentence of type 'string' in which the words are seperated with spaces.")
        return None
    # " ".join(reversed(...)) replaces the original manual backwards loop;
    # it handles single-word and empty sentences identically.
    return " ".join(reversed(words))
be7ddcdffe224ca5358b0c4152595cbd6de7ed55
109,122
from pathlib import Path
import pickle


def load_properties_of_cropped(path: Path):
    """
    Load property file of after cropping was performed (files are named
    after case id and .pkl ending)

    Args:
        path (Path): path to file (if .pkl is missing, it will be added
            automatically)

    Returns:
        Dict: loaded properties
    """
    if path.suffix != '.pkl':
        # Append (not replace) the extension, mirroring str(path) + '.pkl'.
        path = Path(f"{path}.pkl")
    with path.open('rb') as f:
        return pickle.load(f)
ec054ab55fa32020109a273844e4ac5571e7cb3c
109,125
def __get_reference_coordinate(feature, referencePoint): """ Returns TSS or TES of a gtf feature. """ if (feature.strand == '+' and referencePoint == 'TSS') or\ (feature.strand == '-' and referencePoint == 'TES'): feature.end = feature.start else: assert(feature.strand == '-' and referencePoint == 'TSS') or\ (feature.strand == '+' and referencePoint == 'TES') feature.start = feature.end return feature
3fad07fc5ca69751dbbdfab2c465b6002c4f7461
109,128
def safeGetAttr(node, attrName):
    """
    Return an attribute from a node by name, or None when the attribute
    does not exist.

    Args:
        node (PyNode): The node with the attribute
        attrName (str): The attribute name
    """
    if not node.hasAttr(attrName):
        return None
    return node.attr(attrName)
6efa65c5d10beb31fd44383cd81f56faab077c4b
109,130
def to_hex_string(num, minDigits=0):
    """ Helper function that converts a number to a 0 padded hex string

    Accepts a non-negative int, a float (truncated to int), or an iterable
    of numbers (each element rendered as two hex digits and concatenated).
    Negative numbers are not supported.

    >>> to_hex_string(5, 4)
    '0005'
    >>> to_hex_string(16)
    '10'
    >>> to_hex_string(20, 4)
    '0014'
    >>> to_hex_string(255)
    'FF'
    >>> to_hex_string([255, 255])
    'FFFF'
    >>> to_hex_string([1, 1])
    '0101'
    >>> to_hex_string([0, 0, 0])
    '000000'
    >>> to_hex_string(0, 4)
    '0000'
    """
    if isinstance(num, float):
        num = int(num)
    if isinstance(num, int):
        # format() replaces hex(num)[2:]; the original's trailing-'L'
        # stripping was a dead Python 2 remnant and has been removed.
        digits = format(num, 'x')
    else:
        # Iterable of numbers: two hex digits per element.
        digits = "".join(to_hex_string(x, 2) for x in num)
    # zfill pads on the left with zeros only when minDigits > len(digits).
    return digits.zfill(minDigits).upper()
fb423cfb17e694da4a89cf2bf55382fd14d8de6c
109,131
def unchanged(value):
    """Identity function: give back *value* untouched."""
    return value
081a8144c2dddcdf5554f1c9a0b6f0558a2b75d8
109,134
from typing import Union


def points_to_inches(points: Union[float, int]) -> Union[float, int]:
    """Convert the input from points to inches.

    Uses the TeX/printer's point (exactly 72.27 points per inch), not the
    DTP "big point" (72 per inch) — hence the 72.27 divisor below.
    """
    return points / 72.27
5b873effddce7a2c9ee0192b7d8ef4ffb20a9863
109,135
def append_period(text):
    """Insert a period before a trailing double quote.

    For text ending in '"', the period is placed inside the quote; any
    other text is returned unchanged.

    NOTE(review): despite the name, no period is appended when the text
    does not end in a quote -- confirm intent against callers before
    extending this.
    """
    if not text:
        # Guard: the original indexed text[-1] and crashed on "".
        return text
    if text[-1] == '\"':
        return text[0:-1] + '.\"'
    return text
afb163840d73278cea5b6d39c7c6816b2d9145e2
109,138
import torch


def _condensed_zeros_like(t):
    """Get a small-storage deterministic tensor with the same shape and dtype as t

    Similar to `torch.zeros(1, dtype=out.dtype).expand(out.shape)`, but this works
    with quantized dtypes as well.

    Similar to `torch.empty(1, dtype=out.dtype).expand(out.shape)`, but always
    returns the same data.
    """
    # empty_like preserves dtype (including quantized dtypes); flatten()[1]
    # selects a single element, clone() gives it its own one-element storage,
    # and expand() broadcasts that element to t's shape without copying.
    # NOTE(review): indexing [1] assumes t has at least two elements -- confirm
    # callers never pass 0/1-element tensors.
    ret = torch.empty_like(t).flatten()[1].clone().expand(t.shape)
    # Sanity check: the whole result is backed by a single stored scalar.
    assert ret.storage().size() == 1
    # Zero that one shared element so every position of ret reads 0.
    ret.storage()[0] = 0
    return ret
7510d6f7fb85f66deebc46dcbbc7cd54a900e57a
109,144
def rename(ds, **names):
    """
    Rename all variables etc that have an entry in names

    Parameters
    ----------
    ds : xarray Dataset
        A dataset to be renamed
    names : dict
        Dictionary of {old_name: new_name}
    """
    for k, v in names.items():
        if k in ds:
            if v in ds:
                # New name already exists; only merge when the two carry
                # identical values, by promoting v to a coordinate and
                # swapping the dimension from k to v.
                # NOTE(review): `all(... == ...)` relies on the comparison
                # yielding an iterable of bools -- confirm for 0-d values.
                if all(ds[k].values == ds[v].values):
                    ds = (
                        ds.assign_coords({v: ds[v].rename({v: k})})
                        .swap_dims({k: v})
                        .drop(k)
                    )
            else:
                # Plain rename when the target name is free.
                ds = ds.rename({k: v})
    return ds
d349850624dce8f5ddb73aa98e97875e21898a46
109,145
import pickle


def import_data(df_sampled_path="../data/interim/europarl_english_german.pkl"):
    """
    Load and return the pickled object stored at *df_sampled_path*.
    """
    with open(df_sampled_path, "rb") as input_file:
        data = pickle.load(input_file)
    return data
a21fe80b43ae4c1b034c8a6c137bab72fa629e2a
109,146
from statistics import mean


def _merge_qc_stats(r1: dict, r2: dict) -> dict:
    """
    Merge appropriate metrics (e.g. coverage) for R1 and R2 FASTQs.

    Args:
        r1 (dict): parsed metrics associated with R1 FASTQ
        r2 (dict): parsed metrics associated with R2 FASTQ

    Returns:
        dict: the merged FASTQ metrics
    """
    # NOTE(review): r2 is indexed unconditionally here, so the `if r2 else`
    # fallbacks in the loop below are unreachable when r2 is falsy -- the
    # function would already have raised. Confirm whether single-end (r2
    # missing) input was ever meant to be supported.
    merged = {
        'qc_stats': {},
        'r1_per_base_quality': r1['per_base_quality'],
        'r2_per_base_quality': r2['per_base_quality'],
        'r1_read_lengths': r1['read_lengths'],
        'r2_read_lengths': r2['read_lengths']
    }
    for key in r1['qc_stats']:
        if key in ['total_bp', 'coverage', 'read_total']:
            # Additive metrics: totals over both reads.
            merged['qc_stats'][key] = r1['qc_stats'][key] + r2['qc_stats'][key] if r2 else r1['qc_stats'][key]
        else:
            # Everything else is averaged; floats are rendered to 4 decimals.
            val = mean([r1['qc_stats'][key], r2['qc_stats'][key]]) if r2 else r1['qc_stats'][key]
            merged['qc_stats'][key] = f'{val:.4f}' if isinstance(val, float) else val
    return merged
29b9fa566da3a392d0d155f13caaf1c4f776823b
109,151
def comment_out(text, comment="#"):
    """
    Comment out some text, using the specified comment character(s) at the
    start of each line.
    """
    commented = []
    for line in text.strip().split("\n"):
        # Blank lines get the bare comment marker, no trailing space.
        commented.append(comment + " " + line if line else comment)
    return "\n".join(commented).strip()
11a6a1d99fb6ed47ae380b0215115d168d5cf491
109,154
import requests


def _authenticate(consumer_key, consumer_secret):
    """Authenticate with the Telstra API.

    Returns the parsed JSON token payload on success, or False when the
    service reports an error.
    """
    token_data = {
        'client_id': consumer_key,
        'client_secret': consumer_secret,
        'grant_type': 'client_credentials',
        'scope': 'SMS'
    }
    token_resource = 'https://api.telstra.com/v1/oauth/token'
    # BUG FIX: OAuth 2.0 (RFC 6749 section 3.2) requires the token endpoint
    # to be called with POST and form-encoded credentials; the original used
    # GET with query parameters, which also leaks the secret into URLs/logs.
    token_response = requests.post(
        token_resource, data=token_data, timeout=10).json()
    if 'error' in token_response:
        return False
    return token_response
b0c4b6d0e4f46d142b5c8ac250c04d737a56fe2a
109,157
def _get_data_dtype(img):
    """Returns the dtype of an image.

    If the image is non standard (no get_data_dtype member), this function
    relies on the data itself.
    """
    try:
        return img.get_data_dtype()
    except AttributeError:
        # Fallback: load the data array and read its dtype directly.
        return img.get_data().dtype
799db14a11e25f3e87b7fa1113be1e3dea9691e3
109,161
def dR_da_i(dR_dq, bases_i):
    """See :meth:`compute_dR_dq`

    :param dR_dq: Derivative of the cost functions w.r.t. the quaternion q
    :param bases_i: B-spline basis of index i
    """
    # Chain rule: scale the quaternion gradient by the basis weight, then
    # return it as a 4x1 column vector.
    scaled = dR_dq * bases_i
    return scaled.reshape(4, 1)
2ebebcfc0d0e786f76e0670a1a633db67092d309
109,164