Columns: content — string (39 to 9.28k chars) · sha1 — string (40 chars) · id — int64 (8 to 710k)
def get_linear(x1, y1, x2, y2, offset=1000):
    """Return a and b for ax+b of the line through points (x1, y1) and (x2, y2)."""
    # If the two points are distinct
    if x1 != x2:
        # Compute the slope of the line, scaled by offset (fixed-point)
        a = ((y1 - y2) * offset) // (x1 - x2)
        b = y1 * offset - a * x1
    else:
        a = 0
        b = 0
    return a, b, offset
d2ee9d76bdf355e11948c8a2c01793ddd4ba63eb
251,040
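A small usage sketch for get_linear above (hypothetical values): with offset=1000 the slope is returned as a scaled integer, so y must be recovered by dividing by offset.

a, b, offset = get_linear(0, 0, 4, 2)  # slope 1/2 -> a == 500, b == 0
y_at_x6 = (a * 6 + b) / offset         # 3.0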
def _get_drop_index_command(index_name: str) -> str:
    """Returns the command for dropping an index."""
    return f"DROP INDEX IF EXISTS {index_name};"
29d104784ee2605012f4c5ccc1a59e7be6ac5629
312,797
def to_title(s):
    """Returns the title field from a spec name."""
    return s.title()
25113f2e6b9193c0464d810761b97c5265cb5004
158,489
import time

def _Await(fn, timeout_secs):
    """Waits up to timeout_secs for fn() to return True."""
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        if fn():
            return True
        time.sleep(0.2)
    return False
20258002747f1c3e2663a414c7f57e201ba9443e
111,953
def is_last_page(page):
    """Check whether a Twitter timeline page is the last page.

    The last page is either empty or contains only one Tweet: the last
    Tweet of the previous page, repeated.

    Args:
        page: a Twitter timeline page

    Returns:
        boolean: True if page is the last page.
    """
    return len(page) == 1 or len(page) == 0
31a47f348230c7c4f285865b5c1b374153297eab
135,220
def first_of(attr, match, it):
    """Return the first item in an iterable whose `attr` attribute equals `match`."""
    if it is not None:
        for i in it:
            try:
                if getattr(i, attr) == match:
                    return i
            except AttributeError:
                pass
    return None
9221897112feaed61602a1d16777dafbf42a1dd4
641,418
import torch

def hz_to_mel(freqs: torch.Tensor):
    """Converts a Tensor of frequencies in hertz to the mel scale.

    Uses the simple formula by O'Shaughnessy (1987).

    Args:
        freqs (torch.Tensor): frequencies to convert.
    """
    return 2595 * torch.log10(1 + freqs / 700)
02ac7b5af09a12ae0040ea612c6e1a1d478baf35
678,381
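For reference, a brief check of hz_to_mel with illustrative inputs; by construction of the O'Shaughnessy formula, 1000 Hz maps to roughly 1000 mel.

import torch
freqs = torch.tensor([440.0, 1000.0])
hz_to_mel(freqs)  # ~ tensor([549.7, 1000.0])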
def unlinearize_term(index, n_orbitals):
    """Function to map an integer index back to the term indices.

    Args:
        index(int): The index of the term.
        n_orbitals(int): The number of orbitals in the simulation.

    Returns:
        term(tuple): The term indices of a one- or two-body FermionOperator.
    """
    # Handle identity term.
    if not index:
        return ()
    elif 0 < index < 1 + n_orbitals ** 2:
        # Handle one-body terms.
        shift = 1
        new_index = index - shift
        q = new_index // n_orbitals
        p = new_index - q * n_orbitals
        assert index == shift + p + q * n_orbitals
        return ((p, 1), (q, 0))
    else:
        # Handle two-body terms.
        shift = 1 + n_orbitals ** 2
        new_index = index - shift
        s = new_index // n_orbitals ** 3
        r = (new_index - s * n_orbitals ** 3) // n_orbitals ** 2
        q = (new_index - s * n_orbitals ** 3 - r * n_orbitals ** 2) // n_orbitals
        p = (new_index - q * n_orbitals - r * n_orbitals ** 2
             - s * n_orbitals ** 3)
        assert index == (shift + p + q * n_orbitals + r * n_orbitals ** 2
                         + s * n_orbitals ** 3)
        return ((p, 1), (q, 1), (r, 0), (s, 0))
0f1a044c4b95b594a3b3e20b7623c1da4ed67275
66,609
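A few illustrative calls to unlinearize_term for n_orbitals=4, showing the index layout (identity at 0, one-body terms in [1, 1+n^2), two-body terms after):

unlinearize_term(0, 4)   # () -- identity term
unlinearize_term(1, 4)   # ((0, 1), (0, 0)) -- first one-body term
unlinearize_term(17, 4)  # ((0, 1), (0, 1), (0, 0), (0, 0)) -- first two-body term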
from typing import Union
from typing import Iterable
from typing import Mapping
from typing import Callable
import json

def map_nested_objs(
    obj: Union[Iterable, Mapping], func: Callable
) -> Union[Iterable, Mapping]:
    """Recursively applies `func` to each leaf value nested within a
    dictionary/list structure.

    E.g. given obj = [{"a": 1}, {"b": 2}], `func` will be applied to 1 and 2.
    Returns a new structure built from the results of `func`.

    Args:
        obj: object to recursively run `func` on
        func: function to apply recursively

    Returns:
        The updated obj
    """
    if isinstance(obj, str):
        return map_nested_objs(json.loads(obj), func)
    elif isinstance(obj, Mapping):
        return {k: map_nested_objs(v, func) for k, v in obj.items()}
    elif isinstance(obj, Iterable):
        return [map_nested_objs(v, func) for v in obj]
    else:
        return func(obj)
1e5dad24fc4fcb73e86735ecc8529ffbc50da6d3
585,704
import csv
from io import StringIO

def csv_res2_dict_lst(res):
    """Convert CSV string with a header into list of dictionaries"""
    return list(csv.DictReader(StringIO(res), delimiter=","))
5271cd4ef1e82fdc77b0d69c58faedf2f971c07c
692,921
import torch

def entropy(p):
    """Compute the entropy of a probability distribution"""
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
ac04e6115efbd2b086bbcf83c09c3eb8e2863d1c
241,387
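A small sanity check for entropy above; note it uses the natural logarithm, and the p == 0 mask avoids the NaN from 0 * log(0):

import torch
p = torch.tensor([0.5, 0.5, 0.0])
entropy(p)  # tensor(0.6931) -- i.e. ln(2)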
def valid_packet(packet, constraint):
    """Check if a hex encoded packet received from rflib.Rfcat.RFrecv has the
    constraint value in it.

    Idea From: https://github.com/mossmann/stealthlock/blob/master/sl.py#L17

    :param packet:
    :param constraint:
    :return:
    """
    return constraint in packet
78dc257f3c2ea3c47bb9b9943bf7a0b7ab3aa12b
344,148
def pk_decrypt(encrypted, private_key):
    """Return `encrypted` decrypted by `private_key` as a string.

    encrypted: list
        Chunks of encrypted data returned by :func:`pk_encrypt`.

    private_key: :class:`Crypto.PublicKey.RSA`
        Private portion of key pair.
    """
    data = ''
    for chunk in encrypted:
        data += private_key.decrypt(chunk)
    return data
68e7588e3f6a0107cec1b5228ee79658102f1895
318,753
def TSKVolumeGetBytesPerSector(tsk_volume):
    """Retrieves the number of bytes per sector from a TSK volume object.

    Args:
        tsk_volume: a TSK volume object (instance of pytsk3.Volume_Info).

    Returns:
        The number of bytes per sector or 512 by default.
    """
    # Note that because pytsk3.Volume_Info does not explicitly define info
    # we need to check if the attribute exists and has a value other
    # than None. Default to 512 otherwise.
    if hasattr(tsk_volume, u'info') and tsk_volume.info is not None:
        block_size = getattr(tsk_volume.info, u'block_size', 512)
    else:
        block_size = 512
    return block_size
f67a5678187e13b2f62e67db16b3eaa2d3aba6f2
264,978
import re

def CamelToSnakeCase(input_string):
    """Converts camelCase to snake_case."""
    # Prepend every uppercase character with an underscore
    # e.g. camelCase -> camel_Case
    with_underscores = re.sub(r'([A-Z])', r'_\1', input_string)
    # Ensure a name starting with an uppercase letter does not have a
    # leading underscore, e.g. CamelCase -> _Camel_Case -> Camel_Case
    without_leading_underscore = with_underscores.lstrip('_')
    # Convert all characters to lowercase
    # e.g. camel_Case -> camel_case
    return without_leading_underscore.lower()
9c1109c7213251eb63c4cd777b79528564d4a5ee
577,749
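Illustrative outputs for CamelToSnakeCase; note that runs of capitals are split character by character, which may or may not be desired:

CamelToSnakeCase('camelCase')   # 'camel_case'
CamelToSnakeCase('CamelCase')   # 'camel_case'
CamelToSnakeCase('HTTPServer')  # 'h_t_t_p_server'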
def get_iou(bboxes1, bboxes2):
    """Calculates the intersection-over-union of pairs of bounding boxes.

    Adapted from
    https://gist.github.com/zacharybell/8d9b1b25749fe6494511f843361bb167

    Args:
        bboxes1 (list): bounding boxes in format x1,y1,x2,y2.
        bboxes2 (list): bounding boxes in format x1,y1,x2,y2.

    Returns:
        list of float: intersection-over-union of each pair of boxes.
    """
    ious = []
    for bbox1, bbox2 in zip(bboxes1, bboxes2):
        bbox1 = [float(x) for x in bbox1]
        bbox2 = [float(x) for x in bbox2]
        (x0_1, y0_1, x1_1, y1_1) = bbox1
        (x0_2, y0_2, x1_2, y1_2) = bbox2
        # get the overlap rectangle
        overlap_x0 = max(x0_1, x0_2)
        overlap_y0 = max(y0_1, y0_2)
        overlap_x1 = min(x1_1, x1_2)
        overlap_y1 = min(y1_1, y1_2)
        # check if there is an overlap
        if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
            ious.append(0)
            continue
        # if yes, calculate the ratio of the overlap to each ROI size
        # and the unified size
        size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
        size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
        size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
        size_union = size_1 + size_2 - size_intersection
        iou = size_intersection / size_union
        ious.append(iou)
    return ious
54b5ef225a4ec2600a9fdf921bcd5edf0be77faa
377,551
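A worked example for get_iou with hypothetical boxes: the overlap rectangle has area 1 and the union has area 4 + 4 - 1 = 7:

boxes_a = [[0, 0, 2, 2]]
boxes_b = [[1, 1, 3, 3]]
get_iou(boxes_a, boxes_b)  # [0.14285714...] == [1/7]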
def diff_view(str1, str2):
    """Calculate the lengths of the longest common prefix and suffix between
    str1 and str2.

    Let str1 = axb of length m and str2 = ayb of length n; this function
    finds and returns i and j such that: str1[0:i] = str2[0:i] = a and
    str1[m-j:] = str2[n-j:] = b. In the case that a or b does not exist
    (no common prefix or suffix), then i or j is 0.

    :param str1: the first string
    :param str2: the second string
    :return: common prefix and suffix lengths (i.e. i and j; see description)
    """
    m, n = len(str1), len(str2)
    len_limit = min(m, n)
    prefix_len, suffix_len = 0, 0
    while prefix_len < len_limit and str1[prefix_len] == str2[prefix_len]:
        prefix_len += 1
    # Index from the end explicitly rather than with negative indexing;
    # it is easier to follow.
    while suffix_len < len_limit \
            and str1[m - 1 - suffix_len] == str2[n - 1 - suffix_len]:
        suffix_len += 1
    return prefix_len, suffix_len
997f5a0ff4d054b59bf08618ba0f15d9c4b2f0d1
658,173
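For instance, with str1 = "abcXdef" and str2 = "abcYYdef", diff_view finds the common prefix "abc" and common suffix "def":

diff_view("abcXdef", "abcYYdef")  # (3, 3)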
import torch

def Jacvec(y, x, v):
    """Computes a vector-Jacobian product v^T J (v multiplies the Jacobian
    from the left, which is what torch.autograd.grad returns).

    Arguments:
        y (torch.tensor): output of differentiated function
        x (torch.tensor): differentiated input
        v (torch.tensor): vector to be multiplied with Jacobian from the left
    """
    return torch.autograd.grad(y, x, v, retain_graph=True)
0f50814d9afe5bed70f8f71ca590075013edf87c
448,839
import json

def load_json(filepath: str):
    """Load JSON from file"""
    with open(filepath, "r", encoding="utf8") as f:
        return json.loads(f.read())
939d18a201d33d612172731163fa9c7e6b08e744
512,063
def court_counties(text: str) -> list:
    """Return a list of county names from the text of the statute.

    NOTE: This is highly dependent upon the writing style of the Texas
    Government Code, which, to date, is fairly consistent.

    Args:
        text (str): Text of statute to parse.

    Returns:
        (list): List of counties mentioned in the statute
    """
    search = 'Judicial District is composed of '
    start_pos = text.find(search) + len(search)
    end_pos = text.find('.', start_pos + 1)
    counties = text[start_pos:end_pos] \
        .replace(', and ', ',') \
        .replace(' and ', ',') \
        .split(',')
    return [x.replace(' and ', '')
             .replace('County', '')
             .replace('Counties', '')
             .replace('county', '')
             .replace('counties', '')
             .strip() for x in counties]
dc66a5fd444e80c607217ddb7f14363f0c04e29a
374,264
def _path(from_object, to_object):
    """Calculates the 'path' of objects starting from 'from_object' to
    'to_object', along with the index of the first common ancestor in the
    tree.

    Returns (index, list) tuple.
    """
    if from_object._root != to_object._root:
        raise ValueError('No connecting path found between ' +
                         str(from_object) + ' and ' + str(to_object))
    other_path = []
    obj = to_object
    while obj._parent is not None:
        other_path.append(obj)
        obj = obj._parent
    other_path.append(obj)
    object_set = set(other_path)
    from_path = []
    obj = from_object
    while obj not in object_set:
        from_path.append(obj)
        obj = obj._parent
    index = len(from_path)
    i = other_path.index(obj)
    while i >= 0:
        from_path.append(other_path[i])
        i -= 1
    return index, from_path
0ccfe54d36832b8dce3c55168f02abb3c79261ef
10,312
from pathlib import Path
from typing import Dict
from typing import Any
import yaml

def read_metadata(path_job: Path) -> Dict[str, Any]:
    """Read the job's metadata information from the job's workdir."""
    path_metadata = path_job / "metadata.yml"
    if not path_metadata.exists():
        msg = f"There is no file with metadata: {path_metadata}"
        raise FileNotFoundError(msg)
    with open(path_metadata, 'r') as handler:
        return yaml.load(handler.read(), Loader=yaml.FullLoader)
2aa88f0d06eb20c32a9dce7e18ac31c8703845be
341,594
def add(first_number: int, second_number: int) -> int:
    """Adds two integers.

    :param first_number, second_number: two numbers to be added
    :return: [int] sum of the two input numbers
    :raise ValueError: if the numbers are not integers
    """
    if isinstance(first_number, int) and isinstance(second_number, int):
        return first_number + second_number
    else:
        raise ValueError(
            "Invalid input. Expected (int, int) got ({}, {})".format(
                type(first_number), type(second_number)))
ba0273db0427c044fc6ec1bbe4f22f7f1159b8c4
602,529
import logging

def get_parse_tsv_line_fn(return_none_on_error=False, reverse=False):
    """A higher-order function producing TSV line-parsing functions.

    Args:
        return_none_on_error: Whether to return None on encountering an error
            (such as too few TSV columns) rather than raising an Error.
        reverse: When True, returns ([`target`], `source`) instead of
            ([`source`], `target`). Useful for working with "reverse"
            (a.k.a. "noise") models that go from `target` to `source`.

    Returns:
        A parsing function that goes from a text line to a ([source], target)
        pair (or a ([`target`], `source`) pair when `reverse`=True).
    """
    def parse_tsv_line(line):
        """Parses the first two columns, `source` and `target`, from a TSV line.

        Any further columns are ignored.

        Args:
            line: A text line.

        Returns:
            a tuple ([source], target), with `source` being wrapped in a list.

        Raises:
            ValueError: when the line has fewer than two TSV columns and
                `return_none_on_error`=False.
        """
        split = line.rstrip("\n").split("\t")
        if len(split) < 2:
            message = 'TSV line has less than two tab-delimited fields:\n"{}"'.format(
                line)
            if return_none_on_error:
                logging.warning(message)
                return None
            else:
                raise ValueError(message)
        source, target = split[:2]
        if reverse:
            return [target], source
        else:
            return [source], target

    return parse_tsv_line
f7e42b1fd70a7ea346a183eb3cfafe897200c240
205,504
def _dim_arg(value, units):
    """Concatenate a specified units string to a numerical input.

    Parameters
    ----------
    value : str or number
        Valid expression string in the AEDT modeler. For example, ``"5mm"``.
    units : str
        Valid units string in the AEDT modeler. For example, ``"mm"``.

    Returns
    -------
    str
    """
    try:
        val = float(value)
        return str(val) + units
    except (ValueError, TypeError):
        return value
194edeb27aab15e3313671741b8192c27528ba95
490,744
def yes_or_no(message, default) -> bool:
    """Simple method to ask yes or no.

    Args:
        message: prompt message.
        default: answer assumed when the user just presses Enter.

    Returns:
        bool
    """
    while True:
        res = input(message)
        if res == "":
            res = default
        while res.lower() not in ("yes", "no"):
            print("[*] Please enter 'yes' or 'no' > ")
            res = input(message)
        if res == "no":
            return False
        else:
            return True
4ab542ce1db21e4304881fe8adea891adf3c3429
467,546
def simple2string(x):
    """Simple objects (bytes, bool, float, int, None, str) are converted to
    string and returned. Other types are returned as None.
    """
    if x is None or isinstance(x, (bytes, bool, float, int, str)):
        return str(x)
    else:
        return None
e08f5959c519b3e2f3ea64c553d8d5856fd76ae6
152,394
def tau_model(y, t, io, tau, p_nom):
    """Define ODE for a simplified dynamic model using time constant"""
    dydt = (p_nom * io - y) / tau
    return dydt
27fe58dbe64c65b98ee4c1b195b80f32b32ee1e1
332,694
def gen_topline_employment_change(shortened_dv_list):
    """Create variables for the topline employment change for the current
    and previous months."""
    topline_employment_change = round(
        (shortened_dv_list[11] - shortened_dv_list[10]) * 1000)
    prev_topline_employment_change = round(
        (shortened_dv_list[10] - shortened_dv_list[9]) * 1000)
    return topline_employment_change, prev_topline_employment_change
5afea158ffc75558cd4d615dacdf7e9945189741
104,167
def url_joiner(url, path, trailing=None):
    """Join two sections of a URL with proper forward slashes"""
    url_link = "/".join(s.strip("/") for s in [url, path])
    if trailing:
        url_link += "/"
    return url_link
888abf4917fbcb476f39a6cda7cc46880bd9aa4e
642,085
def float2str(f, var=None):
    """Convert float to nicely formatted string

    :param f: float to convert to string
    :type f: float
    :param var: (optional) tkinter.StringVar to write value to
    :type var: None or tkinter.StringVar
    :return: string with nicely formatted float, or None
    :rtype: str or None
    """
    s = "{:f}".format(f)
    idx_point = s.find(".")
    if idx_point > -1:
        s = s.rstrip("0")
        if len(s) == idx_point + 1:
            s = s[:-1]
    if not s:
        s = "0"
    if var is not None:
        var.set(s)
    return s
15927fb1b5858f814a367282fd4fee1f69ea4ebd
613,843
def set_location(wkt, db):
    """Return location constraint payload parameter."""
    if 'POLYGON((' not in wkt:
        msg = 'WKT bounding box must be in POLYGON((...)) format'
        raise ValueError(400, msg)
    if db == 'neotoma':
        return {'loc': wkt}
    elif db == 'pbdb':
        return {'loc': wkt}
    # NEW RESOURCE: Add database-specific WKT bounding box vocabulary here
    else:
        return {}
ac8c522554cb1aa19c352078153859c007118f3d
227,299
def point_on_segment(s, p):
    """Check if a point lies on a segment (the test checks that the point
    falls within the segment's axis-aligned bounding box).

    Parameters
    ----------
    s: numpy.array
        A 2D line segment represented by its two endpoints.
    p: numpy.array
        The point that will be checked if in segment.
    """
    is_on_segment = (p[0] >= s[:, 0].min()) & (p[0] <= s[:, 0].max()) & \
                    (p[1] >= s[:, 1].min()) & (p[1] <= s[:, 1].max())
    return is_on_segment
f5411ac6f9f623713ab93eb3883ecec409f5656e
506,636
import shutil

def center_text(msg: str, *, pad: str = ' ') -> str:
    """Centers text horizontally for display within the current terminal,
    optionally padding both sides.

    :param msg: message to display in the center
    :param pad: if provided, the first character will be used to pad both
        sides of the message
    :return: centered message, optionally padded on both sides with pad_char
    """
    term_width = shutil.get_terminal_size().columns
    surrounded_msg = ' {} '.format(msg)
    if not pad:
        pad = ' '
    fill_char = pad[:1]
    return surrounded_msg.center(term_width, fill_char)
3ead8c3bce298d779205bf0d6743ec6ac05c4d7a
673,711
def quick_sort_out_of_place(array):
    """Recursive QuickSort implementation:
    - O(n log(n)) time
    - O(n) space (out of place)
    - unstable
    - pivot = mean of the range (best on normal, numerical distributions)
    """
    # Base Case
    if len(array) < 2:
        return array
    # Recursive Case - choose a pivot
    pivot = (min(array) + max(array)) // 2
    # Divide and Conquer - partition by the pivot
    lower_partition = [val for val in array if val < pivot]
    middle = [val for val in array if val == pivot]
    upper_partition = [val for val in array if val > pivot]
    # Combine - recurse on left and right partitions
    return (
        quick_sort_out_of_place(lower_partition)
        + middle
        + quick_sort_out_of_place(upper_partition)
    )
64e0b976962f5079b510573bfd90677d25340ae6
263,823
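A short usage sketch for quick_sort_out_of_place (integer input assumed, since the pivot uses floor division):

quick_sort_out_of_place([5, 1, 4, 2, 3])  # [1, 2, 3, 4, 5]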
def list_views(collection) -> list:
    """Finds a given collection's `_design` docs and extracts the view names
    found inside of them.

    :param collection: couchdb collection
    :type collection: :class:`couchdb.Database`
    :rtype: list
    """
    desdocs = [collection[name] for name in collection
               if name.startswith("_design/")]
    views = []
    for doc in desdocs:
        path = doc.id.split("/")[-1]
        views.extend(map(lambda v: path + "/" + v, doc.get("views", {}).keys()))
    return views
bb98110bac3c5e06ff39a41e5189c12f953872b0
267,294
def format_output_bytes(record, truncate):
    """Return a byte-string representation of a BlipRecord.

    If `truncate` is True, the payload is replaced with the string "..."

    :param record: BlipRecord object
    :type record: BlipRecord
    :param truncate: Boolean argument which causes truncation on True
    :type truncate: bool
    :rtype: bytes
    """
    fmt_string = "<Record: Exchange={}, Type={}, Length={}, Payload={}>\n"
    payload = "..." if truncate else record.payload
    return bytes(fmt_string.format(
        record.exchange, record.payload_type, len(record.payload), payload),
        'utf-8')
e27c16e7497812f6e223c938b39060b793315de4
488,363
from typing import List

def get_current_probability(c_lambdas: List[float], last_probability: float,
                            step: int, walk_type: str) -> float:
    """Computes the transition probability for the next step according to the
    respective definition as in the paper.

    :param c_lambdas:
    :param last_probability:
    :param step: as Ising variable
    :param walk_type:
    :return:
    """
    if step == '' or step == 0:  # at the beginning of the walk just return p0
        return last_probability
    if walk_type == 'success_punished':
        return (c_lambdas[0] * last_probability
                + 0.5 * (1 - c_lambdas[0]) * (1 - step))
    elif walk_type == 'success_rewarded':
        return (c_lambdas[0] * last_probability
                + 0.5 * (1 - c_lambdas[0]) * (1 + step))
    elif walk_type == 'success_punished_two_lambdas':
        return 0.5 * ((1 + step) * c_lambdas[0] * last_probability
                      + (1 - step) * (1 - c_lambdas[1] * (1 - last_probability)))
    elif walk_type == 'success_rewarded_two_lambdas':
        return 0.5 * ((1 - step) * c_lambdas[0] * last_probability
                      + (1 + step) * (1 - c_lambdas[1] * (1 - last_probability)))
    else:
        raise Exception(f'Unexpected walk type: {walk_type}')
45911b4d4f833972cc583113bfa49dc15da4afb0
461,278
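A worked call for get_current_probability with hypothetical parameters: for the 'success_punished' walk with lambda = 0.5, p = 0.6 and step = +1, the result is 0.5 * 0.6 + 0.5 * 0.5 * (1 - 1) = 0.3:

get_current_probability([0.5], 0.6, 1, 'success_punished')  # 0.3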
def get_orders_dict(orders):
    """Form a dictionary of current order buys and sells"""
    list_orders = list(orders)
    orders_dict = {}
    orders_dict["sells"] = []
    orders_dict["buys"] = []
    for order in list_orders:
        if order["side"] == "sell":
            temp_price = round(float(order["price"]), 2)
            orders_dict["sells"].append(temp_price)
        if order["side"] == "buy":
            temp_price = round(float(order["price"]), 2)
            orders_dict["buys"].append(temp_price)
    return orders_dict
9d126d759dd0b3da7c584f6d4163243d8b2cee43
17,183
def is_xgboost_regressor(regressor_type):
    """
    :param regressor_type: string. Case insensitive.
    :return: boolean indicating whether the regressor type is the xgboost
        regressor.
    """
    return regressor_type.upper() == 'XGB'
669da05d0bf724eb5310d84bfe29403f5d43173f
328,489
from typing import Union
from typing import Optional

def try_str_int(value: str) -> Union[str, int]:
    """Try to convert str to int and return int or original str."""
    converted: Optional[int] = None
    try:
        converted = int(value)
    except ValueError:
        pass
    return value if converted is None else converted
b476cbe293fbf813a6d942bc1b6321619e06f396
557,913
import requests

def get_metadata_value(key):
    """Fetch the key from the metadata server"""
    url = 'http://metadata/computeMetadata/v1/instance/' + key
    headers = {'content-type': 'application/json', 'Metadata-Flavor': 'Google'}
    r = requests.get(url, headers=headers)
    return r.text
2ba0ae04e6e427afe1b4fc3e7a091947946b2556
108,780
def do_error(message: str):
    """Print an assembly/runtime error message

    Parameters
    ----------
    message: str, mandatory
        The error message to display

    Returns
    -------
    N/A

    Raises
    ------
    N/A

    Notes
    -----
    N/A
    """
    print()
    print(message)
    return True
2a7b7d8529d79b0722e952e5716cfcaba9192a6e
281,415
from typing import List

def anydup(lst: List) -> bool:
    """Check whether there are any duplicate values in the input list

    Parameters
    ----------
    lst : list
        list of values

    Returns
    -------
    bool
    """
    seen = set()
    for e in lst:
        if e in seen:
            return True
        seen.add(e)
    return False
44ec835dd61d2ed7489a2924cbeb4a529442bdf2
595,772
def get_float_value(item):
    """Get float value"""
    return item
270007dd308cadd009fc59c1cab38406724bef10
297,532
def update_filepaths(df, bands, ds_path):
    """Updates the image paths to point at the data directory `ds_path`.

    Args:
        df (pd.DataFrame): full dataframe
        bands (list): band names whose path columns should be updated
        ds_path: path to the dataset root directory

    Returns:
        updated_df (pd.DataFrame): dataframe with updated filepaths.
    """
    updated_df = df.copy(deep=True)
    for band in bands:
        updated_df[f'{band}_path'] = (str(ds_path) + '/train_features/'
                                      + updated_df.chip_id + f'/{band}.tif')
    updated_df['label_path'] = (str(ds_path) + '/train_labels/'
                                + updated_df.chip_id + '.tif')
    return updated_df
56c76e701d8193b1f813e189aa3670889345cf42
525,795
def remove_keys_recursively(obj, fields_to_remove):
    """Remove specified keys recursively from a python object (dict or list).

    Args:
        obj (dict/list): from where keys need to be removed.
        fields_to_remove (list): fields to remove

    Returns:
        dict/list: Cleaned object
    """
    if isinstance(obj, dict):
        obj = {
            key: remove_keys_recursively(value, fields_to_remove)
            for key, value in obj.items()
            if key not in fields_to_remove
        }
    elif isinstance(obj, list):
        obj = [
            remove_keys_recursively(item, fields_to_remove)
            for item in obj
            if item not in fields_to_remove
        ]
    return obj
3e61dc7745fe3e79561be325775e86a83a5b3254
116,774
def resolve_templating_engine(args):
    """Figures out what templating engine should be used to render the stack"""
    # Figure out what templating engine to use.
    # Only use -t option when stack comes from stdin
    if args.stack.name == "<stdin>":
        return args.templating_engine
    elif ".mako" in args.stack.name[-5:]:
        return "mako"
    elif ".jinja" in args.stack.name[-6:]:
        return "jinja"
    elif ".yaml" in args.stack.name[-5:]:
        return "yaml"
    raise NotImplementedError("Templating engine not supported. Must be set "
                              "to 'mako', 'jinja', or '' in the command line "
                              "or by using the equivalent file extension")
aef956cd3a5a9cca8451f069a986407af631694e
25,147
import re

def IsValidHexColor(color):
    """Checks the validity of a hex color value:
    - the color string must consist of 6 hexadecimal digits
    """
    return re.match("^[0-9a-fA-F]{6}$", color) is not None
864d0e5744ea23a631890186e105af0ed2750462
418,373
def gf_mul(multiplicand, multiplier):
    """Galois Field multiplication function for AES using irreducible
    polynomial x^8 + x^4 + x^3 + x^1 + 1
    """
    product = 0
    a = multiplicand
    b = multiplier
    while a * b > 0:
        if b % 2:
            product ^= a
        if a >= 128:
            # reduce modulo the AES polynomial (0x11b == 283)
            a = (a << 1) ^ 283
        else:
            a <<= 1
        b >>= 1
    return product
be84919453c9bf7fdaa111250185b8f31f47d0a6
565,222
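As a check, gf_mul reproduces the multiplication example worked in the FIPS-197 specification: {57} * {83} = {c1} in GF(2^8):

gf_mul(0x57, 0x83)  # 193 == 0xc1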
def get_tables(client, dset):
    """Get all the names of the tables inside a BigQuery dataset.

    Parameters:
    -----------
    client: a google.cloud bigquery connection
    dset: a connected bigquery dataset

    Output:
    -----------
    list: a list of all the names of the tables inside dset
    """
    return [x.table_id for x in client.list_tables(dset)]
89aed3a2445cb9005b88a4e3e3d0d954c8359c14
452,286
from typing import List

def ngrams(tokens: List, n: int):
    """
    Args:
        tokens: List of elements
        n: N-gram size

    Returns:
        List of ngrams
    """
    return [tokens[i:i + n] for i in range(len(tokens) - n + 1)]
197bf76e6113eaf83589887e7ac35020a32ab1ab
80,738
def isopt(c):
    """Function that checks whether the passed character is an operator."""
    if c == "+" or c == "-" or c == "*" or c == "/":
        return True
    else:
        return False
ec6c4ef16a48ecdd514a856ec30914ad58395de4
607,009
def getSelectRedirect(params):
    """Returns the pick redirect for the specified entity."""
    if params.get('args'):
        return '/%(url_name)s/pick?%(args)s' % params
    else:
        return '/%(url_name)s/pick' % params
1eae1150f180986b74ac2ec9bc31c3d3d1f566e1
16,698
import re

def pay_interval(payment_phrase: str) -> str:
    """Find first match of a pay interval keyword.

    payment_phrase: text containing pay interval information
    :raises: ValueError if a payment interval cannot be found inside
        payment_phrase
    """
    interval_exp = '(?P<interval>hour|day|week|biweek|month|year|annual)'
    match = re.search(interval_exp, payment_phrase.lower())
    if match:
        interval = match.group('interval')
    else:
        print(f'Could not find payment interval in {payment_phrase}')
        raise ValueError
    return interval
f7b3d8d92302225eeec57648c2dcad569a67032d
488,577
def monta_lista_base(lista_ref):
    """Builds, from a reference list `lista_ref`, a new list of the same
    length whose elements are all None.

    Args:
        lista_ref (list): reference list for the size of the new list

    Returns:
        [list]: list of None elements with the same length as lista_ref
    """
    return [None] * len(lista_ref)
1ab395059bac4aec09480a7d01404a827acd8f1d
641,341
def get_version(module):
    """Attempts to read a version attribute from the given module that could
    be specified via several different names and formats.
    """
    version_names = ["__version__", "get_version", "version"]
    version_names.extend([name.upper() for name in version_names])
    for name in version_names:
        try:
            version = getattr(module, name)
        except AttributeError:
            continue
        if callable(version):
            version = version()
        try:
            version = ".".join([str(i) for i in version.__iter__()])
        except AttributeError:
            pass
        return version
cd3f9d00f255b7eec39333ba95bcecc6274f7ad6
250,552
import binascii

def encrypt(message: bytes, pad: bytes) -> bytes:
    """Performs the encryption between the message and pad.

    Arguments:
        message: bytes - the message to encrypt, raw bytes not in hex
        pad: bytes - the pad to encrypt with, hex form

    Returns:
        bytes - the encrypted message
    """
    # take the pad out of hex form
    pad = binascii.unhexlify(pad)
    # make sure the message and pad are the same length
    assert len(message) == len(pad)
    encrypted = []
    for m, p in zip(message, pad):
        # iterate through the message and pad taking the exclusive or
        # of each pair of bytes in the message and pad
        encrypted.append(m ^ p)
    # return the encrypted message in hex form
    return binascii.hexlify(bytes(encrypted))
cabe84341a6950ef3b5b4edf7c86f2cfda38c97a
246,590
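A small sketch for encrypt with a hypothetical message and pad; XOR-ing the unhexlified result with the same pad recovers the plaintext:

import binascii
pad = binascii.hexlify(b"\x01\x02\x03")
encrypt(b"abc", pad)  # b'606060'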
def _mp_pil_image_save(self, _mp_func, _mp_fmt, _mp_params, fp, format=None,
                       **params):
    """Replacement function for PIL.Image.Image.save.

    This does not change any functionality of the function but allows for
    overrides to be put in place. This works around
    openpyxl.writer.excel.ExcelWriter._write_images which hardcodes image
    format to .png. It also allows adding extra parameters such as quality.
    This should be used as a base for a functools.partial function to expose
    only the same arguments to the rest of the program.

    Args:
        _mp_func: original function.
        _mp_fmt: format string to override with.
        _mp_params: dict of keyword arguments to override in params.
        fp: file to save to.
        format: ignored.
        **params (optional): keyword arguments passed to the image writer.
    """
    params.update(_mp_params)
    ret = _mp_func(fp, format=_mp_fmt, **params)
    # Reset file position for openpyxl 2.4.5 or it will write 0 sized images.
    if hasattr(fp, 'seek'):
        fp.seek(0)
    return ret
62c4f0e58929c7b75fbd0f7b5851d0e1980f5949
500,824
def get_filename(url: str) -> str:
    """Returns the filename from a link.

    Args:
        url (str): The url to a file location on the website.

    Returns:
        str: Only the filename.
    """
    return url.split("/")[-1]
7396cd5813c68d8f8a39fbd3cac4444fea45f299
494,125
def create_information_dictionary_for_sites(hpo_dfs, selected_hpo_names,
                                            most_popular_race_cids):
    """Function used to create a dictionary that contains the racial makeup
    of a selected number of sites (expressed as a percentage, from a source
    dataframe).

    Parameters
    ----------
    hpo_dfs (dictionary): has the following structure
        key: string representing an HPO ID
        value: dataframe that contains information about the different race
            concepts (IDs and names) and their relative spread within the site

    selected_hpo_names (list): contains strings that represent the different
        HPOs that will ultimately be translated to a dictionary

    most_popular_race_cids (list): list of the most popular concept IDs
        (across all sites)

    Returns
    -------
    racial_percentages (dictionary): has the following structure
        key: race concept ID
        value: list, each index represents one of the sites in the
            'selected_hpo_names' parameter. the value represents the
            proportion of persons from the HPO who have the reported race
            concept ID
    """
    racial_percentages = {}
    # want to get the percentages for each of the race concept IDs
    for race_concept_id in most_popular_race_cids:
        race_percentage_list = []
        # want to look at the sites in parallel - access their dataframe
        for hpo in selected_hpo_names:
            df = hpo_dfs[hpo]
            temp = df.loc[df['race_concept_id'] == race_concept_id]
            if temp.empty:
                race_percentage_list.append(0)
            else:
                val = float(temp['percent_of_site_persons'])  # convert to float
                race_percentage_list.append(val)
        racial_percentages[race_concept_id] = race_percentage_list
    return racial_percentages
a66037eeabeaf2e9c88a6b3931b59f209c43167e
287,917
def blaney_criddle(tmean, p, k=0.85):
    """Evaporation calculated according to [blaney_1952]_.

    Parameters
    ----------
    tmean: pandas.Series, optional
        average day temperature [°C]
    p: pandas.Series/float, optional
        bright sunshine (hour day-1)
    k: float, optional
        calibration coefficient [-]

    Returns
    -------
    pandas.Series containing the calculated evaporation.

    Examples
    --------
    >>> et_blaney_criddle = blaney_criddle(tmean)

    Notes
    -----
    Based on equation 6 in [xu_2001]_.

    .. math:: PE = k p (0.46 T_a + 8.13)

    References
    ----------
    .. [blaney_1952] Blaney, H. F. (1952). Determining water requirements in
        irrigated areas from climatological and irrigation data.
    .. [xu_2001] Xu, C. Y., & Singh, V. P. (2001). Evaluation and
        generalization of temperature-based methods for calculating
        evaporation. Hydrological processes, 15(2), 305-319.
    """
    et = k * p * (0.46 * tmean + 8.13)
    return et
7666d7d4d36dce06331227331391bc5b4918cc16
54,958
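A worked number for blaney_criddle with illustrative scalar inputs: for tmean = 20 °C, p = 0.27 and the default k = 0.85, PE = 0.85 * 0.27 * (0.46 * 20 + 8.13) ≈ 3.98:

blaney_criddle(tmean=20.0, p=0.27)  # ~3.98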
def has_dims(xobj, dims, kind):
    """Checks that, at the minimum, the object has the provided dimensions.

    Args:
        xobj (xarray object): Dataset or DataArray to check dimensions on.
        dims (list or str): Dimensions being checked.
        kind (str): String to precede "object" in the error message.
    """
    if isinstance(dims, str):
        dims = [dims]
    if not all(dim in xobj.dims for dim in dims):
        raise ValueError(
            f"Your {kind} object must contain the "
            f"following dimensions at the minimum: {dims}"
        )
    return True
bcaf44471974bdb32de9e44f2c62b8927f4a37d7
439,389
import re

def _IsValidEnvVarName(name):
    """Validates that a user-provided arg is a valid environment variable name.

    Intended to be used as an argparse validator.

    Args:
        name: str, the environment variable name to validate

    Returns:
        bool, True if and only if the name is valid
    """
    return re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None
b7dece16347b8c25de0e649e793734dcdc46334c
392,495
def get_centers_inside_boxes(centers, boxes):
    """
    Args:
        - centers (Tensor): shape (NA, NB, 3)
        - boxes (Tensor): shape (NA, NB, 6)

    Returns:
        - mask: shape (NA, NB), indicates whether each center is inside
          its box
    """
    cond1 = centers >= boxes[..., :3]
    cond2 = centers <= boxes[..., 3:]
    cond = cond1 & cond2
    mask = cond[..., 0] & cond[..., 1] & cond[..., 2]
    return mask
e947a0060cc2416e6dae52054ac6e8ca5f69eb5e
334,465
def ask_for_input(question):
    """Ask a user for input."""
    response = ''
    while not response:
        response = input(question)
    return response
e884f913e28ed3c4baf3db1fb64f031ebe08c7b2
199,518
def remove_extension(template):
    """Given a filename or path of a template file, return the same without
    the template suffix.

    :param unicode template: The filename of or path to a template file
        which ends with '.template'.
    :return: The given filename or path without the '.template' suffix.
    """
    return template[:-len('.template')]
e3280dad8fa15110665681abbe78ba0308d58e0a
270,861
import json

def prepare_message(gcn_dict):
    """Add pretty printing for message

    Args:
        gcn_dict: Received GCN message

    Returns:
        Formatted GCN message for pretty printing
    """
    gcn_json = json.loads(json.dumps(gcn_dict))
    return (
        "*Title:* {title}\n"
        "*Number:* {number}\n"
        "*Subject:* {subject}\n"
        "*Date*: {date}\n"
        "*From:* {from}\n\n"
        "{body}"
    ).format(**gcn_json["header"], body=gcn_json["body"])
dfb787f877debef7ebd3aef81c19752c1999bde5
540,112
def bet_size_sigmoid(w_param, price_div):
    """Part of SNIPPET 10.4

    Calculates the bet size from the price divergence and a regulating
    coefficient. Based on a sigmoid function for a bet size algorithm.

    :param w_param: (float) Coefficient regulating the width of the bet size
        function.
    :param price_div: (float) Price divergence, forecast price - market price.
    :return: (float) The bet size.
    """
    return price_div * ((w_param + price_div**2)**(-0.5))
cbc6c8d70f6f000e701f140ccbae34b55d7a46df
696,253
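A quick check of bet_size_sigmoid: the function is antisymmetric in price_div and bounded in (-1, 1), e.g. with w_param = 1:

bet_size_sigmoid(1.0, 1.0)   # 1/sqrt(2) ~ 0.7071
bet_size_sigmoid(1.0, -1.0)  # ~ -0.7071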
import json

def extract_genres(genres_str):
    """Extracts the genres in string form as a list of genres.

    Arguments:
        genres_str {string} -- string containing the genres

    Returns:
        list -- the extracted genres
    """
    genres_str = genres_str.replace("'", '\"')
    genres_json = json.loads(genres_str)
    genres_list = []
    for elem in genres_json:
        genres_list.append(elem['name'])
    return genres_list
34dcc0ad7927f61610ac393f71bc744fff18e215
43,025
def count_nodes(depth, num_branches):
    """Count the number of nodes in the current depth."""
    return int(num_branches ** depth)
343faad672a4b350c2808f509ff8aa1393afccad
456,275
import time

def poll_until(predicate, interval):
    """Perform steps until a non-false result is returned.

    This differs from ``loop_until`` in that it does not require a Twisted
    reactor and it allows the interval to be set.

    :param predicate: a function to be called until it returns a non-false
        result.
    :param interval: time in seconds between calls to the function.
    :returns: the non-false result from the final call.
    """
    result = predicate()
    while not result:
        time.sleep(interval)
        result = predicate()
    return result
dd3cabfe14074bbca0d4e556af19310634fac12b
348,538
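A minimal usage sketch for poll_until, waiting on a hypothetical time-based condition:

import time
start = time.time()
poll_until(lambda: time.time() - start > 0.5, interval=0.1)  # True after ~0.5 s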
def fixture_other_case_id() -> str:
    """Return the name of another case"""
    return "maturecogar"
95f390abf7c13e0cc00d0877e8cb08b52c0777fd
142,447
def accumulation_distribution(close, low, high, volume):
    """Cumulative indicator that makes use of price and volume to assess
    whether an asset is being accumulated or distributed.

    :param close: closing price
    :param low: lowest price
    :param high: highest price
    :param volume: daily volume
    :return: ADI: Accumulation/Distribution Indicator
    """
    # Calculate current money flow volume
    cmfv = (((close - low) - (high - close)) / (high - low)) * volume
    ADI = cmfv.cumsum()
    return ADI
f193d256322898bf9dd871fc9b6e8ec238d52f86
94,432
from pathlib import Path

def is_package_file(f: Path, sap_code: str, name_pattern: str) -> bool:
    """Determine if the file is the right application JSON file.

    :param f (Path): json file path
    :param sap_code (str): Adobe SAP code for product being processed
    :param name_pattern (str): json filename pattern to test
    """
    return sap_code in str(f) and name_pattern in str(f)
d9e9be639d7388f21a345938bd666f0bb5dfaa8b
633,979
def string_SizeInBytes(size_in_bytes):
    """Make ``size in bytes`` human readable. Doesn't support sizes greater
    than 1000 PB.

    Usage::

        >>> from __future__ import print_function
        >>> from weatherlab.lib.filesystem.windowsexplorer import string_SizeInBytes
        >>> print(string_SizeInBytes(100))
        100 B
        >>> print(string_SizeInBytes(100*1000))
        97.66 KB
        >>> print(string_SizeInBytes(100*1000**2))
        95.37 MB
        >>> print(string_SizeInBytes(100*1000**3))
        93.13 GB
        >>> print(string_SizeInBytes(100*1000**4))
        90.95 TB
        >>> print(string_SizeInBytes(100*1000**5))
        88.82 PB
    """
    res, by = divmod(size_in_bytes, 1024)
    res, kb = divmod(res, 1024)
    res, mb = divmod(res, 1024)
    res, gb = divmod(res, 1024)
    pb, tb = divmod(res, 1024)
    if pb != 0:
        human_readable_size = "%.2f PB" % (pb + tb / float(1024))
    elif tb != 0:
        human_readable_size = "%.2f TB" % (tb + gb / float(1024))
    elif gb != 0:
        human_readable_size = "%.2f GB" % (gb + mb / float(1024))
    elif mb != 0:
        human_readable_size = "%.2f MB" % (mb + kb / float(1024))
    elif kb != 0:
        human_readable_size = "%.2f KB" % (kb + by / float(1024))
    else:
        human_readable_size = "%s B" % by
    return human_readable_size
2ca967f9196b2f5a36ea83c4937d1cd8624467b9
647,175
def get_agent_value_estimate(agent, state, action):
    """Obtains the agent's value estimate for a particular state and action.

    Args:
        state (torch.Tensor): state of size [batch_size, n_state_dims]
        action (torch.Tensor): action of size [batch_size, n_action_dims]

    Returns a dictionary of action-value estimates:
        direct: the estimate using the Q-network, size [batch_size]
        estimate: the full estimate (using the model), size [batch_size]
    """
    agent.reset()
    agent.eval()
    state = state.to(agent.device)
    action = action.to(agent.device)
    direct_estimate = agent.q_value_estimator(
        agent, state, action, direct=True).detach().view(-1).cpu().numpy()
    estimate = agent.q_value_estimator(
        agent, state, action).detach().view(-1).cpu().numpy()
    return {'direct': direct_estimate, 'estimate': estimate}
c676beed0a7f779a212a0cda03a838770bdc846b
125,181
def fileExtension(filename):
    """Returns file extension if exists

    Arguments:
        filename (str): file name

    Returns:
        str: file extension or None
    """
    if not isinstance(filename, str):
        return None
    fext = filename.split('.')
    if len(fext) < 2:
        return None
    else:
        return fext[-1]
605c8e18648d17fa4c447c895e8162001495be93
486,017
def ID(obj):
    """Get a unique ID from an object for dot node names"""
    return hex(id(obj)).replace('-', '_')
663534a12ea892e4f6e1e50785d67c9e11d31b94
403,224
from typing import Iterable
from typing import Dict
from typing import List
import torch

def cluster_strings(strings: Iterable[str]) -> torch.Tensor:
    """Given a list of strings, assigns a clustering, where each pair of
    identical ground truth strings is in the same cluster.

    Returns a torch.LongTensor containing the cluster id of each ground truth.
    """
    cluster_id_by_truth: Dict[str, int] = {}
    cluster_l: List[int] = []
    for truth in strings:
        cluster_id = cluster_id_by_truth.setdefault(
            truth, len(cluster_id_by_truth))
        cluster_l.append(cluster_id)
    return torch.tensor(cluster_l, dtype=torch.int64)
7821fa946e7a07be13411f54913ee71f1e67dc3a
20,116
def count_parameters(model):
    """Count TensorFlow model parameters.

    Parameters
    ----------
    model: TensorFlow model

    Returns
    -------
    total_parameters
    """
    total_parameters = 0
    # iterate over all trainable variables
    for variable in model.trainable_variables:
        local_parameters = 1
        shape = variable.get_shape()  # shape of the variable
        for i in shape:
            local_parameters *= i  # multiply the dimension values
        total_parameters += local_parameters
    return int(total_parameters)
a696e8ceaf975a61e3228dffc11ee1c14b891c84
69,282
import itertools

def sum_phrases(phrases):
    """Converts a nested list of phrases to a single flattened list."""
    answer = list(itertools.chain.from_iterable(phrases))
    while type(answer[0]) == list:
        answer = list(itertools.chain.from_iterable(answer))
    return answer
ffac082946a6660c410c1b3615e6cf6e05957edb
182,015
def test_is_true(v):
    """Helper function tests for a value that evaluates to Boolean True"""
    return bool(v)
21d84b0d4e93952921660b6afad9958d5fc78036
663,074
def get_roi_names(contour_data):
    """Return the names of the different contour data, e.g. different
    contours from different experts.

    Inputs:
        contour_data (dicom.dataset.FileDataset): contour dataset, read by
            dicom.read_file

    Returns:
        roi_seq_names (list): names of the ROI sequences
    """
    roi_seq_names = [roi_seq.ROIName
                     for roi_seq in list(contour_data.StructureSetROISequence)]
    return roi_seq_names
431e52f70a1153af164f9babb6066bb52661a8f7
675,693
def get_images(instances: dict) -> list:
    """Extracts all image ids and file names from the annotations file."""
    return [(image["id"], image["file_name"]) for image in instances["images"]]
2ac6a35aef5c8c8bac9c329bea83a2920c5e9573
339,730
from typing import Dict

def fit_config(rnd: int) -> Dict[str, str]:
    """Return a configuration with static batch size and (local) epochs."""
    config = {
        "epoch_global": str(rnd),
        "epochs": str(5),
        "batch_size": str(128),
    }
    return config
079cdc09ade57d6fc7d0ecf5fbb109dffec11630
167,348
def getYN(inputtext=''):
    """Prompts the user for a Yes or No response.

    Returns 'y' or 'n' based on the user response. Non-Y/N responses prompt
    the user to re-enter a response.

    *inputtext defines the text preceding the user interaction
    """
    while True:
        outputtext = input(inputtext + '\nPlease Enter Y/N:\n')
        if outputtext.lower() in ['y', 'n']:
            return outputtext.lower()[0]
        else:
            print('INVALID SELECTION')
fc39a4d2095de2541c026adf10279dc6294ef347
457,135
def build_drops(drop_multp, drops):
    """Multiply the sounds by the number of times you want to hear a drop
    sound."""
    return drops * drop_multp
29263d1a7af186ee101af3c27b5119ebc88a89fa
177,753
def get_source_with_id(result):
    """Return a document's `_source` field with its `_id` added.

    Parameters
    ----------
    result : dict
        A document from a set of Elasticsearch search results.

    Returns
    -------
    dict
        The document's `_source` field updated with the doc's `_id`.
    """
    result['_source'].update({'_id': result['_id']})
    return result['_source']
be9b25ad65a8474aa41d3f927664abdb89a674d5
14,975
def filter_names(names, text=""):
    """Returns elements in a list that match a given substring.

    Can be used in conjunction with compare_varnames to return a subset of
    variable names pertaining to a given diagnostic type or species.

    Args:
    -----
        names: list of str
            Input list of names.
        text: str
            Target text string for restricting the search.

    Returns:
    --------
        filtered_names: list of str
            Returns all elements of names that contain the substring
            specified by the "text" argument. If "text" is omitted, then
            the original contents of names will be returned.

    Examples:
    ---------
    Obtain a list of variable names that contain the substrings "CO", "NO",
    and "O3":

        >>> import gcpy
        >>> import xarray as xr
        >>> refdata = xr.open_dataset("ref_data_file.nc")
        >>> devdata = xr.open_dataset("dev_data_file.nc")
        >>> vardict = gcpy.compare_varnames(refdata, devdata)
        >>> var_CO = gcpy.filter_names(vardict['commonvars'], "CO")
        >>> var_NO = gcpy.filter_names(vardict['commonvars'], "NO")
        >>> var_O3 = gcpy.filter_names(vardict['commonvars'], "O3")
    """
    if text != "":
        filtered_names = [k for k in names if text in k]
    else:
        filtered_names = [k for k in names if k]
    return filtered_names
267289c26295351a8441e769eacced665ca481ad
501,416
def _nens_user_extract_username(claims):
    """Return the username from the email claim if the user is a N&S user.

    A N&S user is characterized by 1) coming from either "Google" or
    "NelenSchuurmans" identity provider and 2) having a (verified) email
    domain @nelen-schuurmans.nl.
    """
    # Get the provider name, return None if not present
    try:
        provider_name = claims["identities"][0]["providerName"]
    except (KeyError, IndexError):
        return
    if provider_name not in ("Google", "NelenSchuurmans"):
        return
    if not claims.get("email_verified", False):
        return
    # Unpack email
    username, domain = claims.get("email", "a@b").split("@", 1)
    if domain != "nelen-schuurmans.nl":
        return
    return username
35a1ead03fe33d769c18e5ea52f5a55195a49dee
177,955
def mod(p):
    """Compute modulus of a 3D vector.

    p: array
        Cartesian coordinates
    """
    return (p[0]**2 + p[1]**2 + p[2]**2)**0.5
a15755be4e49120fa323ece0e456ae947d826b6d
44,081
def parser_module_name(parser_fname):
    """Generates the module path for the given parser file name.

    >>> parser_module_name('some_parser.py')
    'bot.parsers.some_parser'
    """
    # Strip the '.py' suffix so the result is a valid module path,
    # as documented in the example above.
    module = parser_fname[:-3] if parser_fname.endswith(".py") else parser_fname
    return ".".join(["bot", "parsers", module])
5bba9bcc4fb8e574b10e3126517aaa1e9b5e870b
192,112
import base64

def decode_base85(cipher: str):
    """Used to solve a layer of base85"""
    try:
        return base64.a85decode(cipher).decode("utf-8")
    except (ValueError, UnicodeDecodeError):
        return ""
e7b1b84deadad1cc1ef95440ddcac6889a70f952
294,437
def tag_in_tags(entity, attribute, value):
    """Return true if the provided entity has a tag of value in its tag list."""
    return value in entity.tags
ad88be5f8848b387f2a261ce5506dffde285a1d8
1,296
def raster_ds_proj(raster_ds):
    """Return the projection WKT of an opened raster dataset.

    Args:
        raster_ds (:class:`gdal.Dataset`): An opened GDAL raster dataset

    Returns:
        str: Well known text (WKT) formatted representation of the projection
    """
    return raster_ds.GetProjection()
8f2ad70645f5d372f750c4694fd62e87041e705d
551,060
def mat_mul(mat_a, mat_b):
    """Function that multiplies two matrices, mat_a and mat_b.

    Each entry of the resulting matrix, mat_c, is a "dot-product" of a row of
    mat_a with a column of mat_b, i.e. C_{ij} = Sum_{k} A_{ik} * B_{kj},
    where index {i} iterates through rows of mat_a, index {j} iterates
    through columns of mat_b, and pivot index {k} iterates through
    columns/rows of mat_a/mat_b.

    :param mat_a: list of lists with user defined a_ij elements
    :param mat_b: list of lists with user defined b_ij elements
    :return: mat_c = mat_a x mat_b, a list of lists with
        c_{ij} = Sum_{k} a_{ik} * b_{kj} elements
    """
    # check if the operation can be done
    if len(mat_a[0]) == len(mat_b):
        print("The product of the two matrices is:")
    else:
        return "You cannot multiply these matrices!\n" \
               "The number of columns in the first matrix should equal " \
               "the number of rows in the second matrix!\n"
    # sort of minimalist one-line solution:
    # first we transform the second matrix into a tuple of column tuples
    tuple_mat_b = tuple(zip(*mat_b))
    # now we use a nested list comprehension with a return
    return [[sum(el_a * el_b for el_a, el_b in zip(row_mat_a, col_mat_b))
             for col_mat_b in tuple_mat_b] for row_mat_a in mat_a]
    # hereafter is a solution detailing the one-liner presented above:
    """
    # the number of rows in matrix mat_c equals the number of rows in mat_a
    # the number of columns in matrix mat_c equals the number of columns in mat_b
    mat_c_row = len(mat_a)
    mat_c_col = len(mat_b[0])
    # initialize mat_c with zeroes
    mat_c = [[0 for idx in range(mat_c_col)] for jdx in range(mat_c_row)]
    for idx in range(mat_c_row):  # index going through rows of mat_c
        for jdx in range(mat_c_col):  # index going through columns of mat_c
            for kdx in range(len(mat_a[0])):  # index through columns/rows of mat_a/mat_b
                mat_c[idx][jdx] += mat_a[idx][kdx] * mat_b[kdx][jdx]
    return mat_c
    """
489e2b820127db61c3f86f215bf7b09136eb2f6f
501,110
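A worked example for mat_mul with 2x2 inputs (note the function also prints a message before returning):

A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
mat_mul(A, B)  # [[19, 22], [43, 50]]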
import math

def cal_line_length(point1, point2):
    """Calculate the length of a line.

    Args:
        point1 (List): [x, y]
        point2 (List): [x, y]

    Returns:
        length (float)
    """
    return math.sqrt(
        math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
c1f86763523da7eb7e713319cba6429268021471
625,853
def _aggregates_associated_with_providers(a, b, prov_aggs):
    """Quickly check if the two rps are in the same aggregates.

    :param a: resource provider ID for first provider
    :param b: resource provider ID for second provider
    :param prov_aggs: a dict keyed by resource provider IDs, of sets of
        aggregate ids associated with that provider
    """
    a_aggs = prov_aggs[a]
    b_aggs = prov_aggs[b]
    return a_aggs & b_aggs
b2da5ff44a0c8abf3204987d79a3eebdd783efb1
663,994
def project_data(X, U, k):
    """Computes reduced data representation (projected data).

    :param X: Normalized features' dataset
    :type X: numpy.array
    :param U: eigenvectors of covariance matrix
    :type U: numpy.array
    :param k: Number of features in reduced data representation
    :returns: Reduced data representation (projection)
    :rtype: numpy.array
    """
    U_reduce = U[:, 0:k]
    Z = X.dot(U_reduce)
    return Z
9fc05e0796a8fa57fdd75b17d4c92d26d429db86
316,043