content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_neighbor_distances(ntw, v0, l):
    """Get distances to the nearest node neighbors along connecting edges.

    Parameters
    ----------
    ntw : spaghetti.Network
        spaghetti Network object.
    v0 : int
        Node id
    l : dict
        key is tuple (start node, end node); value is float.
        Cost per edge to travel, e.g. distance.

    Returns
    -------
    neighbors : dict
        key is int (node id); value is float (distance)

    Examples
    --------
    >>> import pysal.explore.spaghetti as spgh
    >>> from pysal.lib import examples
    >>> ntw = spgh.Network(examples.get_path('streets.shp'))
    >>> neighs = spgh.util.get_neighbor_distances(ntw, 0, ntw.edge_lengths)
    >>> neighs[1]
    102.62353453439829
    """
    neighbors = {}
    # Every incident edge contributes the node at its far end.
    for edge in ntw.enum_links_node(v0):
        other_node = edge[1] if edge[0] == v0 else edge[0]
        neighbors[other_node] = l[edge]
    return neighbors
78a9e00d2e9d3c1985a04958c5853b5ffc837916
103,850
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
from typing import MutableMapping


def _kwargs_from_call(param_names: List[str], kwdefaults: Dict[str, Any],
                      args: Tuple[Any, ...],
                      kwargs: Dict[str, Any]) -> MutableMapping[str, Any]:
    """
    Inspect the input values received at the wrapper for the actual function call.

    :param param_names: parameter (*i.e.* argument) names of the original (decorated) function
    :param kwdefaults: default argument values of the original function
    :param args: arguments supplied to the call
    :param kwargs: keyword arguments supplied to the call
    :return: resolved arguments as they would be passed to the function
    """
    # pylint: disable=too-many-arguments
    # Start from the defaults, then layer the actual call values on top.
    resolved_kwargs = dict(kwdefaults)  # type: MutableMapping[str, Any]
    # zip() stops at the shorter sequence, so positional arguments beyond
    # the declared parameters are silently dropped; the decorated function
    # itself will raise for them later.
    resolved_kwargs.update(zip(param_names, args))
    resolved_kwargs.update(kwargs)
    return resolved_kwargs
996714eaa88a88bb52d2a45d82195f9a7e9b1755
103,851
def share_of_shelf_index(products_of_brand_x, total_products):
    """Return share of shelf index: the percentage of total products made up by brand X.

    Args:
        products_of_brand_x (int): Number of products of brand X in portfolio,
            category, or on shelf.
        total_products (int): Total number of products of all brands in
            portfolio, category, or on shelf.

    Returns:
        Percentage of shelf, category, or portfolio made up by brand X
    """
    fraction = products_of_brand_x / total_products
    return fraction * 100
cde139fd7eab948033c125d6342b7f3ae2d9163c
103,852
import re


def validate_mysql_password(password):
    """Validate a password against the default MDS MySQL password rules.

    Rules:
      - at least one digit
      - at least one uppercase and one lowercase letter
      - at least one special symbol from @$!#%*?&
      - between 8 and 20 characters long

    Args:
        password (str): The password to validate

    Returns:
        bool: True if the given password is valid, False otherwise
    """
    # {8,20} enforces the documented length; the previous pattern used
    # {6,20}, contradicting the "8 to 20 characters" rule above.
    reg = (r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&])"
           r"[A-Za-z\d@$!#%*?&]{8,20}$")
    # Return an actual bool (docstring promises True/False); truthiness is
    # unchanged for callers that only test the result.
    return re.search(reg, password) is not None
0f0c62666b2c9e2565d30805beb8083bc360b9b1
103,853
def igraph_centrality(g, weights='weight', cost=False, damping=0.99):
    """Return dict of vertex centrality measures.

    Parameters
    ----------
    g : igraph object
        may be directed or indirected, weighted or unweighted
    weights : str, default='weight'
        name of node attribute to weight edges, Set to None for unweighted
    cost : bool, default is False
        if True, then weights are costs; else weights are importances

    Returns
    -------
    result : dict of {centrality str : score float}
        if cost: 'eigenvector', 'pagerank', 'authority', 'hub' ignore weights
        if not cost: 'betweenness', 'closeness' ignore weights
    """
    if not g.is_weighted() or weights not in g.es.attribute_names():
        weights = None
    # Importance-style measures only honor weights when they are importances;
    # cost-style measures only honor weights when they are costs.
    importance_weights = weights if not cost else None
    cost_weights = weights if cost else None
    scores = {
        'outweight': g.strength(weights=weights, mode='out'),
        'inweight': g.strength(weights=weights, mode='in'),
    }
    if not g.is_weighted():
        scores['eigenvector'] = g.eigenvector_centrality(weights=importance_weights)
        scores['pagerank'] = g.pagerank(weights=importance_weights, damping=damping)
        scores['authority'] = g.authority_score(weights=importance_weights)
        scores['hub'] = g.hub_score(weights=importance_weights)
    scores['betweenness'] = g.betweenness(weights=cost_weights)
    scores['closeness'] = g.closeness(weights=cost_weights)
    return scores
1e310006efbfcbd4d86f4d015d8e35385d007e67
103,854
def lexical_diversity(set_, list_):
    """Compute lexical diversity: unique tokens divided by total tokens.

    Returns 0 for an empty token list.
    """
    total = len(list_)
    if total < 1:
        return 0
    return len(set_) / total
36b8625affcdad2f56eb313ac309ccb19b4175a6
103,856
def _calc_nemsio_hgt(f):
    """Calculates the geopotential height from the variables hgtsfc and delz.

    Parameters
    ----------
    f : xarray.DataSet
        the NEMSIO opened object.  Can be lazily loaded.

    Returns
    -------
    xr.DataArray
        Geopotential height with variables, coordinates and variable attributes.
    """
    sfc = f.hgtsfc
    dz = f.delz
    # Broadcasting adds the surface height onto every layer thickness.
    z = dz + sfc
    # Running sum over the full vertical dimension; min_periods=1 keeps
    # partial sums at the first levels instead of producing NaN.
    # NOTE(review): this assumes the 'z' dimension is ordered so that the
    # rolling sum accumulates from the surface upward -- confirm against the
    # NEMSIO vertical-level convention.
    z = z.rolling(z=len(f.z), min_periods=1).sum()
    z.name = 'geohgt'
    z.attrs['long_name'] = 'Geopotential Height'
    z.attrs['units'] = 'm'
    return z
7c5778f85cea0b6a0e6b141dd93f1e27008ec38a
103,861
def normalize_tourism_kind(shape, properties, fid, zoom):
    """
    There are many tourism-related tags, including 'zoo=*' and 'attraction=*'
    in addition to 'tourism=*'.  This function promotes things with zoo and
    attraction tags to have those values as their main kind.

    See https://github.com/mapzen/vector-datasource/issues/440 for more details.
    """  # noqa
    # 'zoo' wins over 'attraction'; whichever matches first becomes the kind.
    for tag in ('zoo', 'attraction'):
        value = properties.pop(tag, None)
        if value is not None:
            properties['kind'] = value
            properties['tourism'] = 'attraction'
            break
    return (shape, properties, fid)
1318477fc71c6298cc09f12809f3b846554082f5
103,865
import itertools


def _extract_group_summary_data(dataseries, name=None):
    """
    Given a list of dataseries, determine the earliest start date, latest end
    date, and create a string of their various data types.

    :param list dataseries: dataseries
    :param name: display name for the group if desired
    :type name: str or None
    :return: overall metadata for a bunch of dataseries
    :rtype: dict
    """
    start_dates = []
    end_dates = []
    data_types = set()
    # One pass gathers all three aggregates.
    for series in dataseries:
        start_dates.append(series['start_date'])
        end_dates.append(series['end_date'])
        data_types.update(series['data_types'])
    return {
        'name': name,
        'start_date': min(start_dates),
        'end_date': max(end_dates),
        'data_types': ', '.join(sorted(data_types)),
        'parameters': dataseries
    }
83eed2a3c9f99fbb47c1ecae388cc0bcd4d81e6f
103,876
def get_partitioner_dict(*modules):
    """
    Given a list of modules which have a `partition` function and optionally a
    `name` variable, return a dictionary that maps `name` -> module for any
    modules that have a `name`.  Callers reach the standardized partition
    function as ``partitioners[name].partition``.  Modules without a (truthy)
    `name` are silently skipped.
    """
    partitioners = {}
    for m in modules:
        # Walrus assignment: only register modules declaring a non-empty name.
        if name := getattr(m, 'name', None):
            partitioners[name] = m
    return partitioners
6946622f76e8f12e910b2b308f32155739e95486
103,877
def flatten(*args):
    """Recursively flattens a list containing other lists or single items
    into a list.

    Examples::

        >>> flatten()
        []
        >>> flatten(2)
        [2]
        >>> flatten(2, 3, 4)
        [2, 3, 4]
        >>> flatten([2, 3, 4])
        [2, 3, 4]
        >>> flatten([[2, 3], [4, 5], 6, [7, 8]])
        [2, 3, 4, 5, 6, 7, 8]
    """
    if not args:
        return []
    if len(args) > 1:
        # Treat multiple positional arguments as one list to flatten.
        return flatten(list(args))
    item = args[0]
    if hasattr(item, "__iter__") and not isinstance(item, str):
        flattened = []
        for element in item:
            flattened.extend(flatten(element))
        return flattened
    return [item]
6223e19df0e850d7da29ae6959a4ae182eb1797c
103,879
def extract_grid_blocks(img, blocks_per_image=(3, 3)):
    """
    Divide image into grids (blocks).

    :param img: input 1-channel image
    :param blocks_per_image: number of grids in tuple (height, width)
    :return: flat list of blocks, scanned row by row
    """
    rows, cols = blocks_per_image
    block_h = int(img.shape[0] / rows)
    block_w = int(img.shape[1] / cols)
    blocks = []
    for r in range(rows):
        top = r * block_h
        for c in range(cols):
            left = c * block_w
            blocks.append(img[top:top + block_h, left:left + block_w])
    return blocks
729a62422ca4353d04a154554f87ed7725816dbe
103,881
import torch


def to_tensor(input):
    """Convert *input* into a ``torch.Tensor`` of the default dtype.

    :param input: a tensor or anything ``torch.tensor`` accepts
        (number, list, numpy array, ...)
    :return: a tensor with ``torch.get_default_dtype()`` dtype
    """
    # torch.get_default_dtype() replaces the original `.to(torch.Tensor())`,
    # which allocated a throwaway tensor just to borrow its (default) dtype.
    default_dtype = torch.get_default_dtype()
    if torch.is_tensor(input):
        return input.to(default_dtype)
    return torch.tensor(input).to(default_dtype)
d2156cedcecd35f6276e76645507932ea9f9be5f
103,885
def onlyinA(listA, listB):
    """Return the elements that appear in list A but not in list B."""
    # Set difference does the filtering in one step.
    return list(set(listA) - set(listB))
2c3dd3b995430ab172706e49c306c97d3e1cd0c8
103,886
def get_any_of(getter, possible_keys, default=None):
    """Search for the value of any of `possible_keys` in `getter`,
    returning `default` if none are found.

    Each key is tried upper-cased first, then lower-cased; values that are
    missing or equal to "undefined"/"UNDEFINED" are skipped.

    >>> get_any_of( {"A":1}, ["C","D","A"], "UNDEFINED")
    1
    >>> get_any_of( {"X":1}, ["C","D","A"], "UNDEFINED")
    'UNDEFINED'
    """
    for key in possible_keys:
        val = getter.get(key.upper(), None)
        if val is None:
            val = getter.get(key.lower(), None)
        if val is not None and val not in ["undefined", "UNDEFINED"]:
            return val
    # Reached only after *every* key has been tried.  The original used a
    # for/else, which is trivially misread as "return default on the first
    # miss"; an explicit post-loop return states the documented behavior.
    return default
9a6f5612e00e1fed2734334f2d0c8d8aab628a3f
103,887
def read_txt_as_list(filepath):
    """
    Load txt file content into list.

    :param filepath: filepath name
    :return: list of lines with trailing newlines stripped
    """
    # Open read-only: the original used 'r+' (read/write), which needlessly
    # requires write permission and fails on read-only files.  The context
    # manager guarantees the handle is closed even if reading raises.
    with open(filepath, 'r') as f:
        return [line.rstrip('\n') for line in f]
c360c709464de0e7a6e3109a82ff853b7d9e79a2
103,892
def v_or_n(value):
    """Return the right-stripped value, or None when value is empty/falsy."""
    if not value:
        return None
    return value.rstrip()
20e9f58057bc7fb37e066f5939cc08eb2832d972
103,894
def get_py_tree_path(behaviour):
    """
    Accept a behaviour/composite and return a string representation of its
    full path, root first, joined by '>'.
    """
    segments = []
    node = behaviour
    # Walk up the parent chain, collecting names leaf-first.
    while True:
        segments.append(node.name)
        node = node.parent
        if not node:
            break
    return ">".join(reversed(segments))
52f5f5a7a0b246d3f02ce95a22dbb79a82582337
103,911
from bs4 import BeautifulSoup


def html_extraction(text):
    """
    Remove from the text html tags found on stack overflow data.

    :param text: string containing textual data mixed with specific html tags
        found on stack overflow data
    :return: a string in which specific html tags and their content are
        removed, while other tags are removed but their content kept
    """
    soup = BeautifulSoup(text, 'lxml')
    # These tags (links, code blocks, quotes, divs) are dropped *with* their
    # content; everything else only loses its markup.
    for tag_name in ('a', 'div', 'code', 'blockquote'):
        for node in soup.find_all(tag_name):
            node.extract()
    # Collapse all runs of whitespace to single spaces.
    return " ".join(soup.text.split())
88e2f5d771e012f1f6320b00078af2d9d4b2a66c
103,912
def intersection(l1, l2):
    """
    Determines the intersection between two lines l1 and l2.

    l1 and l2 each represent a line with line formula ax + by = c, where
    a: l[0], b: l[1], c: l[2].  (The formulas below solve the 2x2 system by
    Cramer's rule; the previous docstring's "c: -l[2]" did not match them.)

    :param l1: line 1
    :param l2: line 2
    :return: Intersection (x, y), or False if the lines are parallel
    """
    # Determinant of the coefficient matrix; zero means parallel lines.
    d = l1[0] * l2[1] - l1[1] * l2[0]
    if d == 0:
        return False
    dx = l1[2] * l2[1] - l1[1] * l2[2]
    dy = l1[0] * l2[2] - l1[2] * l2[0]
    return dx / d, dy / d
099e53a6c0d84b10c6d658eb80fc6cdeb846bc11
103,914
def months_to_seconds(months):
    """Convert `months` to seconds (1 month = 2.62974e6 s, about 30.4 days)."""
    seconds_per_month = 2.62974e6
    return seconds_per_month * months
b1fbf82dfe7e433c542b51c027d0ba3885cef162
103,920
def perform_update_group_status(dispatcher, ugs_intent):
    """Perform an :obj:`UpdateGroupStatus` by delegating to its scaling group."""
    group = ugs_intent.scaling_group
    return group.update_status(ugs_intent.status)
9d018f2b0096f977428800a4f4b7893b82b14f1a
103,921
def list_to_unicode_str(lst):
    """Convert a list of integer code points to a unicode string."""
    return "".join(chr(code_point) for code_point in lst)
1fc3dde75806c2cafe0c6965b15b9b19e53d05a4
103,926
def cbsa_to_location_id(cbsa_code: str) -> str:
    """Turns a CBSA code into a location_id.

    For information about how these identifiers are brought into the CAN code
    see https://github.com/covid-projections/covid-data-public/tree/main/data/census-msa
    """
    return "iso1:us#cbsa:" + cbsa_code
5341741ba22133dd11fcc7e5a91cab6b399f18f8
103,930
def replace_html(index):
    """Formats html.

    Args:
        index: index html content

    Returns:
        Modified index.html content
    """
    # Markup injected in place of the closing </body> tag: a click-tag layer,
    # DCLK quick-preview hook, and a postMessage that tells the parent frame
    # to set up a fixed footer.  [[...]] placeholders are substituted later.
    body = """ [[clicktag_layer]] <script> var clickTag = "[[clicktag_url]]"; </script> <script data-exports-type="dclk-quick-preview">if(typeof studio !== "undefined" && studio.Enabler) { studio.Enabler.setRushSimulatedLocalEvents(true); } </script> <script> window.top.postMessage({ msg: "adcase", format:"footerFixed", params: { height: [[height]], type:"standard", action:"setup" }}, "*"); </script> </body>"""
    # New <head> opener carrying the ad.size meta tag.
    head = "<head><meta name=\"ad.size\" content=\"width=[[width]],height=[[height]]\">"
    # Rename any pre-existing ad.size meta first, so the one injected with
    # `head` below is the only authoritative ad.size tag.
    index = index.replace("<meta name=\"ad.size\"", "<meta name=\"old.adsize\"")
    index = index.replace("<head>", head)
    index = index.replace("</body>", body)
    # Likewise neutralize any pre-existing GCD meta tag.
    index = index.replace("<meta name=\"GCD\"", "<meta name=\"old.GCD\"")
    return index
38e3f3d3cb8d1767f0e119fd25ec4abaf94007df
103,931
import re


def clean_text_from_general_punctuation_unicode(text):
    """Replace every General Punctuation unicode character with a space.

    See https://apps.timwhitlock.info/unicode/inspect/hex/2000-206F
    """
    # One character class covers the whole U+2000..U+206F block.
    return re.sub(r"[\u2000-\u206F]", " ", text)
338034c83475131d3a3478aafe3497350da4910a
103,932
def layer_type(layer):
    """Gets the type of a layer.

    Args:
        layer (keras Layer): layer you want the type of

    Returns:
        type (str): what kind of layer it is. Eg "Dense", "Conv2D", "SimpleRNN"
    """
    cls = layer.__class__
    return cls.__name__
82c9061e2a68d4e63fd54ef617949678fd7d60ce
103,935
import functools


def parameterize_one(name, values):
    """
    A decorator that parameterizes a test case.

    :param name: The parameter name to parameterize.
    :param values: The values to supply to the parameter.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Run the wrapped test once per value, each inside its own subTest
            # so failures are reported individually.
            for value in values:
                params = {name: value}
                with self.subTest(**params):
                    func(self, *args, **params, **kwargs)
        return wrapper
    return decorator
5ffd11a2e84044a12cbaf45338574b37f5f572a7
103,936
def round_up( val, mult_of ):
    """Round `val` up to the nearest multiple of `mult_of`."""
    remainder = val % mult_of
    if remainder == 0:
        return val
    return val + (mult_of - remainder)
324b03387e873d07fc88ee304a9cdb0556e7b50e
103,938
def unpack_unique_together(opts, only_fields=()):
    """
    Unpack unique together fields.

    :param opts: Model options
    :type opts: `django.db.models.options.Options`
    :param only_fields: Fields that should be considered.
    :type only_fields: `collections.Iterable`
    :return: Flat list of fields.
    """
    # Flatten the nested unique_together groups, keeping only wanted fields.
    return [
        field
        for group in opts.unique_together
        for field in group
        if field in only_fields
    ]
fef88cdf85ff589d31ced8477718427d629bee01
103,940
import time


def await_condition(condition, timeout=2000):
    """Return True if `condition()` becomes truthy within `timeout` polls.

    Polls once per millisecond, so `timeout` is roughly the wait in ms.
    """
    attempts = 0
    while attempts < timeout:
        if condition():
            return True
        time.sleep(0.001)
        attempts += 1
    return False
4173e7d96b64f5574becbb6bab5aa49f99da0395
103,947
def _get_spot_volume(image, spot_z, spot_y, spot_x, radius_z, radius_yx):
    """Get a subimage of a detected spot in 3 dimensions.

    Parameters
    ----------
    image : np.ndarray
        Image with shape (z, y, x).
    spot_z : np.int64
        Coordinate of the detected spot along the z axis.
    spot_y : np.int64
        Coordinate of the detected spot along the y axis.
    spot_x : np.int64
        Coordinate of the detected spot along the x axis.
    radius_z : int
        Radius in pixels of the detected spot, along the z axis.
    radius_yx : int
        Radius in pixels of the detected spot, on the yx plan.

    Returns
    -------
    image_spot : np.ndarray
        Reference spot in 3-d.
    """
    # Clamp the bounding box of the spot to the image extent on each axis.
    z_lo = max(0, int(spot_z - radius_z))
    z_hi = min(image.shape[0], int(spot_z + radius_z))
    y_lo = max(0, int(spot_y - radius_yx))
    y_hi = min(image.shape[1], int(spot_y + radius_yx))
    x_lo = max(0, int(spot_x - radius_yx))
    x_hi = min(image.shape[2], int(spot_x + radius_yx))
    # +1 keeps the upper bound inclusive.
    return image[z_lo:z_hi + 1, y_lo:y_hi + 1, x_lo:x_hi + 1]
eb0b59b55b974059bf8c8b6a34acbdfd522795c0
103,949
def process_results(output):
    """Utility function which decodes stdout text from the water supply model.

    Returns
    =======
    results : dict
        A dictionary where keys are the results e.g. `cost` and `water`
    """
    results = {}
    lines = output.decode('utf-8').split('\n')
    # Only the first two lines carry "name,value" pairs.
    for line in lines[:2]:
        parts = line.split(',')
        if len(parts) == 2:
            results[str(parts[0])] = float(parts[1])
    return results
6afddefb13ad40c0f4f89c46a2ca43b4ad1c6af2
103,951
def initialize_weights(y_data, initial_val=1):
    """
    initialize all weights with a value.

    :param y_data: [xr dataset] y_dataset data. this is used to get the dimensions
    :param initial_val: [num] a number to initialize the weights with. should be
        between 0 and 1 (inclusive)
    :return: [xr dataset] dataset weights initialized with a uniform value
    """
    # Deep copy so the returned weights never alias y_data's underlying buffers.
    weights = y_data.copy(deep=True)
    for v in y_data.data_vars:
        # Force the (possibly lazy/dask-backed) variable into memory so the
        # .loc assignment below is applied eagerly.
        weights[v].load()
        # NOTE(review): `.loc[:, :]` assumes every data variable is 2-D --
        # confirm this holds for all datasets passed in.
        weights[v].loc[:, :] = initial_val
    return weights
1c9ccf86349854cd1000ec6114a84f50f5b5941f
103,954
def commonancestors(*nodes):
    """
    Determine common ancestors of `nodes`.

    >>> from anytree import Node
    >>> udo = Node("Udo")
    >>> marc = Node("Marc", parent=udo)
    >>> lian = Node("Lian", parent=marc)
    >>> dan = Node("Dan", parent=udo)
    >>> jet = Node("Jet", parent=dan)
    >>> jan = Node("Jan", parent=dan)
    >>> joe = Node("Joe", parent=dan)

    >>> commonancestors(jet, joe)
    (Node('/Udo'), Node('/Udo/Dan'))
    >>> commonancestors(jet, marc)
    (Node('/Udo'),)
    >>> commonancestors(jet)
    (Node('/Udo'), Node('/Udo/Dan'))
    >>> commonancestors()
    ()
    """
    ancestor_lists = [node.ancestors for node in nodes]
    common = []
    # Walk the ancestor chains level by level, root first, and stop at the
    # first level where the nodes disagree.
    for level in zip(*ancestor_lists):
        candidate = level[0]
        if all(candidate is other for other in level[1:]):
            common.append(candidate)
        else:
            break
    return tuple(common)
77b68d3eb2cc5aa27c8b67dca6935cc39cf66163
103,957
from typing import Sequence
from typing import Optional


def ritem_prefix(items: Sequence[str], prefix: str) -> Optional[str]:
    """
    Args:
        items (sequence of str): The items to search
        prefix (str): The prefix to find

    Returns:
        The rightmost element that starts with `prefix`, or `None`
    """
    # Scanning the reversed sequence makes "rightmost" the first match.
    return next(
        (item for item in reversed(items) if item.startswith(prefix)),
        None,
    )
ef1cd2bcfc1f98fe69f6a166d5d520ddff5b7c1c
103,958
import ast


def expr2ast(expr_text):
    """
    Parse the given text as a module, and then return the first element in
    the body of that module.

    This is shorthand for the idiom of parsing a single expression, but it
    doesn't check that the string is a single expression (it could be any
    number of valid statements).
    """
    module = ast.parse(expr_text, mode='exec')
    return module.body[0]
ee43f16d0247c2c44fcd5a05b32dc601ede08d2a
103,962
def _parse_phone_number(phone_number: str) -> str:
    """Parse the digits from a string and format as +1xxxxxxxxxx."""
    digits = "".join(ch for ch in phone_number if ch.isdigit())
    # A bare 10-digit number is assumed to be US/Canada; prepend country code.
    if len(digits) == 10:
        digits = "1" + digits
    return "+" + digits
436393c1f256be8aea224cc895c97b29aa989f0d
103,963
from collections import Counter


def extract_predicate_token(data, pred_list=None, verbose=True):
    """
    Builds a set that contains the relationship predicates.
    Filters infrequent tokens.

    :param data: iterable of image dicts, each with a 'relationships' list of
        dicts carrying a 'predicate' key
    :param pred_list: optional whitelist of predicates; when falsy, all
        predicates are kept (default None; the original used a mutable
        default ``[]``, which behaves identically here)
    :param verbose: print a summary line when True
    :return: (set of predicate tokens, dict of predicate -> count in
        most-common order)
    """
    if pred_list is None:
        pred_list = []
    # collections.Counter is the runtime class; typing.Counter is meant for
    # annotations only.
    token_counter = Counter(
        relation['predicate']
        for img in data
        for relation in img['relationships']
        if not pred_list or relation['predicate'] in pred_list
    )
    tokens = set(token_counter)
    token_counter_return = dict(token_counter.most_common())
    if verbose:
        print(('Keeping %d / %d predicates with enough instances'
               % (len(tokens), len(token_counter))))
    return tokens, token_counter_return
4b3b65b9777c3ac480f4923193899e3e8308b6a0
103,967
import csv


def load_fsd50k_vocabulary(data_path):
    """Load vocabulary of FSD50K to relate FSD50K labels with AudioSet ontology.

    Args:
        data_path (str): Path to the vocabulary file

    Returns:
        * fsd50k_to_audioset (dict): vocabulary to convert FSD50K to AudioSet
        * audioset_to_fsd50k (dict): vocabulary to convert from AudioSet to FSD50K
    """
    fsd50k_to_audioset = {}
    audioset_to_fsd50k = {}
    with open(data_path, "r") as fhandle:
        # Columns: index, FSD50K label, AudioSet label.
        for row in csv.reader(fhandle, delimiter=","):
            fsd50k_to_audioset[row[1]] = row[2]
            audioset_to_fsd50k[row[2]] = row[1]
    return fsd50k_to_audioset, audioset_to_fsd50k
32bc77a4fee70cc25079c4bbaeca14109681e7a0
103,970
def number_of_radial_points(Z):
    """
    Select the number of radial grid points for the subintegral around an
    atom with atomic number Z.
    """
    # Hydrogen gets a base quota of 20 points; each filled shell (period
    # boundary) adds 5 more.
    shell_thresholds = (2, 11, 19, 37, 55, 87)
    extra_shells = sum(1 for threshold in shell_thresholds if Z >= threshold)
    return 20 + 5 * extra_shells
3f80a7eb61d8a458ca9b0117211ebf5f1b2a1bea
103,972
import random
import string


def random_string_of_length(length):
    """
    Generates a random string of ASCII letters of a fixed length.

    Args:
        length (int): fixed length desired

    Returns:
        string: random string generated
    """
    letters = string.ascii_letters
    return "".join(random.choice(letters) for _ in range(length))
9614be403fd0a0cf6996fbcdaaa283a53a035ac2
103,973
import requests


def fetch_repos(
    language: str = "",
    spoken_language_code: str = "",
    since: str = "daily",
) -> dict:
    """Fetch trending repositories on GitHub.

    Parameters:
        language (str, optional): Filtering by language, eg: python
        spoken_language_code (str, optional): The spoken language, eg: en for english
        since (str, optional): The time range, choose from: [daily, weekly,
            monthly]. Defaults to "daily"

    Returns:
        A list of dicts containing information for the trending repositories found
    """
    url = (
        "https://ghapi.huchen.dev/repositories?"
        f"language={language}&since={since}&spoken_language_code={spoken_language_code}"
    )
    repos = requests.get(url).json()
    # Convenience field combining owner and repository name.
    for repo in repos:
        repo["fullname"] = f"{repo['author']}/{repo['name']}"
    return repos
fa0b8c4a155db8ae7159c0158dfe1ec5a592c559
103,975
def _infidelity(a, b):
    """Infidelity between two kets (1 minus the overlap magnitude)."""
    fidelity = abs(a.overlap(b))
    return 1 - fidelity
b76765e61abd9da3c36f0942e6c7096efe0827ae
103,977
def _get_frequency_offset_alias(frequency):
    """Get pandas offset alias for given frequency.

    Unknown frequencies fall back to quarterly ('QS-DEC').
    """
    aliases = {
        'hourly': '1H',
        'daily': '1D',
        'monthly': '1MS',
        'yearly': '1A',
    }
    return aliases.get(frequency, 'QS-DEC')
afa54d0f2522150f7014cc23238cb1b15b494de4
103,979
def combineTernaryValues(statsValue, transValue):
    """
    Both values are in {0, 1, 2}; combine them injectively via base-3
    encoding plus a fixed offset of 3 (yielding values in 3..11).
    """
    return statsValue + 3 * transValue + 3
95045aab82a8ba4647ccf454c8f6ceea92cd04c3
103,983
def zeros_after(n: int) -> int:
    """Counts the number of trailing zeros in n.

    Args:
        n (int): The number to count the zeros of

    Returns:
        int: The number of trailing zeros in n
    """
    digits = str(n)
    # Stripping trailing '0's and comparing lengths avoids an explicit loop.
    return len(digits) - len(digits.rstrip("0"))
b0bd855fe170a1026bafbe6d610f6c71a5ffb978
103,986
def _simple_string_value(tree, kind, name):
    """Base function for extracting a simple parameter value.

    :param tree: the parameters tree.
    :param kind: the xml-tag name of the parameter.
    :param name: the name of the parameter.
    :returns value: the content of the parameter 'Value' as string.
    """
    # XPath: any <kind Name="name"> element's <Value> child.
    query = f".//{kind}[@Name='{name}']/Value"
    return tree.find(query).text
473919cc08fb0b1fd0b73dcdcdc730affd29daff
103,987
def format_date_url(date, fmt='%Y-%m-%d'):
    """
    Formats a date as a string for making ABE requests.

    :param {datetime} date: the date to format
    :param {str} fmt: the format to use (defaults to YYYY-MM-dd)
    :return: the date in the specified string format
    """
    formatted = date.strftime(fmt)
    return formatted
6f29e82da681efa6d9f2ed30343c4ef02e11d77e
103,988
import hashlib


def _hash_file(filename, blocksize=65536):
    """Hashes the given file.

    :param filename: Path to file
    :param blocksize: bytes read per chunk (keeps memory bounded)
    :returns: Hex digest of the md5 checksum
    """
    hasher = hashlib.md5()
    with open(filename, 'rb') as infile:
        # iter() with a sentinel yields chunks until read() returns b''.
        for chunk in iter(lambda: infile.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
aecf6518956c1d55aef9eef9200706e421a35f51
103,995
def _height(node):
    """Return height of tree rooted at node (-1 for an empty tree)."""
    if not node:
        return -1
    return 1 + max(_height(node.left), _height(node.right))
1f806f12ce644ee76c003cee39fb7c30097eb291
104,001
def isCongruent(a , b , m):
    """
    Checks if a is congruent to b modulo m, i.e., a ≡ b (mod m).

    Parameters
    ----------
    a : int
        denotes a in equation a ≡ b (mod m)
    b : int
        denotes b in equation a ≡ b (mod m)
    m : int
        denotes m in equation a ≡ b (mod m)

    return : bool
        True if a is congruent to b modulo m, otherwise False
    """
    # a ≡ b (mod m)  <=>  m divides (a - b).
    return (a - b) % m == 0
5c1dddd0bef3c0ffed4279fe02b680414eb7d2d5
104,008
def make_file_safe_api_name(api_name):
    """Make an api name safe for use in a file name.

    Keeps letters, digits, and the characters '.', '_', '-'; drops the rest.
    """
    return "".join(
        ch for ch in api_name
        if ch.isalpha() or ch.isdigit() or ch in "._-"
    )
8a768ce9e189a79bc1ed3fa8b91218b50cd5aefc
104,009
def get_generated_cols(X_original, X_transformed, to_transform):
    """
    Returns a list of the generated/transformed columns.

    Arguments:
        X_original: df
            the original (input) DataFrame.
        X_transformed: df
            the transformed (current) DataFrame.
        to_transform: [str]
            a list of columns that were transformed (as in the original
            DataFrame), commonly self.cols.

    Output:
        a list of columns that were transformed (as in the current DataFrame).
    """
    # Columns present before the transform that were NOT transformed...
    untouched = set(X_original.columns) - set(to_transform)
    # ...everything else in the current frame must have been generated.
    return list(set(X_transformed.columns) - untouched)
8332ac6e33e5519bcb83d65b5acf0989d39aff1a
104,015
def extract_metadata_from_query_results(query_results):
    """
    Given a Sparql query result, extract nationality, gender and birthdate.

    :param query_results: SPARQL JSON result with results/bindings entries
    :return: (birth_date, gender, nationality); nationality is "" when absent
    :raises ValueError: if the result contains no bindings
    """
    bindings = query_results["results"]["bindings"]
    if not bindings:
        raise ValueError
    metadata = bindings[0]
    gender = metadata['gender']['value'].lower()
    birth_date = metadata['birthdate']['value']
    nationality = ""
    # Prefer the dbp nationality field, fall back to dbo.
    for field in ("nationality_dbp", "nationality_dbo"):
        if field in metadata:
            nationality = metadata[field]['value'].lower()
            break
    return birth_date, gender, nationality
b277565870b043f9e2f26d223711ae65e49b7314
104,016
def anonymize_reverse_name(name):
    """
    Anonymize reverse name before storing in the database.

    At most the 3 last labels are left unmasked.  All the preceding labels
    are masked by using "[…]".
    """
    if not name:
        return ""
    mask = "[…]"
    # A trailing dot adds an empty final label, so keep one more split.
    keep = 4 if name.endswith(".") else 3
    labels = name.rsplit(".", keep)
    labels[0] = mask
    return ".".join(labels)
0824f367e96f6c06aad927b8d7490a38c92af9b2
104,020
def block_renormalization(signal):
    """Renormalization step of blocking method: average adjacent pairs.

    An unpaired trailing element is dropped.
    """
    pairs = zip(signal[0::2], signal[1::2])
    return [0.5 * (first + second) for first, second in pairs]
11541647d8e1d5d87409708ba63129bafbbdc804
104,034
def add(x, y):
    """
    This function adds the given integer arguments.

    :param x: integer
    :param y: integer
    :return: integer
    """
    total = x + y
    return total
84b74aa3bc29eb7d8ac6d0987b7e786e18126080
104,035
def prettify_label(label: str) -> str:
    """Fix parameter label to look nice for plots.

    Replace underscores with whitespace, TeXify some stuff, remove
    unnecessary things, etc.

    Parameters
    ----------
    label : str
        Original label.

    Returns
    -------
    str
        Prettified label.
    """
    # Applied in order; e.g. "EffectiveNP" must be handled before "EffNP".
    replacements = (
        ("_", " "),
        ("ttbar", r"$t\bar{t}$"),
        ("tW", r"$tW$"),
        ("muF", r"$\mu_F$"),
        ("muR", r"$\mu_R$"),
        ("AR ", ""),
        ("RhoT", "Rho T"),
        ("hdamp", r"$h_{\mathrm{damp}}$"),
        ("DRDS", "DR vs DS"),
        ("EffectiveNP", "Eff. NP"),
        ("EffNP", "Eff. NP"),
        ("ptreweight", r"top-$p_{\mathrm{T}}$-reweight"),
        ("MET", r"$E_{\mathrm{T}}^{\mathrm{miss}}$"),
    )
    for old, new in replacements:
        label = label.replace(old, new)
    return label
8d3424e9c2715b3fb3d0e5ad236379997c417f42
104,036
import yaml


def unmarshal_yaml(yaml_file, replacements={}):
    """
    Unmarshals yaml into a python object.

    `replacements` allow substituting values in the yaml file.
    Ex: replacements = {"NAMESPACE", "kubeflow"}
        metadata:
          - name: ...
          - namespace: ${NAMESPACE}
    will become
        metadata:
          - name: ...
          - namespace: kubeflow
    """
    with open(yaml_file) as file:
        contents = file.read()
    # Substitute every ${KEY} placeholder before parsing.
    for key, value in replacements.items():
        contents = contents.replace("${%s}" % key, value)
    return yaml.safe_load(contents)
18aa154084054a537c52d19635490a4ee5f0e046
104,037
import re


def _convert_number_string(number_string):
    """
    Function to convert mixed number character strings to a number.

    Mapping: ``{'K': 1000, 'M': 1000000}``

    Parameters
    ----------
    number_string : str
        Number string to be mapped

    Returns
    -------
    number : int or float
        Converted number, tries to return an integer if possible.

    Examples
    --------
    >>> _convert_number_string('64K')
    64000  # 64 * 1000
    """
    multipliers = {'K': 1000, 'M': 1000000}
    # Strip the unit letters, parse the numeric part (int when possible).
    numeric_part = re.sub('[A-Z]', '', number_string)
    value = int(numeric_part) if numeric_part.isdigit() else float(numeric_part)
    # Apply every unit letter found in the original string.
    for ch in number_string:
        if ch.isalpha():
            value *= multipliers[ch]
    return value
1c9afd75bbefd2877d641c3b3cf3397ae0d41c56
104,041
def create_influxdb_points_body(hostname, measurements, timestamp):
    """
    :param str hostname: the hostname
    :param dict measurements: a mapping of str measurement names to float values
    :param timestamp: Unix timestamp in seconds
    :type timestamp: :class:`datetime.datetime`
    :return: a `list` of `dict`s
    """
    points = []
    for measurement_name, measurement_value in measurements.items():
        points.append({
            "measurement": measurement_name,
            "tags": {"host": hostname},
            "time": timestamp,
            "fields": {"value": measurement_value},
        })
    return points
0d58208ed86b3ee5bd307cf25e2ce9acad5af2d2
104,042
def line_number_in_contents(contents, regex, default=1):
    """Find the line number where regex first occurs inside contents.

    If regex has a matching group, the line number of the start of the
    matching group will be returned.  If the regex is not found, 'default'
    is returned.
    """
    match = regex.search(contents)
    if match is None:
        return default
    # Anchor on group 1 when the pattern has groups, otherwise the whole match.
    anchor_group = 1 if match.groups() else 0
    start = match.start(anchor_group)
    # Line number = newlines before the anchor, plus one (1-indexed).
    return contents.count('\n', 0, start) + 1
51ebe0a23ebc19bbb997bddf651179088d32a81f
104,044
def crop(image, top, left, height, width):
    """
    This method crops the image according to parameters.

    :param image: image to process
    :param top: top coordinate of the cropped image
    :param left: left coordinate of the cropped image
    :param height: height of the cropped image
    :param width: width of the cropped image
    :return: cropped image if succeeded, or None if the requested crop
        exceeds the boundaries of the current image
    """
    max_rows, max_cols = image.shape[0], image.shape[1]
    if top + height > max_rows or left + width > max_cols:
        print("Requested crop exceeds the boundaries of the current image")
        return None
    # The trailing ellipsis keeps any extra (e.g. channel) dimensions intact.
    return image[top:top + height, left:left + width, ...]
b8a4f85ecde6f23a4b6905feb4d6fd3d5bd81036
104,046
def has_latex_attr(x):
    """
    Return ``True`` if ``x`` has a ``_latex_`` attribute, except if ``x``
    is a ``type``, in which case return ``False``.

    Types inherit the ``_latex_`` method of the class to which they refer,
    but calling it on the type itself is broken (the unbound descriptor
    needs an instance argument), so types are excluded explicitly.
    """
    if isinstance(x, type):
        return False
    return hasattr(x, '_latex_')
3782dabbced7dfb8571e7512113c738c161f7e31
104,048
import logging


def get_completed_assignments(project, workers):
    """
    Get's the completed assignments
    :param project: (Project) The project to use
    :param workers: (List<String>) The list of worker usernames to get completed assignments for
    :return: List<Assignment> The list of completed assignments
    """
    # No explicit workers: fall back to every worker in the project.
    # NOTE(review): this returns worker objects, not username strings, and
    # they are interpolated into the query below — presumably their string
    # form is the user id; confirm against the Workers API.
    if not workers:
        workers = project.workers.search()
    # Build a SQL-ish "user_id in ('a','b',...)" where-clause to resolve
    # usernames to worker object ids.
    worker_query = "{} in ({})".format(project._worker_schema.user_id,
                                       ",".join(["'{}'".format(w) for w in workers]))
    worker_ids = [w.object_id for w in project.workers.search(where=worker_query)]
    if not worker_ids:
        logging.getLogger().info("No assignments completed by specified workers")
        return []
    logging.getLogger().info("Querying source features...")
    # An assignment counts as completed when its completed_date is set
    # (is not NULL) and it belongs to one of the resolved workers.
    assignment_query = "{} in ({}) AND {} is not NULL".format(project._assignment_schema.worker_id,
                                                              ",".join(["'{}'".format(w) for w in worker_ids]),
                                                              project._assignment_schema.completed_date)
    completed_assignments = project.assignments.search(assignment_query)
    return completed_assignments
799864d6e23329dc569fc90306cf6570fee0a560
104,050
def job_tasks(conf):
    # type: (dict) -> list
    """Return the list of task definitions configured for the job.

    :param dict conf: configuration object
    :rtype: list
    :return: list of tasks
    """
    tasks = conf['tasks']
    return tasks
60a9cf40228bc2a54c7039fd8b1c5d8161179240
104,059
def unmarshall_entity(entities_str):
    """Deserialize an entities string to a RuntimeEntity list.

    The input is a ``;``-separated string of ``entity:value`` pairs, e.g.
    ``"name:John;city:NYC"``.

    :param entities_str: serialized entities string
    :return: list of dicts with ``entity`` and ``value`` keys
    """
    entities = []
    for entity_str in entities_str.split(';'):
        # partition() tolerates a missing ':' (value becomes '') and keeps
        # any further ':' characters inside the value.
        name, _, value = entity_str.partition(':')
        # BUG FIX: the value used to be set to splitted[0] (the entity
        # name) instead of the actual value after the colon.
        entities.append({'entity': name, 'value': value})
    return entities
1b36eb00c7d21153e6baf6f427cb613dea85c9a7
104,064
def cria_posicao(x, y):
    """
    cria_posicao: int, int --> posicao

    Build and return the position ``[x, y]`` from two non-negative
    integers. Raises ``ValueError`` when either argument is not a
    non-negative ``int`` (bools are rejected, as their type is not int).
    """
    valid = type(x) == int and type(y) == int and x >= 0 and y >= 0
    if not valid:
        raise ValueError('cria_posicao: argumentos invalidos')
    return [x, y]
43de0f010c7f2bc48dfddf94473879ecb5f88139
104,065
import yaml


def load_yaml(data_path):
    """Parse the YAML file at ``data_path`` and return its contents."""
    with open(data_path, 'r') as handle:
        return yaml.safe_load(handle)
11ce97303ce85982a15a5da5badac13d44e9bd6b
104,072
import typing


def build_issue_doc(org: str, repo: str, title: str, text: typing.List[str]):
    """Build a document string out of various github features.

    Args:
      org: The organization the issue belongs in
      repo: The repository.
      title: Issue title
      text: List of contents of the comments on the issue

    Returns:
      content: The document to classify — the title, a lowercased
      ``org_repo`` line, then each comment, joined by newlines.
    """
    parts = [title, f"{org.lower()}_{repo.lower()}"]
    parts.extend(text)
    return "\n".join(parts)
56bebe316a7a8787c954eb5e74cc1b0e79f26714
104,074
def game_over(player_decks):
    """
    Determine whether the game is over: it is as soon as any player's
    deck is empty (a "GAME OVER" message is printed in that case).

    Parameters:
        player_decks - Decks for each player

    Returns:
        True if any deck is empty, False otherwise
    """
    for deck in player_decks:
        if len(deck) == 0:
            print("GAME OVER")
            return True
    return False
7806a483b4c5decc5b7ab7ff3bd909547da7a3e4
104,078
def pMorphIa(morphology):
    """
    P(D|Ia) : probability of observing a SN host galaxy with the given
    morphology, assuming the SN is of type Ia.

    Morphology may be a Hubble type
    [ 'E', 'S0', 'Sa', 'Sb', 'Sbc', 'Sc', 'Scd', 'Irr' ]
    or a CANDELS visual classification metric for spheroid/disk/irregular
    and mixed morphologies [ 's', 'sd', 'd', 'di', 'i' ].

    RETURNS : (P(D|Ia), errPplus, errPminus); unknown or 'u' morphologies
    fall back to (1, 0.1, -0.1).
    """
    probabilities = {
        'E':   (0.141, 0.021, -0.018),
        'S0':  (0.217, 0.026, -0.023),
        'Sa':  (0.149, 0.022, -0.019),
        'Sb':  (0.177, 0.023, -0.021),
        'Sbc': (0.117, 0.019, -0.017),
        'Sc':  (0.120, 0.019, -0.017),
        'Scd': (0.076, 0.016, -0.013),
        'Irr': (0.003, 0.004, -0.002),
        's':   (0.253, 0.028, -0.025),
        'sd':  (0.255, 0.028, -0.025),
        'd':   (0.353, 0.032, -0.030),
        'di':  (0.103, 0.018, -0.015),
        'i':   (0.035, 0.011, -0.009),
        'u':   (1, 0.1, -0.1),
    }
    return probabilities.get(morphology, (1, 0.1, -0.1))
812e86f4022bd57bdf2f796c71d099bd61f4fd1e
104,079
def ensure_is_lst(z):
    """Wrap a non-list value in a single-element list.

    Args:
        z (object): input variable

    Returns:
        z unchanged if it is already a list, otherwise [z]
    """
    return z if isinstance(z, list) else [z]
f506d19b59994842ab2a24faf3a873ac6d486cbc
104,080
def cli_table_to_recs(table_string):
    """Parse a CLI-style table string into a list of records.

    The first line is taken as the header; each subsequent non-blank line
    is split on whitespace and zipped with the header into a dict.

    Blank or whitespace-only lines (including a trailing newline) are
    skipped — previously they produced spurious empty ``{}`` records.

    :param table_string: whitespace-aligned table text, header first
    :return: list of dicts mapping header names to column values
    """
    lines = table_string.split('\n')
    header = lines.pop(0).split()
    recs = []
    for line in lines:
        vals = line.split()
        if not vals:
            # Skip blank lines rather than emitting empty records.
            continue
        recs.append(dict(zip(header, vals)))
    return recs
f773a9b7d481e05084744039bf3c400b48559edc
104,082
def identity(x):
    """Identity function: return ``x`` unchanged (a no-op)."""
    return x
711ff65dc3817b2bc5e58d5f360e118558e54130
104,083
def scoring_matrix_inplace(this_c, that_c, scoring_points):
    """
    Score a pair of aligned characters: matches score highest,
    transitions are penalised, transversions are penalised more, and
    gaps ('_') are penalised the most.

    Reminder: A-G and C-T are transitions; all other mismatched base
    pairs are transversions.
    """
    if this_c == that_c:
        return scoring_points['M']
    if '_' in (this_c, that_c):
        return scoring_points['G']
    pair = {this_c, that_c}
    if pair == {'A', 'G'} or pair == {'C', 'T'}:
        return scoring_points['Ti']
    return scoring_points['Tv']
263909d3680c3af077a05673f51986ebfb6fb153
104,086
import locale


def currency_symbol_default(**kwargs):
    """Return the currency symbol for the current locale as a str.

    Extra keyword arguments are accepted and ignored (the signature is
    kept for callers that pass context kwargs).
    """
    symbol = locale.localeconv()['currency_symbol']
    return str(symbol)
715edd767129642055c4bb6ca42f0925285c0b66
104,089
import json


def read_json(filename):
    """Read a GeoJSON file and map neighborhoods to their coordinates.

    Parameter: filename, a json file
    Return: result, a dictionary with key: Neighborhood name, value: the
    list of coordinates for that neighborhood.
    """
    with open(filename) as handle:
        data = json.load(handle)

    result = {}
    for feature in data['features']:
        # NTAName = Neighborhood Tabulation Area Name
        name = feature['properties']['NTAName']
        coords = feature['geometry']['coordinates'][0]
        # Coordinates may be nested one extra level (3 layers vs 2);
        # unwrap when the first element is itself a ring, not an [x, y].
        if len(coords[0]) > 2:
            coords = coords[0]
        result[name] = coords
    return result
90bbc8facdc129b4d2cbfe22227ffe004195db16
104,096
import torch


def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch and return the mean per-batch loss.

    For each batch: take the source and target sentences, zero the
    gradients from the previous batch, run the model, drop the first
    timestep of both output and target and flatten them (the loss works
    on 2d inputs with 1d targets), backpropagate, clip the gradients to
    prevent them from exploding (a common issue in RNNs), and step the
    optimizer. The running loss total is averaged over all batches.
    """
    model.train()
    total_loss = 0

    for batch in iterator:
        # src is the input, trg are the labels.
        src, trg = batch.src, batch.trg

        optimizer.zero_grad()

        output = model(src, trg)
        # trg = [trg len, batch size]
        # output = [trg len, batch size, output dim]
        output_dim = output.shape[-1]

        # Slice off the first timestep and flatten for the criterion.
        output = output[1:].view(-1, output_dim)  # predictions
        trg = trg[1:].view(-1)                    # labels

        # Computes log softmax + negative log-likelihood of predictions.
        loss = criterion(output, trg)
        loss.backward()

        # Prevent exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)

        optimizer.step()

        total_loss += loss.item()

    return total_loss / len(iterator)
5bea27ca9b0ad40f04034ccf0e33109f17c9460a
104,098
def get_eventnames(file):
    """Return trial event names sorted by their temporal order.

    An event column qualifies when its last value falls inside the last
    trial's [start_time, stop_time] window; qualifying names are then
    sorted by that last value.

    :param file: NWB-style object with an ``intervals['trials']`` table
        exposing ``colnames`` and per-column ``.data`` arrays
    :return: list of event column names in temporal order
    """
    trials = file.intervals['trials']
    event_names = []
    for name in trials.colnames:
        t = trials[name].data[-1]
        try:
            if trials['start_time'][-1] <= t <= trials['stop_time'][-1]:
                event_names.append(name)
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only skip columns that are
        # missing, empty, or not comparable to the time bounds.
        except (KeyError, IndexError, TypeError):
            pass
    ts = [trials[name].data[-1] for name in event_names]
    event_names = [name for _, name in sorted(zip(ts, event_names))]
    return event_names
b0bb8c3e4b844475b63230425750467a63bb2c3d
104,106
import json


def read_json(json_loc):
    """Read a given json file in as a dictionary.

    Args:
        json_loc (str): path to the json file to be read in

    Returns:
        (dictionary): A dictionary of the json that was read in
    """
    with open(json_loc, "r") as handle:
        return json.load(handle)
c285a6bec47ab3458168a08b2524faaae84ccb05
104,110
def get_signature_bytes(path):
    """Return the magic-number header signature of a file.

    Reads the file from disk and returns its first 262 bytes — enough to
    cover known file-type magic numbers.

    Args:
        path: path string to file.

    Returns:
        First 262 bytes of the file content as bytearray type.
    """
    with open(path, 'rb') as fp:
        header = fp.read(262)
    return bytearray(header)
42071810917729898a2942075308e8f4061e5930
104,114
import math


def centroid_distance(image_centroid, object_centroid, row):
    """Compute the Euclidean distance between the image centroid and the
    object centroid stored at index ``row``."""
    x1, y1 = image_centroid[0], image_centroid[1]
    x2, y2 = object_centroid[row][0], object_centroid[row][1]
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
f08cbb934980405d5db567cf5579a302066504ad
104,122
def relative_to_absolute_url(path, domain):
    """Convert a relative path to an absolute http URL on ``domain``."""
    return f'http://{domain}{path}'
abfa3e966575ca08ebb48285f0f84c9d43ceb438
104,123
import io


def scalar_to_param(gmpl_param_name, param_value, isstringio=True):
    """
    Convert a scalar to a GMPL representation of a parameter.

    :param gmpl_param_name: name for resulting GMPL param value
    :type gmpl_param_name: str
    :param param_value: value to embed in the GMPL statement
    :param isstringio: kept for backward compatibility; the function
        returns the same ``str`` either way. (Historically ``True`` was
        documented as returning a StringIO object, but the implementation
        always returned its ``.getvalue()`` string — the dead StringIO
        round-trip has been removed.)
    :return: GMPL dat code for the scalar parameter
    :rtype: str

    Example:
        scalar_to_param('n_prds_per_day', 48) -->
        'param n_prds_per_day := 48;\\n'
    """
    param = 'param ' + gmpl_param_name + ' := ' + str(param_value) + ';\n'
    # Both branches return the identical string; the branch is preserved
    # only so existing callers passing isstringio keep working.
    if isstringio:
        return param
    return param
a3a4363886eea4266733b89cdfab53a828018de9
104,125
import re


def github_sanitize_id(x):
    """
    Sanitize an ID by near-GitHub standards (see toc_filter.rb in
    https://github.com/jch/html-pipeline):

    * remove punctuation besides hyphens and underscores
    * change spaces to hyphens
    * downcase

    Note that it doesn't:

    * add unique suffixes (-1, -2, etc.)
    """
    # BUG FIX: re.U was previously passed positionally, where re.sub
    # interprets it as ``count`` (re.U == 32), silently limiting the
    # substitution to the first 32 matches. Pass it as ``flags=``.
    sanitized = re.sub(r'[^-\w ]', '', x.lower(), flags=re.U)
    return sanitized.replace(' ', '-')
18cd99d54a216e6e564591455df20b1b721dabd7
104,126
def report_config_defaults(report_config, test_harness=None):
    """Return a copy of ``report_config`` with defaults filled in for
    any missing entries; ``test_harness``, when given, always wins."""
    config = report_config.copy()

    # BigQuery reporting destination defaults.
    config.setdefault('report_project', 'google.com:tensorflow-performance')
    config.setdefault('report_dataset', 'benchmark_results_dev')
    config.setdefault('report_table', 'result')

    # Details about where the test was run.
    if test_harness:
        config['test_harness'] = test_harness
    else:
        config.setdefault('test_harness', 'unknown')

    for key in ('test_environment', 'platform', 'platform_type',
                'accel_type', 'framework_describe'):
        config.setdefault(key, 'unknown')

    return config
018734d5ea178ceb8d409713ab24022bba84b3dd
104,128
import binascii


def data_encoder(data, length=None):
    """Encode unformatted binary `data` as a 0x-prefixed hex string.

    If `length` is given, the hex digits are left-padded with zeros to
    ``length`` bytes: ``data_encoder(b'\\xff', 3) == '0x0000ff'``.
    """
    hexed = binascii.hexlify(data).decode('ascii')
    if length is not None:
        hexed = hexed.rjust(length * 2, '0')
    return '0x' + hexed
c7be0dbd1bb8cb4162dc159fe9c8b777ad08292a
104,129
def find_irreversible_reactions(model):
    """Return the list of the model's reactions whose ``reversibility``
    flag is ``False``."""
    irreversible = []
    for rxn in model.reactions:
        if rxn.reversibility is False:
            irreversible.append(rxn)
    return irreversible
abc19d82822b89cd28e24ea37b08f70818918d2b
104,134
def create_gql_request(readable_request: str) -> str:
    """
    Create a request string valid for use with http_request.

    Args:
        readable_request (str): A multiline string with a request in a
            human readable format.

    Returns:
        The same request with its lines joined by literal ``\\n``
        (backslash-n) two-character separators, suitable for embedding
        in a GraphQL HTTP payload.
    """
    return '\\n'.join(readable_request.splitlines())
dfbfdcfe2c633213108cf455428cd33641c79dd3
104,140
import json


def loadConfig(config_filename):
    """Load and parse a JSON configuration file."""
    with open(config_filename, 'rb') as config_file:
        contents = config_file.read()
    return json.loads(contents)
453f203931a390de6c0096362da48e9ed51cb164
104,141
import torch


def stack_subsample_frames_no_sync(x, x_lens, stacking=1, subsampling=1):
    """ Stacks frames together across feature dim, and then subsamples

    input is batch_size, feature_dim, num_frames
    output is batch_size, feature_dim * stacking, num_frames / subsampling

    x_lens holds per-example frame counts and is ceil-divided by
    ``stacking`` to match the subsampled time axis.
    """
    # Stacking and subsampling are fused into a single reshape below,
    # so the two factors must agree.
    assert stacking == subsampling
    # x is [B, H, T]
    x = x.transpose(1, 2)  # -> [B, T, H]
    T = x.size(1)
    # Right-pad the time axis so T becomes a multiple of ``stacking``
    # (the modulo keeps the pad at 0 when T already divides evenly).
    padded = torch.nn.functional.pad(x, (0, 0, 0, (stacking - (T % stacking)) % stacking))
    B, T, H = padded.size()
    # Fold each group of ``stacking`` consecutive frames into the
    # feature dimension: [B, T, H] -> [B, T // stacking, H * stacking].
    x = padded.reshape(B, T // stacking, -1)
    x = x.transpose(1, 2)  # back to [B, H * stacking, T // stacking]
    # Ceil-divide the lengths to match the new (subsampled) frame count.
    x_lens = (x_lens.int() + stacking - 1) // stacking
    return x, x_lens
a6d77a9e1710f3ec156f35660b2d5b76f1ec07b5
104,142
import json


def load_json(path, deserializer=None):
    """Load an object from a JSON file, optionally with a custom
    deserializer applied via ``object_hook``."""
    with open(path, 'r') as handle:
        return json.load(handle, object_hook=deserializer)
16e9c98b79aedf48dfba55d21c8811ce1ad0b150
104,145
from typing import Iterable


def xy_flatten(iterable: Iterable[Iterable[float]]) -> Iterable[float]:
    """
    Flatten [(x, y)...] to [x, y, ...]. Inverse of xy_iter()

    Note: This will actually flatten any iterable of iterables, but the
    intent is just for graphical things like (1, 2) or Point(3, 4).
    """
    for pair in iterable:
        yield from pair
bc4909b4a490e48050946afe16ebaf64eda0ca18
104,146
def update_transmission_parameters(parameters, compartments_to_update):
    """
    Add per-compartment transmission rates to ``parameters`` for every
    compartment with altered immunity/susceptibility to infection.

    Each rate is ``contact_rate * rr_transmission_<compartment>`` and is
    stored under ``contact_rate_<compartment>``. The mapping is mutated
    in place and returned.
    """
    base_rate = parameters["contact_rate"]
    for compartment in compartments_to_update:
        parameters["contact_rate_" + compartment] = (
            base_rate * parameters["rr_transmission_" + compartment]
        )
    return parameters
fa0a087f08f602a0cc41b88bd8e48b92e9969da5
104,147
def spit(st, tok):
    """
    Split the given string at the given token.

    :param st: string to split
    :param tok: token to split at
    :return: list of substrings
    """
    parts = st.split(tok)
    return parts
ebb61ff120625728d1c2fa3a4b636a4cf7cc072e
104,148
def _map_dtypes(type_names, field_widths): """ Create dtype string based on column lengths and field type names. Parameters ---------- type_names : list List of type names from file header field_widths : list List of field width values Returns ------- dtypes : list List of dtype for each column in data """ dtypes = [] for i, name in enumerate(type_names): if name == 'int': dtypes.append('i8') elif name == 'double': dtypes.append('f8') elif name == 'char': dtypes.append('a{0}'.format(field_widths[i])) else: raise ValueError('Unexpected type name: {0}.'.format(name)) return dtypes
a223f65414486bc0669962fee1610760e933ffea
104,150
def transform_value(value, subject_number):
    """Transform ``value`` with ``subject_number``: multiply and reduce
    modulo 20201227."""
    product = value * subject_number
    return product % 20201227
ef89b8828b5b35c7ffe408b384b764a124ef87fe
104,157
import string


def isprintable(data) -> bool:
    """
    Return True when every byte/character of ``data`` belongs to
    ``string.printable``.

    This is a convenience over ``str.isprintable``, which does NOT
    consider newlines part of the printable set (weird!) — here,
    printable whitespace counts as printable. ``str`` input is encoded
    to UTF-8 first.
    """
    if type(data) is str:
        data = data.encode("utf-8")
    printable = bytes(string.printable, "ascii")
    return all(byte in printable for byte in data)
1d2a542ea1d3ebd499363f57911f36858d82e876
104,158