content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def quote(s, always=False):
    """Wrap *s* in double-quotes when it contains a space (or unconditionally).

    Embedded double-quotes are doubled (VisualBasic-style escaping).

    :Parameters:
        s : str or unicode
            String to add double-quotes to.
        always : bool
            If True, adds quotes even if the input string contains no spaces.
    :return: the quoted string, or *s* unchanged when no quoting is needed.
    :rtype: str or unicode
    """
    needs_quoting = always or (' ' in s)
    if not needs_quoting:
        return s
    escaped = s.replace('"', '""')  # VisualBasic double-quote escaping.
    return '"%s"' % escaped
90a16b92d3369a7f2893be57e9b05462fe5c33b9
65,577
def within(v1, v2, eps):
    """Return True when the numbers v1 and v2 differ by less than eps."""
    difference = v1 - v2
    if difference < 0:
        difference = -difference
    return difference < eps
20ef507e3a86e455a44c7b6a2e71c9fd4941200a
65,582
import pathlib


def is_local_path(location: str) -> bool:
    """Report whether *location* names an existing local filesystem path.

    :param location: holds the location of the schema definition
    :return: True when the path exists locally; False otherwise, including
        when the existence check itself raises OSError (e.g. a URL that is
        not a valid path)
    """
    candidate = pathlib.Path(location)
    try:
        found = candidate.exists()
    except OSError:
        # Not a usable local path at all (illegal characters, too long, ...).
        return False
    return found
2a31a2bccd2cd5f7174bd12e74f916a48ccf9554
65,587
def get_all_occ(iterable, value):
    """Return every index at which *value* occurs in *iterable*."""
    positions = []
    for index, item in enumerate(iterable):
        if item == value:
            positions.append(index)
    return positions
a1c6db8cc89f65b6c3f704b0fcdc27a9ca59b3c5
65,588
def remove_draw_parameter_from_composite_strategy(node):
    """Strip the implicit first ``draw`` argument from an @st.composite function.

    Given that the FunctionDef is decorated with @st.composite, remove the
    first argument (`draw`) - it's always supplied by Hypothesis so we don't
    need to emit the no-value-for-parameter lint.

    NOTE(review): mutates *node* in place and also returns it; assumes the
    annotations/type_comment_args lists stay index-aligned with args —
    confirm against the astroid Arguments node API.
    """
    # Drop the positional parameter together with its parallel metadata entries.
    del node.args.args[0]
    del node.args.annotations[0]
    del node.args.type_comment_args[0]
    return node
54fd2824abffa8cde0af85e4293d66b9e19115ea
65,596
def _powerlaw(x0, y0, slope, x): """ From point (x0, y0) and parameter slope, returns y = f(x) such that: > f(x) = a * (x ** slope) > f(x0) = y0 Parameters ---------- x0, y0, slope, x : float Returns ------- y = f(x) : float """ return y0 * ((x/x0) ** slope)
197c64300637b2dff0004842f783d2858a387812
65,597
def unquote_shell_arg(arg):
    """ Return an argument with quotes removed if present.

    Only strips when the string ENDS with a quote character; the matching
    opening quote may appear anywhere (e.g. ``name="John Doe"`` becomes
    ``name=John Doe``).
    """
    sarg = str(arg)
    if len(sarg) == 0:
        return sarg
    # The trailing character decides which quote style (if any) to strip.
    quote_char = sarg[-1]
    if quote_char not in ('"', "'"):
        return sarg
    # Simple case: the whole string is quoted.
    # Deal with a starting quote that might not be at the beginning
    if sarg[0] == quote_char:
        return sarg[1:-1]
    # Otherwise locate the opening quote embedded in the middle.
    pos = sarg.find(quote_char)
    if pos == len(sarg) - 1:
        # No first quote, don't know what else to do but return as is
        return sarg
    # Remove both the embedded opening quote and the trailing one.
    return sarg[:pos] + sarg[pos+1:-1]
73eed32380e4569381c5aeed3400f5205925be58
65,598
def get_keyframe_range(obj):
    """ Returns minimal and maximal frame number of the animation associated
    with the object.

    :param obj: presumably a Blender object (bpy.types.Object) — confirm caller.
    :return: [first_frame, last_frame]; [0, 0] when the object has no
        animation data or no action.

    NOTE(review): when the action exists but has no keyframes at all this
    returns None, not [0, 0] — confirm callers handle that.
    """
    anim = obj.animation_data
    if not anim or not anim.action:
        return [0, 0]
    res = None
    for fcu in anim.action.fcurves:
        for keyframe in fcu.keyframe_points:
            # co[0] is the keyframe's time (frame number).
            t = keyframe.co[0]
            if res is None:
                res = [t, t]
            else:
                if t < res[0]:
                    res[0] = t
                if t > res[1]:
                    res[1] = t
    return res
63e1c1917fd5b0c60d7cfa71de6a820a01886ed8
65,601
def gt(x, y):
    """Implement `gt`: True when x is strictly greater than y."""
    return y < x
ceb5d4cf5812a587fc8aaea8f2c5e97b6ac7c973
65,610
def calc_mid_bar(value1, value2, *args):
    """Calculate percentage of value out of the maximum of several values,
    for making a bar chart.

    Return the midpoint between the height of the first and second parameter,
    expressed as a percentage of the largest of all supplied values.
    """
    peak = max((value1, value2) + args)
    midpoint = (value1 + value2) / 2
    return midpoint / peak * 100
6d107316469a2bf2f9edbbc8cbff72afccc4e2d3
65,611
import calendar


def to_seconds(timestamp):
    """Convert timestamp (a datetime) into integer seconds since the epoch.

    Returns None when the value cannot be converted.
    """
    result = None
    try:
        result = calendar.timegm(timestamp.utctimetuple())
    except Exception:
        # Deliberately broad: any failure (wrong type, etc.) maps to None.
        pass
    return result
c7ad223c9148d25e3bfef047f99b2fb260ef0581
65,612
import collections def _select_targets(y, min_threshold=10, max_threshold=None): """_select_targets. Return the set of targets that are occurring a number of times bounded by min_threshold and max_threshold. """ c = collections.Counter(y) y_sel = [] for y_id in c: if c[y_id] > min_threshold: if max_threshold and c[y_id] < max_threshold: y_sel.append(y_id) else: y_sel.append(y_id) return y_sel
f94a29a04caeff00ab6503b2079528b0a1acaae7
65,613
import pickle


def inverse_scale(dir, set, data):
    """Transform the stellar properties back to the original data range.

    Parameters
    ----------
    dir : string
        Directory where the data scaler is saved.
    set : string
        The dataset to transform, either train, validation, or test.
    data : array_like, floats
        Scaled stellar property predictions.

    Returns
    ----------
    predictions : array_like, float
        Stellar property predictions in the original data range.
    """
    # NOTE: pickle.load on an untrusted file can execute arbitrary code;
    # only load scalers you created yourself.
    scaler_path = dir + 'y_%s_scaler.sav' % set
    # Use a context manager so the file handle is always closed
    # (the original left the handle from open() dangling).
    with open(scaler_path, 'rb') as fh:
        scaler = pickle.load(fh)
    return scaler.inverse_transform(data.reshape(-1, 1)).flatten()
df667114efbc07c92e72a06874aba8e0386b7ecd
65,614
from typing import Dict from typing import Any def subdict(cols: Dict[str, Any], subkeys: list) -> Dict[str, Any]: """Take a dictionary and subset it based on a list of keys.

    Args:
        cols (Dict[str, Any]): The full dictionary to be subsetted
        subkeys (list): list of keys to be contained in the subset

    Returns:
        Dict[str, Any]: A subsetted dictionary

    Raises:
        KeyError: if any key in ``subkeys`` is missing from ``cols``.
    """ return {key: cols[key] for key in subkeys}
72f5ae909909a13dd7c2ec312b3159313bf3b01b
65,617
def get_num_groups(filters):
    """ Helper to select number of groups for GroupNorm.

    Returns ``min(filters // 4, 16)``: 16 groups (the paper default) once
    ``filters >= 64``, otherwise ``filters // 4``.

    NOTE(review): the original docstring promised "at least 4 groups", but
    for ``filters < 4`` this returns 0, which is invalid for GroupNorm —
    presumably callers always pass filters >= 4; confirm.
    """
    return min(filters // 4, 16)
9baeab0bd1a999c7ef6eda1f41fd70c3bb612e7b
65,618
def find_childless_node(digraph):
    """Finds and returns a childless node in the digraph, or returns None
    if there are none.

    Which childless node is returned (when several exist) is determined by
    the iteration order of ``digraph.nodes()``.
    """
    for node in digraph.nodes():
        if node.num_children() == 0:
            return node
    return None
42a14ebec2941550643e19b2558d44b5e47444ec
65,626
def genes_to_rwr_tsv(genes):
    """Convert a list of genes to a tsv string for use with RWR tools.

    Each gene becomes one line of the form ``report<TAB><gene>``.
    """
    return "".join("report\t%s\n" % gene for gene in genes)
92c36d31b67c73bc39bd40569c85916bcb8cb183
65,635
def buscar_palabra(texto, palabra, numero):
    """Count how many times *palabra* appears as a space-separated word in *texto*.

    Args:
        texto (str): String to search through.
        palabra (str): Word to look for.
        numero: Unused; kept for interface compatibility.

    Returns:
        int: Number of occurrences of the word in the text.
    """
    palabras = texto.split(sep=" ")
    return palabras.count(palabra)
657e5fa4ab5a074a773871805017f3a6d7f2ff67
65,640
def utterance_to_capsules(utterance):
    """
    Transform an Utterance into a list of capsules.

    One capsule dict is produced per triple in ``utterance.triples``,
    combining chat metadata with the triple's fields; location/context
    fields are left empty for downstream code to fill in.

    :param utterance: Utterance with .chat, .turn, .transcript, .datetime,
        .chat_speaker and a list of triple dicts
    :return: list of capsule dicts
    """
    capsules = []
    for triple in utterance.triples:
        capsule = {"chat": utterance.chat.id,
                   "turn": utterance.turn,
                   "author": utterance.chat_speaker,
                   "utterance": utterance.transcript,
                   "utterance_type": triple['utterance_type'],
                   # Character span covering the whole transcript.
                   "position": "0-" + str(len(utterance.transcript)),
                   ###
                   "subject": triple['subject'],
                   "predicate": triple['predicate'],
                   "object": triple['object'],
                   "perspective": triple["perspective"],
                   ###
                   # Context/location fields: unknown here, left blank.
                   "context_id": None,
                   "date": utterance.datetime.isoformat(),
                   "place": "",
                   "place_id": None,
                   "country": "",
                   "region": "",
                   "city": "",
                   "objects": [],
                   "people": []
                   }
        capsules.append(capsule)
    return capsules
6b6cf73a0142f176559fedf1eec2ce5a870af28c
65,641
def CheckTreeIsOpen(input_api, output_api, source_file_filter=None):
    """Make sure the tree is 'open'. If not, don't submit.

    Presubmit canned check: queries the Chromium OS status app for the
    current tree state.

    NOTE(review): ``source_file_filter`` is accepted for presubmit-API
    signature compatibility but not used here.
    """
    return input_api.canned_checks.CheckTreeIsOpen(
        input_api,
        output_api,
        json_url='http://chromiumos-status.appspot.com/current?format=json')
b04d4122c8a0e28d63c01086e793a598f492825d
65,642
def _get_size(height): """ Determines the size of the grid for the given height map. """ Lx = 0 Ly = 0 for (x,y) in height: Lx = max(x+1,Lx) Ly = max(y+1,Ly) return Lx,Ly
9f9e0bd8acc78448c721368097521a27ab2cbf5a
65,643
# FIX: typing.OrderedDict is a deprecated annotation-only alias; the real,
# instantiable class lives in collections.
from collections import OrderedDict


def times(*combined):
    """Generate a product of N sets of combinations.

    times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])

    Args:
        *combined: N lists of dictionaries that specify combinations.

    Returns:
        a list of dictionaries for each combination.

    Raises:
        ValueError: if some of the inputs have overlapping keys.
    """
    assert combined
    if len(combined) == 1:
        return combined[0]
    first = combined[0]
    # Recursively combine everything after the first list, then take the
    # cartesian product with the first.
    rest_combined = times(*combined[1:])
    combined_results = []
    for a in first:
        for b in rest_combined:
            if set(a.keys()).intersection(set(b.keys())):
                raise ValueError("Keys need to not overlap: {} vs {}".format(
                    a.keys(), b.keys()))
            combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
    return combined_results
6a8475de9b35f4a931969e304880ebe657a3e6ee
65,644
from typing import List


def split_string(input_string: str) -> List[str]:
    """Split *input_string* on commas and return the parts as a list.

    :param input_string: input
    """
    separator = ","
    return input_string.split(separator)
0d5156796f552cde7675d03806e9ad4130a3eaea
65,647
def could_overlap(xy_i, swarm, d):
    """Return a list of all swarm points that could overlap with one point.

    A candidate overlaps when its y-coordinate is less than *d* below the
    new point's y; points above the new point always qualify (signed
    difference, not absolute).
    """
    _, y_i = xy_i
    return [xy_j for xy_j in swarm if (y_i - xy_j[1]) < d]
9e55738365ef0ebfd305de3673c18e02c09336b8
65,650
def check_nuvs_file_type(file_name: str) -> str:
    """
    Get the NuVs analysis file type based on the extension of given `file_name`

    :param file_name: NuVs analysis file name
    :return: file type
    :raises ValueError: when the extension is not .tsv, .fa, or .fq
    """
    suffix_to_type = {".tsv": "tsv", ".fa": "fasta", ".fq": "fastq"}
    for suffix, file_type in suffix_to_type.items():
        if file_name.endswith(suffix):
            return file_type
    raise ValueError("Filename has unrecognized extension")
dfd08adc37f121f30c93cc42ac72dce719a473c4
65,659
def get_category_id(youtube_, category):
    """ Returns the ID of a given category.

    Args:
        youtube_: YouTube API client instance
        category (str): category to get the ID for.

    Returns:
        str
    """
    response = youtube_.videoCategories().list(
        part="snippet", regionCode="US").execute()
    matches = (item["id"] for item in response["items"]
               if item["snippet"]["title"] == category)
    return next(matches)
c43b830f5dd0025ea3e39a070c41a1c7834a02a4
65,663
def chunks(iterable: list, amount: int):
    """ Split a list into x chunks

    :param iterable: List of stuff to chunk
    :param amount: How many chunks?
    :return: List of lists
    """
    step = len(iterable) / float(amount)
    result = []
    cursor = 0.0
    while cursor < len(iterable):
        start, stop = int(cursor), int(cursor + step)
        result.append(iterable[start:stop])
        cursor += step
    return result
9eddf0c5de5eb2080615f00bc9b450321fb75e8a
65,665
import json


def get_keystone_token(conf, client):
    """Gets Keystone Auth token.

    :param conf: configuration object exposing ``conf.auth.username``,
        ``conf.auth.password`` and ``conf.auth.url``
    :param client: HTTP client with a requests-style ``post`` method
    :return: the token id from the Keystone response
    """
    body = {
        'auth': {
            'passwordCredentials': {
                'username': conf.auth.username,
                'password': conf.auth.password
            },
        },
    }
    header = {"Content-Type": "application/json",
              "Accept": "application/json"}
    # FIX: serialize the body explicitly. Passing the raw dict as `data`
    # makes requests-style clients form-encode it, contradicting the
    # declared JSON Content-Type and breaking the Keystone request.
    response = client.post(url=conf.auth.url, headers=header,
                           data=json.dumps(body))
    response_body = response.json()
    return response_body['access']['token']['id']
8506d0b033d12d1cfb0f9501f0aca356760c620f
65,666
def iterate(dataset, dims):
    """Iterate over all xarray dimensions except specified ones.

    Parameters
    ----------
    dataset : xarray Dataset
        Input dataset
    dims : sequence of strings
        Dimensions to exclude from iteration

    Returns
    -------
    names : sequence of strings
        Names of coordinates that are being iterated.
    iterator : iterator over (key, val)
        key : sequence of values in the same order as returned by "names"
        val : subset of dataset corresponding to iteration
    """
    # All dimensions not excluded are folded into one stacked dimension.
    names = [p for p in list(dataset.dims) if p not in dims]
    stacked = dataset.stack(internal_iterator=names)
    stacked = stacked.transpose('internal_iterator', *dims)

    def iterator():
        # Walk the stacked dimension, restoring the original coordinate
        # values on each selected slice.
        for i, p in enumerate(stacked.internal_iterator):
            key = p.data.tolist()
            val = stacked.sel(internal_iterator=p).drop('internal_iterator')
            val = val.assign_coords(**dict(zip(names, key)))
            yield (key, val)

    return names, iterator()
9d89ce0748717f913e3047a75b2cf78431feacf1
65,669
def decode_entity(x, mask):
    """Decode sequences of entities from weight matrix

    Args:
        x (torch.Tensor): output with shape (B, T, num_entities)
        mask (torch.BoolTensor): (B, T); True marks real tokens, so each
            row-sum is that sequence's true length

    Returns:
        (list[list[int]]): best sequences of entities of this batch,
        representing in indexes (B, *)
    """
    lengths = mask.sum(1)        # (B,) number of valid positions per row
    best = x.argmax(dim=-1)      # (B, T)
    return [row[:int(n)].tolist() for row, n in zip(best, lengths)]
f15d7e45aa08da3c5947fbef27eea6689d9fe57a
65,670
def calc_asue_el_th_ratio(th_power):
    """
    Estimate Stromkennzahl according to ASUE 2015 data sets

    Parameters
    ----------
    th_power : float
        Thermal power in Watt

    Returns
    -------
    el_th_ratio : float
        Current el. to th. power ratio (Stromkennzahl)
    """
    return 0.0799 * th_power ** 0.1783
cfd86c6bab5626ae56047ae4838eb87c0387025f
65,674
import re def _detect_non_english(text): """Function that detects if there's non-english characters in text Args: text (string): text of a news article Returns: boolean: True if there's non-english characters exist """ # korean if re.search("[\uac00-\ud7a3]", text): return True # japanese if re.search("[\u3040-\u30ff]", text): return True # chinese if re.search("[\u4e00-\u9FFF]", text): return True # arabic if re.search("[\u0600-\u06FF]", text): return True # devanagari (hindi) if re.search("[\u0900-\u097F]", text): return True return False
de875ad93bd0c736e97ea5173775216ca15c0fd3
65,676
import collections


def extract_duplicates(lst):
    """
    Return all items of a list that occur more than once.

    Parameters
    ----------
    lst: List[Any]

    Returns
    -------
    lst: List[Any]
        Distinct items appearing at least twice, in first-seen order.
    """
    counts = collections.Counter(lst)
    duplicates = []
    for item, count in counts.items():
        if count > 1:
            duplicates.append(item)
    return duplicates
890e1004ae7d6cf5b2eb7d62f848097981ee2bd2
65,680
def get_lvl(ob):
    """
    Returns the number of parents of a given object, or, in other words,
    its level of deepness in the scene tree.

    Parameters
    ----------
    ob : object with a ``parent`` attribute (e.g. bpy.types.Object)

    Returns
    -------
    lvl : int
        The number of parents of the object. For 'None' -1 is returned.
    """
    depth = -1
    current = ob
    while current:
        current = current.parent
        depth += 1
    return depth
21fc286110251d82440aa311c42e1d4c769f3ae3
65,682
def calc_transpose(func, in_data, **kwargs):
    """[Transpose](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.transpose.html)

    Transpose operation is just copying memory with no FLOPs.

    | Item   | Value |
    |:-------|:------|
    | FLOPs  | $$ 0 $$ |
    | mread  | $$ \| x \| $$ |
    | mwrite | $$ \| x \| $$ |
    | params | `axes`: transpose axes |
    """
    x = in_data[0]
    flops = 0
    # Reads and writes every element exactly once.
    return (flops, x.size, x.size, {'axes': func.axes})
8889b575db3e00e35f932ce955458e4455a75a62
65,690
def set_up_iter(it):
    """
    Define the time step for the first iteration and set total simulation
    time at start to 0 s.

    Arguments
    ---------------
        it   number of iterations

    Results
    ---------------
        iter_max   maximum number of iterations (same as *it*)
        dt0        first time step
        t_passed   initialized time control (0)
    """
    first_dt = 0.01
    return it, first_dt, 0
91955ff335513223af40e8b636714433c8b5b827
65,694
def gpio_pin(port, pin):
    """ GPIO pin definition: a ("pin", (port, pin)) tagged tuple. """
    location = (port, pin)
    return ("pin", location)
99b24cd15d3f6d1895e3c1952244cd8f11c3a905
65,696
from typing import Optional


def ema(x: float, mu: Optional[float] = None, alpha: float = 0.3) -> float:
    # taken from https://github.com/timwedde/rich-utils/blob/master/rich_utils/progress.py
    """
    Exponential moving average: smoothing to give progressively lower
    weights to older values.

    Parameters
    ----------
    x : float
        New value to include in EMA.
    mu : float, optional
        Previous EMA value; when None, *x* itself is returned.
    alpha : float, optional
        Smoothing factor in range [0, 1], [default: 0.3].
        Increase to give more weight to recent values.
        Ranges from 0 (yields mu) to 1 (yields x).
    """
    if mu is None:
        return x
    return alpha * x + (1 - alpha) * mu
3ace775b50744aff39747938858b42ae88c9191b
65,701
def exec(container, command, out=True): """ Run one or more commands inside a docker container. Paremters --------- container : docker container command : str or list of commands. Multiple commands are joined with ' && ' out : print the output from running the commands """ # convert to list if not isinstance(command, list): command = [command] # run the command command = ' && '.join(command) result = container.exec_run(f"sh -c '{command}'") # show the if out: print(result.output.decode()) return result.exit_code, result.output.decode()
c6d52cbba87c73ef71cb1516f4e66ed708367dc4
65,708
def processWpaReturn(output):
    """ Extract AP info from wpa_supplicant output

    Args:
        output, string by bytes

    Returns:
        A list of dictionary with 3 fields: "bssid", "signal" and "ssid"
        (the ssid has the frequency appended in parentheses)
    """
    lines = output.split('\n')
    AP_info = []
    if len(lines) <= 1:
        print("Networks not detected.")
        return AP_info
    # Skip the header line, then parse the tab-separated fields.
    for line in lines[1:]:
        fields = line.split('\t')
        if len(fields) < 5:
            continue
        bssid, frequency, signal = fields[0], fields[1], fields[2]
        ssid = fields[-1]
        AP_info.append({'bssid': bssid,
                        'signal': signal,
                        'ssid': ssid + '(' + frequency + ')'})
    return AP_info
be042eea3e3809b53be484d42537d6d47b3d4885
65,710
from bs4 import BeautifulSoup def parse_body(message): """Parses text body from email message object with BeautifulSoup.

    Parameters:
        message (email.Message object): Loaded email with Python standard
            library email module.

    Returns:
        body (str): Email text body (first matching non-attachment part for
        multipart messages, else the whole payload), stripped of HTML tags.

    NOTE(review): 'text/txt' is not a standard MIME type — plain-text parts
    are usually 'text/plain', so they may be skipped here; confirm intent.
    """ body = '' if message.is_multipart(): for part in message.walk(): content_type = part.get_content_type() content_disposition = str(part.get('Content-Disposition')) if (content_type in ['text/html', 'text/txt'] and 'attachment' not in content_disposition): body = part.get_payload(decode=True) break else: body = message.get_payload(decode=True) return BeautifulSoup(body, 'html5lib').get_text()
d53b51f9dfc4af3c37b460f1091df0061fbe16b5
65,712
def _is_mxp_header(line): """Returns whether a line is a valid MXP header.""" line = line.strip() return (len(line) > 2 and line.startswith('*') and line.endswith('*') and line[1:-1].strip().lower().startswith('exported from'))
16e070f1ec2cc01e78cfc7c341336dbe15621206
65,714
def expand_corepath(pathname, session=None, node=None):
    """
    Expand a file path given session information.

    :param str pathname: file path to expand
    :param core.emulator.session.Session session: core session object to expand path with
    :param core.nodes.base.CoreNode node: node to expand path with
    :return: expanded path
    :rtype: str
    """
    if session is not None:
        substitutions = (
            ("~", "/home/%s" % session.user),
            ("%SESSION%", str(session.id)),
            ("%SESSION_DIR%", session.session_dir),
            ("%SESSION_USER%", session.user),
        )
        for token, value in substitutions:
            pathname = pathname.replace(token, value)
    if node is not None:
        pathname = pathname.replace("%NODE%", str(node.id))
        pathname = pathname.replace("%NODENAME%", node.name)
    return pathname
64bb3d3a09cef8eaa9fdd6753938aa63c6e8f6e0
65,715
from typing import Dict import yaml def load_config(path: str) -> Dict: """Loads YAML file

    Args:
        path (str): path to yml file

    Returns:
        Dict: yaml config

    NOTE(review): on a YAML parse error this prints the exception and calls
    ``quit()``, terminating the whole interpreter — consider re-raising so
    library callers can handle the failure instead.
    """ print(f"Loading parameters from {path}.") with open(path, "r") as stream: try: config = yaml.safe_load(stream) except yaml.YAMLError as exc: print(exc) quit() return config
2856441b09b0213fcd0e2249a0fe702364aaf45a
65,717
def humanize_seconds(seconds: int):
    """Convert seconds to readable format."""
    seconds = int(seconds)
    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    if days:
        parts.append("{0} day{1}, ".format(days, "s" if days != 1 else ""))
    if hours:
        parts.append("{0} hour{1}, ".format(hours, "s" if hours != 1 else ""))
    if minutes:
        parts.append("{0} minute{1}, ".format(minutes, "s" if minutes != 1 else ""))
    if seconds:
        parts.append("{0} second{1} ".format(seconds, "s" if seconds != 1 else ""))
    return "".join(parts)
80d195647b093a028123d2bb0986fef0e297bdc4
65,719
def modules_tmpdir(tmpdir, monkeypatch):
    """Add a temporary directory for modules to sys.path.

    pytest fixture helper: creates ``tmp_modules`` under *tmpdir* and
    prepends it to ``sys.path`` via *monkeypatch* (undone automatically at
    test teardown).

    :return: the created directory (py.path.local)
    """
    tmp = tmpdir.mkdir('tmp_modules')
    monkeypatch.syspath_prepend(str(tmp))
    return tmp
3d5aeb2fdb502f01ecdf196fd7bfc401e237e83f
65,722
def _get_ordering(actual, desired): """Find an ordering of indices so that desired[i] == actual[ordering[i]]. Parameters ---------- actual, desired : string, same length Two strings differring only in the permutation of their characters. Returns ------- ordering : list of int A list of indices into `actual` such that ``desired[i] == actual[ordering[i]]``. Examples -------- >>> actual = "XYCZT" >>> desired = "TZYXC" >>> _get_ordering(actual, desired) [4, 3, 1, 0, 2] """ ordering = [] for elem in desired: ordering.append(actual.find(elem)) return ordering
617c6ed428ba23ac258acd7851f0f3c5dc7386ad
65,725
def get_queue_for_pilot(resources):
    """
    Determine which queue this pilot resource belongs in.

    Args:
        resources (dict): resources dict with 'gpu' and 'memory' keys

    Returns:
        str: queue name ('gpu', 'memory', or 'default')
    """
    if resources['gpu']:
        return 'gpu'
    if resources['memory'] >= 8:
        return 'memory'
    return 'default'
565b588c69cb90bc3c3b13f522d4caf87211ad74
65,730
def get_AllAlternativeImages(self, page=True, region=True, line=True, word=True, glyph=True):
    """
    Get all the ``pc:AlternativeImage`` in a document

    Walks Page -> TextRegion -> TextLine -> Word -> Glyph, collecting the
    AlternativeImage entries at every level whose flag is True. Lower levels
    are always traversed, so e.g. word images are still found when
    ``region=False``.

    Arguments:
        page (boolean): Get images on ``pc:Page`` level
        region (boolean): Get images on ``pc:*Region`` level
        line (boolean): Get images on ``pc:TextLine`` level
        word (boolean): Get images on ``pc:Word`` level
        glyph (boolean): Get images on ``pc:Glyph`` level

    Returns:
        a list of :py:class:`AlternativeImageType`
    """
    ret = []
    if page:
        ret += self.get_AlternativeImage()
    # Only 'Text' regions are considered.
    for this_region in self.get_AllRegions(['Text']):
        if region:
            ret += this_region.get_AlternativeImage()
        for this_line in this_region.get_TextLine():
            if line:
                ret += this_line.get_AlternativeImage()
            for this_word in this_line.get_Word():
                if word:
                    ret += this_word.get_AlternativeImage()
                for this_glyph in this_word.get_Glyph():
                    if glyph:
                        ret += this_glyph.get_AlternativeImage()
    return ret
ff45f9ffb1af2e06f831e9be7721af39c5ed14ce
65,733
def gen_program_id(program_obj):
    """
    Generates the Elasticsearch document id for a Program

    Args:
        program_obj (Program): The Program object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"program_{program_obj.id}"
ba20d6b7ddacee40b61ba7a284add03ac37bbba8
65,735
def find_thumbnail(filename):
    """Return a string containing regexp for thumbnail file.

    Regexp consists of filename without file extension and trailing
    underscore. Assume: secure_filename by Werkzeug renames uploads so that
    a file extension is always present.
    """
    stem, _dot, _ext = filename.rpartition('.')
    return stem + '_'
e2fbb9d81388c237a17283fb1ed93ee15bfc6de3
65,736
import requests def grid(url, resource='/grid'): """Return grid definitions fetched from the remote service.

    Args:
        url (str): protocol://host:port/path
        resource (str): /the/grid/resource (default: /grid)

    Returns:
        dict: decoded JSON response

    NOTE(review): no timeout and no HTTP status check — a failed request
    surfaces as an exception from ``.json()``; consider a timeout plus
    ``raise_for_status()``.

    Example:
        >>> chipmunk.grid(url='http://host:port/path')
        [{"name":"tile", "proj":null, "rx":1.0, "ry":-1.0, "sx":150000.0, "sy":150000.0, "tx":2565585.0, "ty":3314805.0}, {"name":"chip", "proj":null, "rx":1.0, "ry":-1.0, "sx":3000.0, "sy":3000.0, "tx":2565585.0, "ty":3314805.0}]
    """ url = '{}{}'.format(url, resource) return requests.get(url=url).json()
721e798d9029924395c90e5bcda42b56f1352247
65,743
def lulc_area_sql(grid_name, lulc_name):
    """
    Returns a sql statement that intersects a grid with the LULC table and
    sums the areas per land-cover class (pixel codes 1-8).

    Args:
        grid_name (string): the PostGIS table name for the grid
        lulc_name (string): the PostGIS table name for the LULC table

    Returns:
        sql (string): a sql statement

    NOTE(review): table names are interpolated with %s — only pass trusted,
    internally-generated names (no parameterization for identifiers).
    """
    # One SUM(CASE ...) column per LULC pixel code, intersected with each
    # grid cell; grouped by grid cell (i, j).
    sql = ("SELECT grid.i, "
           "grid.j, "
           "SUM(CASE WHEN pixel = 1 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Agriculture, "
           "SUM(CASE WHEN pixel = 2 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Bare, "
           "SUM(CASE WHEN pixel = 3 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Forest, "
           "SUM(CASE WHEN pixel = 4 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Grassland, "
           "SUM(CASE WHEN pixel = 5 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Shrubland, "
           "SUM(CASE WHEN pixel = 6 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Frozen, "
           "SUM(CASE WHEN pixel = 7 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Urban, "
           "SUM(CASE WHEN pixel = 8 "
           "THEN ST_Area(ST_Intersection(grid.way, lulc.way)) "
           "ELSE 0 END) AS Water, "
           "grid.way FROM %s AS grid, "
           "%s AS lulc "
           "WHERE ST_Intersects(grid.way, lulc.way) "
           "GROUP BY grid.i, grid.j, grid.way ORDER BY grid.j, grid.i ASC")
    sql = sql % (grid_name, lulc_name)
    return sql
f274a9f5056db38e40141a201b98294d962aea57
65,750
def build_lm_labels(sequence, pad_token_id):
    """ Padding token are replaced by the value -1 so they are not taken
    into account in the loss computation.

    Returns a modified copy; *sequence* itself is left untouched.
    """
    labels = sequence.clone()
    pad_positions = labels.eq(pad_token_id)
    labels[pad_positions] = -1
    return labels
f500142fc5582f3e4afcf4fd15141b872a284f15
65,753
def create_upstream_index(
    df, downstream_col="downstream", upstream_col="upstream", exclude=None
):
    """Create an index of downstream ids to all their respective upstream ids.

    This is so that network traversal can start from a downstream-most segment,
    and then traverse upward for all segments that have that as a downstream
    segment.

    Parameters
    ----------
    df : DataFrame
        Data frame containing the pairs of upstream_col and downstream_col that
        represent the joins between segments.
    downstream_col : str, optional (default "downstream")
        Name of column containing downstream ids
    upstream_col : str, optional (default "upstream")
        Name of column containing upstream ids
    exclude : list-like, optional (default None)
        List-like containing segment ids to exclude from the list of upstreams.
        For example, barriers that break the network should be in this list.

    Returns
    -------
    dict
        dictionary of downstream_id to the corresponding upstream_id(s)
    """
    # Rows where either end is 0 represent "no join" and are dropped.
    ix = (df[upstream_col] != 0) & (df[downstream_col] != 0)
    if exclude is not None:
        ix = ix & (~df[upstream_col].isin(exclude))

    # FIX: `df[ix, [cols]]` is invalid pandas indexing (a (mask, list) tuple
    # key raises); row/column selection requires `.loc`.
    # NOTE: this looks backward but is correct for the way that grouping works.
    return (
        df.loc[ix, [downstream_col, upstream_col]]
        .set_index(upstream_col)
        .groupby(downstream_col)
        .groups
    )
f3895c54d190af7beec1deda34813956278f8f57
65,755
def make_ngrams(tokens, N):
    """ Returns a list of N-long ngrams from a list of tokens """
    last_start = len(tokens) - N + 1
    return [tokens[start:start + N] for start in range(last_start)]
2dbeee18a31d6f0f745b03c310a29a2bd87ec535
65,758
def query_string(query_dict):
    """Convert a dictionary into a query string URI.

    Args:
        query_dict (dict): Dictionary of query keys and values.

    Returns:
        str: Query string, i.e. ?query1=value&query2=value.
    """
    pairs = ('{0}={1}'.format(key, value) for key, value in query_dict.items())
    return '?' + '&'.join(pairs)
5f2131b7b3c5448528a08414de03a64ea0226e72
65,765
import torch def huber_loss(x): """The classical robust Huber loss, with first and second derivatives.

    :param x: tensor of (squared) residuals; quadratic below 1, linear above
    :return: (loss, first derivative, second derivative) tensors, same shape
        as x

    NOTE(review): assumes x > 0 — sqrt(x) and 1/sqrt(x) are evaluated
    unconditionally before torch.where selects, so x == 0 yields inf in the
    (unselected) linear branch; confirm callers clamp x.
    """ mask = x <= 1 sx = torch.sqrt(x) # Lower-bound the inverse sqrt at machine epsilon. isx = torch.max(sx.new_tensor(torch.finfo(torch.float).eps), 1/sx) loss = torch.where(mask, x, 2*sx-1) loss_d1 = torch.where(mask, torch.ones_like(x), isx) loss_d2 = torch.where(mask, torch.zeros_like(x), -isx/(2*x)) return loss, loss_d1, loss_d2
b2ea6c562545be5096da200b0c1797fb16c46be4
65,769
import importlib


def get_attribute(module_name: str, attribute_name: str):
    """
    Get the specified module attribute. In most cases it will be a class or
    function.

    :param module_name: module name
    :param attribute_name: attribute name
    :return: module attribute
    """
    assert isinstance(module_name, str)
    assert isinstance(attribute_name, str)
    module = importlib.import_module(module_name)
    return getattr(module, attribute_name)
bddae4a14e77bccc8818ba588117f1492ea66620
65,777
def get_conflicting_domains(
        desired_domains, hmc_access_mode, adapter, partition,
        all_crypto_config, all_partitions):
    """
    Internal function that determines those domains from the desired domains
    on a particular adapter that cannot be attached to a particular
    partition in the desired mode because they are already attached to other
    partitions in a mode that prevents that.

    :param desired_domains: iterable of domain indexes to attach
    :param hmc_access_mode: desired access mode ('control' or usage)
    :param adapter: adapter object with a ``uri`` attribute
    :param partition: target partition object with a ``uri`` attribute
    :param all_crypto_config: dict adapter-uri -> {domain-index: [(mode,
        partition-uri), ...]} describing current attachments
    :param all_partitions: dict partition-uri -> partition object
    :return: dict domain-index -> (access_mode, partition_name) for each
        conflicting domain
    """
    conflicting_domains = {}
    if adapter.uri in all_crypto_config:
        domains_dict = all_crypto_config[adapter.uri]
        for di in desired_domains:
            if di in domains_dict:
                # The domain is already attached to some
                # partition(s) in some access mode
                for am, p_uri in domains_dict[di]:
                    if am == 'control':
                        # An attachment in control mode does not
                        # prevent additional attachments
                        continue
                    if p_uri == partition.uri and \
                            am == hmc_access_mode:
                        # This is our target partition, and the
                        # domain is already attached in the desired
                        # mode.
                        continue
                    p = all_partitions[p_uri]
                    conflicting_domains[di] = (am, p.name)
    return conflicting_domains
08aaf58df8eea1054341f2dcbff93f143edd78ec
65,779
def fizzbuzz(a, b, n):
    """Takes integers a, b, and n. Returns a string respective to the
    fizzbuzz specification: 'FB' when n is divisible by a*b, 'F' for a
    alone, 'B' for b alone, otherwise the number itself."""
    if n % (a * b) == 0:
        return 'FB'
    if n % a == 0:
        return 'F'
    if n % b == 0:
        return 'B'
    return str(n)
a00b7681a4c09328d4fac33f27a357f30ccc764a
65,784
def week_delta(start, stop):
    """
    Takes two datetime objects and returns the number of whole weeks
    between them.

    :param datetime.datetime start:
    :param datetime.datetime stop:
    :return int:
    """
    one_seventh = (stop - start) / 7
    return one_seventh.days
b20105aaf2a9df4b42a86edeb4a6de1a0c89fbbd
65,788
def new_overlay_size(overlay, ratio):
    """
    Scale *overlay*'s dimensions by *ratio*.

    Returns a tuple containing the new integer (width, height).
    """
    width, height = overlay.size
    return (int(width * ratio), int(height * ratio))
a825ee290594a2d089dc7129741747b758bf21f9
65,789
import torch


def rollout(tensor):
    """
    Collapse the first two dimensions: B x C x ... -> (B*C) x ...
    """
    shape = tensor.shape
    merged = [shape[0] * shape[1], *shape[2:]]
    return torch.reshape(tensor, merged)
264b3a6e52f439e1c6fc3d8ad38a54ffb4201df5
65,790
import requests import gzip from bs4 import BeautifulSoup def href_in_gz(link): """
    Returns all application hyperlinks in the xml.gz sitemap at *link*.
    The <loc> entries are rewritten from apkpure.com to m.apkpure.com.

    - link: the xml.gz link to get all application hyperlinks

    NOTE(review): the doctest below performs a live network request.

    >>> link = 'https://apkpure.com/sitemaps/art_and_design-2.xml.gz'
    >>> xml_hrefs = href_in_gz(link)
    >>> xml_hrefs[0]
    'https://m.apkpure.com/art-what/com.tradefwd.ArtWhat'
    """ resp = requests.get(link) # Sitemap is gzip-compressed XML. data = gzip.decompress(resp.content) soup = BeautifulSoup(data, features='lxml') xml_hrefs = [l.get_text().replace('apkpure.com', 'm.apkpure.com') for l in soup('loc')] return xml_hrefs
d4af69d7c38cd24c3324535dfdcc989732b2ff35
65,791
def beta_exp(a, b):
    """
    Expected value of a Beta(a, b) distribution: a / (a + b).
    https://en.wikipedia.org/wiki/Beta_distribution
    """
    total = a + b
    return a / total
92a10f1e091fbb857b513995a4ad628111e7f71d
65,794
def truncate(text="", max_len=50):
    """
    Ensure a string complies to the maximum length specified.

    :param text: Text to be checked for length and truncated if necessary
    :type text: str
    :param max_len: Max length of the returned string
    :type max_len: int, optional
    :return: Text in :param text: truncated to :param max_len: if necessary
    :rtype: str
    """
    if len(text) < max_len:
        return text
    return text[:max_len]
c5522d7d19b397a317cb6eccbc6cb71fd1316476
65,795
def StringifyBuildEntry(entry):
    """Pretty print a build entry.

    Args:
        entry: a build entry from MakeBuildEntry.

    Returns:
        A printable string of the form
        "<build_id> <builder_name> <build_number>: <suite ids>".
    """
    suites = ' '.join(entry.get('suite_ids', []))
    return '%s %s %d: %s' % (
        entry['build_id'], entry['builder_name'], entry['build_number'], suites)
c38cc10860d130d67e4ff3ffcf514f653546afa2
65,796
def _make_square(x, y, w, h):
    """Force the x, y slices into a square by expanding the smaller dimension.

    If the smaller dimension can't be expanded enough and still fit in the
    maximum allowed size, the larger dimension is contracted as needed.

    Args:
        x, y: slice objects
        w, h: the (width, height) of the maximum allowed size

    Returns:
        x and y slices that define a square
    """
    ws = x.stop - x.start  # current width spanned by x
    hs = y.stop - y.start  # current height spanned by y
    dx = hs - ws  # how much x must grow for the spans to match
    if dx < 0:
        # x is the larger span: swap the axes, solve, and swap the result back.
        return _make_square(y, x, h, w)[::-1]
    # subimage is now always skinny
    def pad(z, dz, zmax):
        # Grow slice z by dz in total, split roughly evenly between both
        # sides, then shift by ddz so the result stays inside [0, zmax].
        dz1 = int(0.5 * dz)
        dz2 = dz - dz1
        ddz = max(0, dz1 - z.start) - max(0, z.stop + dz2 - zmax)
        return slice(z.start - dz1 + ddz, z.stop + dz2 + ddz)
    # dy <= 0: if a square of side hs cannot fit inside width w,
    # contract both dimensions by the shortfall.
    dy = min(0, w - dx - ws)
    return pad(x, dx + dy, w), pad(y, dy, h)
5cc5452d773cecb5e81650b7a2504b741dc374ba
65,798
def is_num(obj: SupportsFloat) -> bool:
    """Return True if `obj` can be cast to float.

    Both ValueError (e.g. float("abc")) and TypeError (e.g. float(None),
    float([])) mean the cast is impossible; the original only caught
    ValueError, so a TypeError escaped to the caller instead of
    returning False as the docstring promises.
    """
    try:
        float(obj)
    except (ValueError, TypeError):
        return False
    return True
2c6b31c19a69ac87c14c11cdb2978d89bbf7aa62
65,799
def option_menu() -> int:
    """
    Ask user to choose option.

    :precondition: input must be a number that corresponds with an option
    :postcondition: will return the user's choice as an int
    :return: input as an int
    """
    # Loop until the user types something parseable as an int; the value is
    # NOT range-checked here — the caller must handle out-of-range choices.
    while True:
        print("Please select an option from the following menu.")
        try:
            return int(input("""
    1. Global Statistics
    2. Information about my Country
    3. Search by Country
    4. News Articles
    5. Search Stocks
    6. Am I Eligible for the Canadian Emergency Response Benefit Funding?
    7. Show effect of COVID-19 on DOW Jones Index
    8. Quit
    \n""").strip())
        except ValueError:
            # Non-numeric input: re-prompt instead of crashing.
            print("Please input a number that corresponds to an option on the menu.")
f1e5774fb50dc3722bb6f0684bbdec21113d96d3
65,803
def _parse_line(line): """ Parses a line into key, value tuple :return key, value tuple """ if line: tokens = line.split('=', 2) if len(tokens) == 2: return tokens[0].strip(), tokens[1].strip() return None, None
9e7b731e435f115d28ef3601cbfec97071af83b4
65,806
def get_csv_counts(
    file: Union[str, Path], separator: Optional[str] = None
) -> Dict[int, List[str]]:
    """
    Group the lines of a delimited text file by their separator count,
    i.e. collect semantically consistent csv lines together.

    Parameters
    ----------
    file : a string or pathlib.Path usable with `open(file, "r")`.
    separator: (optional) string taken as the column delimiter;
        defaults to ",".

    Returns
    -------
    A dictionary mapping a separator count to the list of lines (newlines
    included) that contain exactly that many separators, e.g.

        {
            3: ["x1,x2,x3", "11,21,31", "12,22,33"],
            2: ["extrainfo,date", "none,2020-05-05"],
        }
    """
    delimiter: str = separator or ","
    with open(file, "r") as handle:
        raw_lines: List[str] = handle.readlines()
    grouped: Dict[int, List[str]] = {}
    for row in raw_lines:
        grouped.setdefault(row.count(delimiter), []).append(row)
    return grouped
eb28973f9b3d31d363814332c29448a15bb3319e
65,808
def resize_string(in_str, target_size, filler_char='a'):
    """Clip or pad `in_str` so the result is exactly `target_size` long.

    Longer inputs are truncated; shorter ones are right-padded with
    `filler_char`.
    """
    clipped = in_str[:target_size]
    padding = filler_char * (target_size - len(clipped))
    return clipped + padding
af54b2cf6a4874fc705c014db0963884e30c1326
65,818
def gamma_encode(x):
    """Gamma-encode linear RGB data into `viewable values
    <https://en.wikipedia.org/wiki/Gamma_correction>`_ (gamma 2.2)."""
    inverse_gamma = 1 / 2.2
    return x ** inverse_gamma
b4ff3636be55e4ea21f30f8bc5af3b92ab6745d5
65,823
def get_url(url: str, params: Any = None, proxies: Any = None) -> str:
    """Fetch `url` via HTTP GET and return the body as text.

    :param url: url
    :param params: parameter series
    :param proxies: use proxies or not, default None
    :return: text of content in the given url
    :raises requests.HTTPError: when the response status is not 2xx
    """
    response = requests.get(url, params=params, proxies=proxies)
    response.raise_for_status()
    return response.text
3a2b405d9ac87eb38e29342ebded6b2d155c92e3
65,824
def read_json(fname: Path) -> Dict[str, Any]:
    """
    Reads JSON file, preserving key order.

    Parameters
    ----------
    fname : pathlib.Path
        File path to the JSON file in question.

    Returns
    -------
    dict of {str, Any}
        JSON contents as an OrderedDict.
    """
    handle: IO
    with fname.open("rt") as handle:
        # object_pairs_hook is the documented way to build an OrderedDict
        # directly from the parsed key/value pairs; the previous
        # object_hook=OrderedDict merely converted an already-built dict.
        return json.load(handle, object_pairs_hook=OrderedDict)
9dca60f94d1c48331ce54f6fcb687c5d28e6351d
65,826
def validate_ascii_domain(domain_str):
    """
    Validates ASCII domain str is compliant
    :param domain_str:
    :return: True for Compliant, False for non-compliant.
    """
    allowed = set('abcdefghijklmnopqrstuvwxyz'
                  'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  '0123456789-_.')
    return all(char in allowed for char in domain_str)
f0f94548dd28dbdeccae41b7e2e786f359d1a622
65,827
def process_primary_inputs(dict_):
    """
    This function processes the parameters specified by the user in the
    initialization dictionary.

    Parameters
    ----------
    dict_: dict
        Estimation dictionary. Returned by grmpy.read(init_file).

    Returns
    -------
    bins: int
        Number of histogram bins used to determine common support.
    logit: bool
        Probability model for the choice equation.
        If True: logit, else: probit.
    bandwidth: float
        Kernel bandwidth for the local polynomial regression.
    gridsize: int
        Number of equally-spaced grid points of u_D over which the
        MTE shall be estimated.
    start_grid: int
        Start point of the grid of unobservable resistance (u_D),
        over which the MTE is evaluated.
    endgrid: int
        End point of the grid of unobservable resistance (u_D),
        over which the MTE is evaluated.
    """
    # The repeated try/except KeyError/else ladders are equivalent to
    # dictionary lookups with defaults; .get expresses that directly.
    # A missing "ESTIMATION" section yields all defaults, as before.
    estimation = dict_.get("ESTIMATION", {})
    bins = estimation.get("bins", 25)
    logit = estimation.get("logit", True)
    bandwidth = estimation.get("bandwidth", 0.32)
    gridsize = estimation.get("gridsize", 500)
    prop_score_range = estimation.get("ps_range", [0.005, 0.995])
    start_grid = prop_score_range[0]
    endgrid = prop_score_range[1]

    return bins, logit, bandwidth, gridsize, start_grid, endgrid
0fa412acd732530e36f43bfeb0ac75768c257d30
65,830
def transformation_results_yaml(topology,ignore=('addressing','defaults','nodes_map','includes')):
  """
  Return the results of topology transformation YAML format

  * Remove elements that are not relevant for comparison
  * Create YAML text out of the remaining dictionary
  """
  # Strip bookkeeping keys that would cause spurious diffs between runs.
  # NOTE: this mutates the caller's topology object in place.
  for k in ignore:
    topology.pop(k,None)

  # The two bare triple-quoted strings below are disabled ("Temporary")
  # transformation steps kept for reference; they have no runtime effect.
  """
  Temporary: replace interfaces list within links with dictionary of node interfaces

  if 'links' in topology:
    for l in topology.links:
      for n in l.get('interfaces',[]):
        node = n.node
        n.pop('node',None)
        l[node] = n
      l.pop('interfaces',None)
  """

  """
  Temporary: rename node interfaces to links

  for n in topology.nodes.values():
    if 'interfaces' in n:
      n.links = n.interfaces
      n.pop('interfaces',None)
  """

  """
  If we're using a dictionary extension that has to_yaml method use that,
  otherwise use pyyaml (hoping it won't generate extraneous attributes)
  """
  if callable(getattr(topology,"to_yaml",None)):
    return topology.to_yaml()
  else:
    return yaml.dump(topology)
90ae8e6420c85b91fc7050dac9972d82bc348def
65,836
def getImName(image_path, folder_path):
    """Derive an image's file name by stripping the folder prefix.

    Parameters:
    -image_path: path of image in the folder
    -folder_path: path (glob pattern, e.g. "dir/*.jpg") of the folder
        that contains the images

    Returns:
    -name: name of the image
    """
    # Turn the glob pattern back into the bare folder path, then strip the
    # Windows-style "folder\" prefix from the image path.
    folder_prefix = folder_path.replace("/*.jpg", "") + "\\"
    name = image_path.replace(folder_prefix, "")
    print(name)
    return name
e1b6a432d06f7c45d0a7408b52fcfc6464da2b07
65,848
def average_heart_rate(heart_rates):
    """Return the arithmetic mean of a list of heart rates.

    Args:
        heart_rates (list): a list of heart rates

    Returns:
        float: the average heart rate
    """
    return sum(heart_rates) / len(heart_rates)
0f8fbc1643306e42bed463528995894e1d105998
65,851
def difference(li1, li2):
    """Symmetric difference of two lists.

    Returns the items unique to `li1` followed by the items unique to
    `li2` (duplicates removed, element order within each half unspecified).
    """
    only_first = set(li1) - set(li2)
    only_second = set(li2) - set(li1)
    return [*only_first, *only_second]
c2e9b30c5eeed330f7a10531c4269c6aa43e3ab7
65,856
def unescape(inp, char_pairs):
    """Unescape reserved characters specified in the list of tuples
    `char_pairs`, applying the pairs in reverse order.

    Parameters
    ----------
    inp : str
        Input string
    char_pairs : list of (str, str)
        (replacement, escaped-form) tuples

    Returns
    -------
    str
        Unescaped output

    See also
    --------
    escape_GFF3
    """
    for pair in char_pairs[::-1]:
        replacement, escaped = pair
        inp = inp.replace(escaped, replacement)
    return inp
ae3ad4e66ed68218e4612fa0c40f4d4a71266ace
65,858
def response_parser(response):
    """Parse an HTTP response body from JSON into a dict.

    Raises:
        Exception: when the body is not valid JSON, or when it carries an
            OAuth-style "error" payload.
    """
    try:
        payload = response.json()
    except ValueError:
        raise Exception(
            "'response' is not JSON serializable: '{}'".format(response.text)
        )
    if "error" in payload:
        raise Exception("{error}: {description}".format(
            error=payload["error"],
            description=payload["error_description"]
        ))
    return payload
b945d9c1d8da29cfc15bf44ace66a2477ce2cc95
65,861
def get_inputs(filename):
    """
    Read the starting decks of each player from `filename`.

    Decks in the file are separated by blank lines; the first line of each
    deck is a header (e.g. "Player 1:") and is skipped.

    Returns a list of lists of ints, one inner list per player's deck.
    """
    with open(filename, 'r') as input_file:
        raw_data = input_file.read().split('\n\n')
    # The original enumerated the decks but never used the index; a
    # comprehension expresses the parse directly.
    return [
        [int(card) for card in raw_deck.splitlines()[1:]]
        for raw_deck in raw_data
    ]
26450413e585523881fd66a92410940772761926
65,862
def printNamespaces(params: dict = None) -> str:
    """Build Turtle @prefix declarations for the given namespaces.

    Args:
        params: dict with a "namespaces" key mapping URI -> prefix name

    Returns:
        str: one "@prefix name: <uri#>.\\n" line per namespace; a '#' is
        appended to URIs that do not already end with one.

    Raises:
        KeyError: if "namespaces" is missing (including when params is
            omitted — same behavior as the original empty-dict default).
    """
    # A mutable default argument ({}) is a classic Python pitfall; use None
    # as the sentinel instead.
    if params is None:
        params = {}
    namespaces = params["namespaces"]
    lines = []
    for uri, prefix in namespaces.items():
        suffix = "" if uri.endswith("#") else "#"
        lines.append(f"@prefix {prefix}: <{uri}{suffix}>.\n")
    return "".join(lines)
8bfe6e977bc47a07bb58643fdd1b2d572e3e6277
65,863
def flip(info):
    """
    Flip a heatmap info dict horizontally.

    :param info: dict, heatmaps info
    :return: dict, heatmaps info (deep copy; the input is left unmodified)
    """
    flipped = copy.deepcopy(info)
    for cwh in flipped['cwh_list']:
        center_x, center_y = cwh['center']
        # Mirror the x coordinate around the image width.
        cwh['center'] = (flipped['img_width'] - center_x, center_y)
    return flipped
dbcd585aa88f2739e98139564b390d6b91f19116
65,866
def find_subset_sum(values, target):
    """Find a subset of `values` whose elements sum to `target`.

    Memoized dynamic programming counts the feasible subsets of each
    suffix; a single greedy pass then keeps each value that still leaves
    a solvable remainder. Returns an empty list when no subset works.

    Implementation adapted from:
    https://github.com/saltycrane/subset-sum/blob/master/subsetsum/stackoverflow.py

    @param values: List of integers
    @param target: The sum that we need to find a subset to equal to it.
    """
    cache = dict()

    def count_solutions(idx, remaining):
        # Number of subsets of values[idx:] summing exactly to `remaining`.
        if idx >= len(values):
            return 1 if remaining == 0 else 0
        key = (idx, remaining)
        if key not in cache:
            total = count_solutions(idx + 1, remaining)
            total += count_solutions(idx + 1, remaining - values[idx])
            cache[key] = total
        return cache[key]

    chosen = []
    remaining = target
    for idx, value in enumerate(values):
        # Keep `value` if some solution still exists once it is included.
        if count_solutions(idx + 1, remaining - value) > 0:
            chosen.append(value)
            remaining -= value
    return chosen
93b3062c710d617a20bd065e6d43c072dd7ad2fa
65,869
from pathlib import Path def _write_source_to_file(path: Path, source: str) -> str: """Write source code to a file, returning a string of its path.""" with open(path, "w", encoding="UTF-8") as file: file.write(source) return str(path)
39548faed7343b4310daa2e1cc67e9d9eac0a904
65,872
def get_price(data, key):
    """Extract a price from `data` as a float.

    Args:
        data (dict): the data containing the price
        key (str): the key of the target price.

    Returns:
        float: the price, or None when `key` is absent from `data`.
    """
    raw = data.get(key)
    return None if raw is None else float(raw)
559c93d868b8bd4e2cb1c9f9e1d52216c17f6c5f
65,876
def get_children(node):
    """
    Retrieve the children (and their dict keys or list/tuple indices) of
    an ASDF tree node.

    Parameters
    ----------
    node : object
        an ASDF tree node

    Returns
    -------
    list of (object, object) tuples
        list of (identifier, child node) tuples, or empty list if the
        node has no children (either it is an empty container, or is a
        non-container type)
    """
    if isinstance(node, dict):
        return [(key, value) for key, value in node.items()]
    if isinstance(node, (list, tuple)):
        return [(index, item) for index, item in enumerate(node)]
    return []
38ad980ecde454f757b93ab3947e09a62fdf321a
65,877
def get_parent_path(full_path):
    """Return the parent path for full_path.

    A trailing '/' (as on group paths) is ignored; the root's children
    yield "/".
    """
    # rstrip in case full_path is a group (ending with '/')
    trimmed = full_path.rstrip('/')
    parent, _node = trimmed.rsplit('/', 1)
    return parent if parent else "/"
61f208dd1646990d09b05daf95156b6d6601a286
65,878
def _indent(text, amount, ch=' '): """Return the indent text, where each line is indented by `amount` characters `ch`.""" padding = amount * ch return ''.join(padding + line for line in text.splitlines(True))
49ce38c083e24f0e16ac9433f990bca05b6567aa
65,881
def call_with_dictionary(fn, kwargs):
    """
    Call `fn`, forwarding only the entries of `kwargs` whose keys appear
    as parameter names in fn's signature.
    """
    accepted = inspect.signature(fn).parameters
    filtered = {name: kwargs[name] for name in kwargs if name in accepted}
    return fn(**filtered)
ab728c69641a0060dfdcb2949dde41aafb4a7549
65,882
def deserialize_boolean_options(options: Dict) -> Dict[str, Union[str, bool]]:
    """Convert the strings 'True'/'False' (surrounding whitespace ignored)
    in `options` to real booleans, in place, and return the dict.
    """
    for key, value in options.items():
        if not isinstance(value, str):
            continue
        stripped = value.strip()
        if stripped == 'True':
            options[key] = True
        elif stripped == 'False':
            options[key] = False
    return options
da0adb8cbce64b6cc450acacb56757e3ea545cd8
65,884
def from_dot_notation(field: str, obj: Dict[Any, Any]) -> Any:
    """
    Retrieve a value from the configuration using dot-notation.

    Dot-notation means nested fields can be accessed by concatenating all
    the parents and the key with a "." (e.g. db.driver.name). Missing
    intermediate keys are created as empty dicts in `obj` (the input is
    mutated), so looking up an absent path returns {}.

    Args:
        field: The field (in dot-notation) to access
        obj: The object to access using dot-notation

    Returns:
        value: The value at the specified key, in the specified obj
    """
    current = obj
    for part in field.split("."):
        # Insert an empty dict for keys that do not exist yet.
        if part not in current:
            current[part] = {}
        current = current[part]
    return current
34f932e3010a7be87617a74a711794fc55c3496f
65,887
def compute_survival_rate(sMC_particle_ancestries):
    """
    Compute the time-series survival rate as a function of resamples.

    Parameters
    ----------
    sMC_particle_ancestries : dict of {_direction : list(np.array(ints))}
        dict of the particle ancestor indices

    Returns
    -------
    survival_rate : dict of {_direction : list(float)}
        fraction of distinct surviving ancestors at each step, relative
        to the number of particles at step 0
    """
    survival_rate = {}
    for direction, ancestry_steps in sMC_particle_ancestries.items():
        initial_count = len(ancestry_steps[0])
        survival_rate[direction] = [
            len(set(step_ancestors)) / initial_count
            for step_ancestors in ancestry_steps
        ]
    return survival_rate
28cfaada9d115e9449b7e40607bd102d635518c7
65,894
def SetBuildArtifacts(images, messages, release_config):
  """Set build_artifacts field of the release message.

  Args:
    images: dict[str,dict], docker image name and tag dictionary.
    messages: Module containing the Cloud Deploy messages.
    release_config: apitools.base.protorpclite.messages.Message, Cloud Deploy
      release message.

  Returns:
    Cloud Deploy release message.
  """
  if not images:
    return release_config
  # sorted() keeps the output deterministic (tests rely on the ordering);
  # dict.items() replaces the Python-2-era six.iteritems shim.
  release_config.buildArtifacts = [
      messages.BuildArtifact(image=image, tag=tag)
      for image, tag in sorted(images.items())
  ]
  return release_config
d0d50dae11b16b3d04bd16ed6cb8c68604905218
65,897
def filter_values_by_param(vcard, key, param_key, param_value):
    """
    Return a list of values collected for vcard and filtered by parameters.

    arguments:
    vcard -- the vcard to read from (presumably a vobject vCard — TODO confirm)
    key -- the attribute key to collect values for
    param_key -- the name of the parameter to filter by
                 if starting by '!', means exclude those values
    param_value -- the value of the parameter
    """
    fvalues = []
    # excluding
    exclude = False
    if param_value.startswith('!'):
        param_value = param_value[1:]
        exclude = True
    # force upper case value
    param_value = param_value.upper()
    if hasattr(vcard, key) and getattr(vcard, key + "_list"):
        # for each values for that key
        for v in getattr(vcard, key + "_list"):
            # define if we collect it or not
            append = False
            # NOTE(review): the comparison relies on params.get returning a
            # one-element list whose str() is exactly "['VALUE']" — confirm
            # against the vcard library in use.
            if str(v.params.get(param_key)).upper() == "['" + param_value + "']":
                if not exclude:
                    append = True
            elif exclude:
                # value whose parameter does NOT match, in exclude mode
                append = True
            # appending the value
            if append:
                if key == 'n':
                    # collapse runs of spaces in structured name values
                    fvalues.append(re.sub(' +', ' ', str(v.value)).strip())
                elif key == 'org' and isinstance(v.value, list):
                    for vv in v.value:
                        fvalues.append(vv.strip())
                else:
                    fvalues.append(str(v.value).strip())
    return fvalues
13c6da916625d0d2c08f69403c24084ab2e99bad
65,900