content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def generate_comments(reddit, submission_id):
    """
    Fetch the comment forest for a given Reddit submission.

    Parameters
    ----------
    reddit: praw.Reddit
        A PRAW Reddit API instance
    submission_id: int
        The id of the subreddit submission whose comments we want

    Returns
    -------
    submission.comments: praw.models.comment_forest.CommentForest
        A Reddit CommentForest that can be iterated through
    """
    # Resolve the submission by id, then hand back its comment forest
    # (folded comments are loaded lazily by PRAW).
    found = reddit.submission(id=submission_id)
    return found.comments
6daa6354d5610e77b5819c21357344a722ec727d
106,268
def drop_single_coords(da): """ drops all dimensions if there is only a single coordinate Parameters ---------- da : xr.DataArray Returns ------- da : xr.DataArray """ # import pdb; pdb.set_trace() for coord in da.coords: if coord in da.dims: if len(da.coords[coord].values) == 1: print('Found only one value for coords '+ coord +' Dropping') da = da.isel({coord: 0}).drop(coord) else: print('Coordinate '+ coord +' had no corresponding dimension, dropping') da = da.drop(coord) return da
8a585d3945c4631588c02e211479c0e5e4658df2
106,269
import pickle


def read_object(filename):
    """
    Load and return a pickled Python object from disk.

    :param filename: path to pickle file
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
b0c0e510bb3029055036df6b38f7653c6affc0f0
106,270
def strict_range(value, values):
    """
    Provides a validator function that returns the value if its value
    is less than the maximum and greater than the minimum of the
    range. Otherwise it raises a ValueError.

    :param value: A value to test
    :param values: A range of values (range, list, etc.)
    :raises: ValueError if the value is out of the range
    """
    # Hoist min/max: the original recomputed them up to four times.
    lower, upper = min(values), max(values)
    if lower <= value <= upper:
        return value
    raise ValueError('Value of {:g} is not in range [{:g},{:g}]'.format(
        value, lower, upper
    ))
09893df9a51844413f85e7b0bb8cf5dacfe17888
106,283
import six


def _func_type(func):
    """Classify a callable as 'function', 'method' or 'classmethod'.

    Decided by the name of its first positional argument:
    'self' -> method, 'cls' -> classmethod, anything else -> function.
    """
    code = six.get_function_code(func)
    arg_names = code.co_varnames[:code.co_argcount]
    if arg_names:
        first = arg_names[0]
        if first == 'self':
            return 'method'
        if first == 'cls':
            return 'classmethod'
    return 'function'
32969a5fd855c698982bb0012ee2989ba48ff049
106,284
def inside(v, vecs):
    """Return True if vector ``v`` equals (elementwise) any vector in ``vecs``."""
    return any((candidate == v).all() for candidate in vecs)
f1223f8b5d774595cebc19c54fd10b6198bccc7b
106,285
def join_list(delimiter):
    """Joins a list into a string using the delimiter.

    This is just a wrapper for string.join.

    Args:
      delimiter: The delimiter to use when joining the string.

    Returns:
      A function that joins the list into a string with the delimiter.
    """
    return lambda value: delimiter.join(value)
9eb8241db6b53c206405df0ab5dfb2dcc2f9817c
106,297
def slm_to_lpm(slm, pgas, tgas):
    """
    Convert volumetric gas flow from standard liters per minute (SLM or
    SLPM) to liters per minute (LPM), where STP is 273.15 K / 101,325 Pa.

    .. math:: 1 LPM = 1 SLPM \\times \\frac{T_{gas}}{273.15\\,K} \\times \\frac{14.696\\,psi}{P_{gas}}

    Parameters
    ----------
    slm : float
        Volumetric gas flow in standard liters per minute [SLM]
    pgas : float
        Absolute gas pressure [kPa]
    tgas : float
        Gas temperature [K]

    Returns
    -------
    lpm : float
        Volumetric gas flow in liters per minute [LPM]

    References
    ----------
    Wikipedia contributors. (2018, February 8). Standard litre per minute.
    https://en.wikipedia.org/wiki/Standard_litre_per_minute
    """
    # Equation requires gas pressure in psi; convert kPa -> psi first.
    pressure_psi = pgas * 0.1450377
    temperature_ratio = tgas / 273.15
    pressure_ratio = 14.696 / pressure_psi
    return slm * temperature_ratio * pressure_ratio
f29743ea1cfd153592c37e64f365054f4a11c553
106,298
def _skip(item):
    """Return the truthiness of a dict's 'skip' entry; False for non-dicts."""
    return item.get("skip", False) if isinstance(item, dict) else False
52bf196d4de9984041e35720e894538004f65576
106,303
def fuzzy_get(_dict, aliases, default):
    """
    Look up a value by a list of alias keys.

    Returns the value for the first alias present in ``_dict``; if no
    alias is found, returns ``default``.

    :param _dict: mapping to search
    :param aliases: iterable of candidate keys, tried in order
    :param default: value returned when no alias matches
    """
    # Explicit post-loop return replaces the original's for/else, whose
    # attachment was ambiguous once the source was collapsed.
    for name in aliases:
        if name in _dict:
            return _dict[name]
    return default
2df15490f32218032b6003b1d278e8808e304bed
106,308
import unicodedata


def is_unicode_category(character, category):
    """Return True if a non-ASCII character's major Unicode category
    matches ``category`` (e.g. 'L' for letters).

    NOTE: ASCII characters (ord <= 127) always yield False, as in the
    original implementation.
    """
    if ord(character) <= 127:
        return False
    return unicodedata.category(character)[0] == category
c53e317504fd1ec1910d75576aa3d0ec56ebd605
106,312
def parse_enum_csv(key, value, enumeration, count=None):
    """
    Parses a comma separated listing of enumeration keys, returning the
    corresponding enumeration values. Intended as a helper for config
    handlers; checks are case insensitive.

    The **count** attribute can be used to make assertions based on the
    number of values:

      * None -> no restrictions
      * int -> exactly this many values
      * (int, int) -> inclusive range; either bound may be None

    :param str key: configuration key being looked up
    :param str value: value to be parsed
    :param stem.util.enum.Enum enumeration: enumeration the values should be in
    :param int,tuple count: validates that we have this many items

    :returns: list with the enumeration values

    :raises: **ValueError** if the count assertion fails or the **value**
      entries don't match the enumeration keys
    """
    values = [val.upper().strip() for val in value.split(',')]

    if values == ['']:
        return []

    if count is None:
        pass  # no count validation checks to do
    elif isinstance(count, int):
        if len(values) != count:
            raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value))
    elif isinstance(count, tuple) and len(count) == 2:
        minimum, maximum = count

        if minimum is not None and len(values) < minimum:
            raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value))

        if maximum is not None and len(values) > maximum:
            raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value))
    else:
        # Rejoined the message that extraction broke across lines, and
        # dropped the stray trailing quote from the format string.
        raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)" % (count, type(count)))

    result = []
    enum_keys = [k.upper() for k in list(enumeration.keys())]
    enum_values = list(enumeration)

    for val in values:
        if val in enum_keys:
            result.append(enum_values[enum_keys.index(val)])
        else:
            raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys)))

    return result
ff73794d8b64f96396a0d94cf8a93ecedc6afecb
106,316
def eosEngineID(dev_mac):
    """
    Return the SNMP Engine ID generated from the System MAC
    this is the version used in EOS
    """
    mac_hex = str(dev_mac).replace(":", "")
    return "".join(("f5717f", mac_hex, "00"))
3bccfa697ebf584ab64aef1faa8af9355857aa17
106,317
def flatten_array(x):
    """
    Flatten one level of nesting; return ``x`` unchanged if its items
    are not themselves iterable.

    Args:
        x (array, array of arrays)

    Returns:
        flattened_array (array)
    """
    try:
        return [element for group in x for element in group]
    except TypeError:
        return x
d09a5b187a43c81fdb61cfd434971356ab4fc9ab
106,319
from typing import Sequence


def __packages_to_not_install() -> Sequence[str]:
    """Returns a list of packages to be not installed."""
    excluded = ("*.tests", "*.tests.*", "tests.*", "tests")
    return excluded
40d0a8d286386f03b1c999c20212901abbf287db
106,320
def Subtract(u, v):
    """
    Returns difference between two vectors.

    res = u - v

    Returns a list; the original returned ``map(...)``, which under
    Python 3 is a one-shot iterator rather than a sequence.
    """
    assert len(u) == len(v)
    return [a - b for a, b in zip(u, v)]
9dc500be028e55686a713209a2544b9a7a6899d0
106,323
def get_auth_token(config):
    """Ensures an auth token exists.

    If a token is already present in the config, returns the token.
    Otherwise raises with instructions for creating or setting one.

    Args:
      config |{str:str}| = A dictionary of settings from the configuration file.

    Returns:
      auth_token |str| = The user's auth token.

    Raises:
      RuntimeError when no (or an empty) token is configured.
    """
    token = config["auth_token"] if "auth_token" in config else ""
    if len(token) == 0:
        message = ('Error: No authentication token found!\n\n'
                   'Run the "token create" subcommand or manually update the\n'
                   'configuration file with a valid authorization token using\n'
                   '"vmpooler_client config set auth_token AUTH_TOKEN"')
        raise RuntimeError(message)
    return token
022e974cffdcff8f6aade6a7a122a5c582809ee7
106,325
import requests


def find_anaconda_versions(name, anaconda_channel='bioconda'):
    """
    Find a list of available anaconda versions for a given container name
    """
    page = requests.get(f"https://anaconda.org/{anaconda_channel}/{name}/files")
    # Keep any line that links to a linux download; the URL is the first
    # double-quoted token on the line.
    return [row.split('"')[1]
            for row in page.text.split('\n')
            if 'download/linux' in row]
f174ec55163914220de9745acd00f9263ef26c1d
106,327
def _predict_proba_json_tree(json_tree, sample):
    """
    Recursively follow the path of a sample through the JSON tree and
    return the resulting leaf's value.

    Returns None if a referenced child node id is missing, matching the
    original fall-through behavior.
    """
    while "leaf" not in json_tree:
        feature = json_tree["split"]
        branch = "yes" if sample[feature] <= json_tree["split_condition"] else "no"
        target = json_tree[branch]
        match = None
        for child in json_tree["children"]:
            if child["nodeid"] == target:
                match = child
                break
        if match is None:
            return None
        json_tree = match
    return json_tree["leaf"]
627852633c2bca56659a09c730bd296623332a79
106,331
def shape(x):
    """Returns tensor shape as a tuple of integers, None dimensions as -1."""
    dims = x.get_shape().as_list()
    return tuple(-1 if size is None else size for size in dims)
a76095113f54fc360e36536b846f10430c22dd61
106,333
import torch
import random


def one_hot_tensor(n: int) -> torch.Tensor:
    """
    Sample a one hot vector of length n, return as a torch Tensor.
    """
    index = random.randrange(n)
    vec = torch.zeros(n)
    vec[index] = 1.0
    return vec
d94b3d39237b2227eea4f9310d994721716eb6d2
106,343
import re


def guid_to_squid(guid):
    """
    Converts a GUID to a compressed guid (SQUID)

    Each Guid has 5 parts separated by '-'. For the first three each one
    will be totally reversed, and for the remaining two each one will be
    reversed by every other character. Then the final compressed Guid
    will be constructed by concatenating all the reversed parts without '-'.

    .. Example::

        Input:                 {2BE0FA87-5B36-43CF-95C8-C68D6673FB94}
        Reversed:              78AF0EB2-63B5-FC34-598C-6CD86637BF49
        Final Compressed Guid: 78AF0EB263B5FC34598C6CD86637BF49

    Args:

        guid (str): A valid GUID, wrapped in curly braces (the pattern
            below requires them)

    Returns:
        str: A valid compressed GUID (SQUID), or an empty string when
        the input does not match the brace-wrapped GUID pattern
    """
    # The last two GUID parts are captured two characters at a time so
    # each byte pair can be reversed independently.
    guid_pattern = re.compile(r'^\{(\w{8})-(\w{4})-(\w{4})-(\w\w)(\w\w)-(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)(\w\w)\}$')
    guid_match = guid_pattern.match(guid)
    squid = ''
    if guid_match is not None:
        # Groups 1..11: reverse each captured chunk and concatenate.
        for index in range(1, 12):
            squid += guid_match.group(index)[::-1]
    return squid
0d2ea76414ae5e4ed2d94e1688a9183d43fb2ed7
106,344
def three_stats(N, X):
    """
    Return the mean, median, and mode of the N numbers in X.

    Mean and median are rounded to one decimal place; on a frequency
    tie the mode is the smallest most-frequent value.
    """
    ordered = sorted(X)
    total = 0
    counts = {}
    best_count = 0
    mode = ordered[0]
    for value in ordered:
        total += value
        counts[value] = counts.get(value, 0) + 1
        # Strictly-greater test plus ascending order keeps the smallest
        # most-frequent value as the mode.
        if counts[value] > best_count:
            best_count = counts[value]
            mode = value
    if N % 2:  # odd
        median = ordered[N // 2]
    else:  # even: midpoint of the two central values
        median = (ordered[N // 2 - 1] + ordered[N // 2]) / 2
    return round(float(total / N), 1), round(float(median), 1), mode
84b8ccd659c82bed2d9bfd52e95b65ae4c2f5978
106,345
def scaling(dt, amplitude, dx):
    """Calculate scaling factor for integrated entropy from dt, amplitude, dx

    Args:
        dt (float): Difference in theta of hot and cold (in units of
            plunger gate). Note: using lock-in, dT is 1/2 the peak to
            peak; for a square wave it is the full dT
        amplitude (float): Amplitude of charge transition from the CS
        dx (float): Size of the DAC steps in units of plunger gate
            (relative to the data passed in, not necessarily the
            original x_array)

    Returns:
        float: Factor to multiply the cumulative sum of data by to
        convert it to entropy
    """
    # Same left-to-right division order as the original expression
    # (dx / amplitude / dt) to keep floating point results identical.
    per_amplitude = dx / amplitude
    return per_amplitude / dt
588958aa98383f451c567608fa9ae4d51ce40fae
106,346
import collections
import math


def examineSubstrings(data, length, z=3.0, dividerMultiple=20.0):
    """
    Examines substrings of the data of a given length, looking for patterns.

    Prints the most common substrings, then looks for substrings common
    enough (statistically, via a proportion confidence interval) to be
    token dividers, printing the tokens they separate.

    :param data: bytes
    :param length: int Substring length to examine
    :param z: float  z-score for the confidence interval
    :param dividerMultiple: float  how many times more frequent than
        random a substring must be to count as a divider
    :return: collections.Counter  counts of all substrings of `length`
    """
    # Count values in a sliding window over the data.
    total = len(data) - length
    window = (data[k:k+length] for k in range(total))
    counts = collections.Counter(window)
    print((' %s Byte Substrings ' % length).center(100, '*'))
    print()
    print('%10s %s' % (total, 'Total'))
    for values, count in counts.most_common(20):
        if count <= 1:
            break
        print('%10s %30s %s' % (count, values, tuple(value for value in values)))
    print()
    # If a value is more common than expected, it may be a divider that
    # separates tokens.
    for common, commonCount in counts.most_common(5):
        # Statistically, this means we are looking for values where the
        # lower end of the proportion confidence interval is still
        # sufficiently high. Confidence interval:
        #   p_hat +/- z * sqrt(p_hat * (1 - p_hat) / n)
        fraction = float(commonCount) / total
        conservativeFraction = fraction - z * math.sqrt(fraction * (1.0 - fraction) / total)
        expectedFraction = 1.0 / (256 ** length)
        # Skip anything that doesn't happen at least the token multiple
        # more often than expected.
        if conservativeFraction < dividerMultiple * expectedFraction:
            break
        # Look for common tokens, skipping if there are none.
        tokens = data.split(common)
        tokenCounts = collections.Counter(tokens)
        if tokenCounts.most_common(1)[0][1] == 1:
            continue
        print(' Common Tokens '.center(60, '*'))
        print('Common divider found: %s %s (%d times)' % (common, tuple(common), commonCount))
        print()
        for token, count in tokenCounts.most_common(20):
            if count <= 1:
                break
            print('%10s %30s %s' % (count, token, tuple(token)))
        print()
    return counts
16e98475d43706edc7ac8d9d8eb7eb0982fa4b41
106,349
def get_max_attempts(argument):
    """
    Gets the maximum number of scraping attempts from the raw argument.

    :param argument: raw CLI/config value; None -> 5, 'no' -> unlimited,
        otherwise parsed as an int.
    """
    if argument is None:  # idiomatic identity check (was `== None`)
        return 5  # default
    if argument == 'no':
        return float("inf")  # unlimited number of attempts
    return int(argument)
5125300241769a7f5a126eca85c324706343417b
106,350
def mangle_struct_typename(s):
    """Strip leading underscores and make uppercase."""
    stripped = s.lstrip("_")
    return stripped.upper()
fe9d0693323391f7e8f8e6df373d44aa80917af8
106,354
def merge_runfiles(all_runfiles):
    """Merges a list of `runfiles` objects.

    Args:
      all_runfiles: A list containing zero or more `runfiles` objects to merge.

    Returns:
      A merged `runfiles` object, or `None` if the list was empty.
    """
    # NOTE: `== None` is kept (rather than `is None`) because this looks
    # like Bazel Starlark, which has no `is` operator.
    merged = None
    for candidate in all_runfiles:
        if merged == None:
            merged = candidate
        else:
            merged = merged.merge(candidate)
    return merged
d5fcc7dbcd6b95b04f5fe32dabf375066361b6b9
106,355
def dec2gon(dec):
    """
    Converts Decimal Degrees to Gradians

    :param dec: Decimal Degrees
    :type dec: float
    :return: Gradians
    :rtype: float
    """
    # 10/9 gradians per degree (400 gon == 360 deg); identical operation
    # order to the original to keep float results bit-for-bit the same.
    gradians_per_degree = 10/9
    return gradians_per_degree * dec
6ffe6f5c79f36599bcd85dc5bac3a3a439b06986
106,356
def merge_args_kwargs_dict(args, kwargs):
    """Takes a tuple of args and dict of kwargs.

    Returns a dict that is the result of merging the first item of args
    (if that item is a dict) and the kwargs dict. kwargs entries win on
    key collisions.

    Unlike the original, the caller's dict in ``args[0]`` is copied
    rather than mutated in place.
    """
    merged = {}
    if args and isinstance(args[0], dict):
        merged = dict(args[0])  # copy: don't mutate the caller's dict
    merged.update(kwargs)
    return merged
e7efd13bf8c27e1b64d4d1c806207749bd2268f5
106,361
def _is_int(value):
    """Use casting to check if value can convert to an `int`.

    Also catches TypeError so non-castable objects (e.g. None, lists)
    report False instead of raising, matching the function's purpose.
    """
    try:
        int(value)
    except (ValueError, TypeError):
        return False
    else:
        return True
56e1b97651afe017fa4e9f0c8c5542754d112d70
106,365
def __fire_trans(m, preset, postset):
    """
    Fires a transition and returns a new marking.

    Parameters
    ---------------
    m
        Marking (dict: place -> token count)
    preset
        Preset (tokens consumed)
    postset
        Postset (tokens produced)

    Returns
    ---------------
    new_m
        New marking
    """
    new_marking = {}
    # Consume preset tokens; places emptied completely are omitted.
    for place in m:
        if place in preset:
            remaining = m[place] - preset[place]
            if remaining > 0:
                new_marking[place] = remaining
        else:
            new_marking[place] = m[place]
    # Add postset tokens on top of whatever survived.
    for place in postset:
        new_marking[place] = new_marking.get(place, 0) + postset[place]
    return new_marking
50e6efdbb3282839fe52f531ad0167b16300ce3f
106,368
def get_ref(record):
    """Get the name of a VPC, or its ID if it has no name.

    Args:
        record
            A VPC record returned by AWS.

    Returns:
        The VPC's name (first 'Name' tag), or its ID if it has no name.
    """
    for tag in record.get("Tags") or []:
        if tag["Key"] == "Name":
            return tag["Value"]
    return record["VpcId"]
c37dcf4e916503ac0975019a148e252cd6c1857c
106,373
def gndvi(b3, b8):
    """
    Green Normalized Difference Vegetation Index \
    (Gitelson, Kaufman, and Merzlyak, 1996).

    .. math:: GNDVI = (b8 - b3) / (b8 + b3)

    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns GNDVI: Index value

    .. Tip:: Gitelson, A., Kaufman, Y. J., Merzlyak, M. N. 1996. Use of
        a green channel in remote sensing of global vegetation from
        EOS-MODIS. Remote Sensing of Environment 58(3), 289-298.
        doi:10.1016/s0034-4257(96)00072-7.
    """
    difference = b8 - b3
    total = b8 + b3
    return difference / total
e98f5af1a9f071b1db552d2c347cf91e7a71ba47
106,375
def tail_factorial(
    number: int,
    accumulator: int = 1,
) -> int:
    """Tail-recursive factorial.

    Args:
        number (int): non-negative integer whose factorial is computed
        accumulator (int, optional): running product carried through
            the recursion. Defaults to 1.

    Returns:
        int: accumulator * number!
    """
    if number:
        return tail_factorial(number - 1, accumulator * number)
    return accumulator
7331f0a74ca07d4ffcd9678ebaf0b2fc30207584
106,379
def twod_to_oned(size, *coordinates):
    """Converts coordinates (x >= 0, y >= 0) to flat int indices.

    :param size: Size of the grid that (x, y) is contained in
    :param coordinates: [(x0, y0), (x1, y1), ...]
    :return: (int0, int1, ...), or a bare int when one pair is given
    """
    width = size[0]
    flattened = tuple(col + row * width for col, row in coordinates)
    return flattened[0] if len(flattened) == 1 else flattened
053dba604ab8a3fc2779bbfb280ea13b77a13fe7
106,381
def VLOOKUP(table, **field_value_pairs):
    """
    Vertical lookup. Searches the given table for a record matching the
    given `field=value` arguments. If multiple records match, returns
    one of them. If none match, returns the special empty record.

    The returned object is a record whose fields are available using
    `.field` syntax, e.g. `VLOOKUP(Employees, EmployeeID=$EmpID).Salary`.

    Note that `VLOOKUP` isn't commonly needed in Grist, since
    [Reference columns](col-refs.md) are the best way to link data
    between tables, allowing simple efficient usage such as
    `$Person.Age`.

    `VLOOKUP` is exactly equivalent to
    `table.lookupOne(**field_value_pairs)`. See [lookupOne](#lookupone).

    For example:
    ```
    VLOOKUP(People, First_Name="Lewis", Last_Name="Carroll")
    VLOOKUP(People, First_Name="Lewis", Last_Name="Carroll").Age
    ```
    """
    # Thin alias over the table's single-record lookup.
    return table.lookupOne(**field_value_pairs)
2a865196cd528593b757aded7ca58fbbe0cd862a
106,382
def countBlank(series, blanks = [None]):
    """Count number and percentage of blank values in series.

    Note: ``blanks`` is unused; null detection relies on
    ``series.isnull()``. (Kept for interface compatibility.)

    Args:
        series (Series): data series
        blanks (list): list of blank values (ignored)

    Returns:
        number: number of blanks
        str: the percentage of blank values
    """
    null_count = series.isnull().sum()
    percentage = "{0:.2%}".format(null_count / series.size)
    return (null_count, percentage)
8ce717ddffd69bfcc5bb33e96a63e6d60029298a
106,385
def determine_file_type(record):
    """
    Determine the file type used in the ENA archive

    :param record: ENA API record
    :return: the file type ('ftp', 'galaxy' or 'aspera'), or '' if none
    """
    for server in ('ftp', 'galaxy', 'aspera'):
        if record.get(f"submitted_{server}", '') != '':
            return server
    return ''
7a0a4d81a55f1ef16e605f95578acf9293a93aaa
106,387
def project(request):
    """
    Setup project flag

    :param request: pytest request fixture
    :return: value of the --project command line option
    """
    option_value = request.config.getoption("--project")
    return option_value
79b5b73e527916930821cba2a6cb43d628c6dae6
106,388
import json
import base64


def to_base64_json(obj):
    """
    Return a base64-encoded JSON string from a given Python object.
    """
    serialized = json.dumps(obj).encode('utf-8')
    return base64.b64encode(serialized).decode('utf-8')
8f859781bbba0d14e40bd0d116759dd59251ba97
106,390
def delete_rows_for_taxa(dataframe, taxa_level, taxa_name):
    """
    Return a copy of the dataframe with rows matching the taxa name at
    the specified taxa level removed. For use in
    aggregating_mixed_taxonomy: allows for keeping track of leftovers.

    :param dataframe: dataframe to copy and delete rows from
    :param taxa_level: column to look for the name in
    :param taxa_name: rows with this value in taxa_level are deleted
    :return: a dataframe with fewer rows
    """
    copied = dataframe.copy()
    keep_mask = copied[taxa_level] != taxa_name
    return copied[keep_mask]
cfc415bbd9613550c14b39cfd2b67dd03b3d5969
106,395
def rdb(sep, *args):
    """
    Builds a regular expression for separated digit groups: one capture
    group of `n` digits per entry in args, joined by `sep`.
    """
    return sep.join(r'(\d{{{:d}}})'.format(n) for n in args)
9c45e306f64f50e2c774206960b3dbe2c8c8d3ec
106,400
import re


def indicator_to_integer(indicator):
    """Converts an instance indicator to an integer.

    An indicator can be an integer ('23'), a combination of integers
    and letters ('23b'), or just a letter ('B', mapped to its 0-based
    position in the alphabet).
    """
    try:
        return int(indicator)
    except ValueError:
        digits = re.sub("[^0-9]", "", indicator)
        if digits:
            return indicator_to_integer(digits)
        return ord(indicator.lower()) - 97
f2d6893040f02ebebf9832b249675580fe87b687
106,401
def fancy(message):
    """Return the message with surrounding ~'s."""
    wrapped = "~{0}~".format(message)
    return wrapped
f9fa16f99c0151be6a2a48af44131dea5ace3cf9
106,402
def sum_dicts(dicts):
    """Sum values across dicts, keyed by dict key.

    :param dicts: iterable of dicts with addable values
    :return: dict mapping each key to the sum of its values
    """
    totals = {}
    for d in dicts:
        # .items() replaces Py2-only iteritems(), which is gone in Py3.
        for key, val in d.items():
            totals[key] = totals.get(key, 0) + val
    return totals
09d424bb3c0851122b0018bc9a78b440e85d4f68
106,405
import json


def export_to_json(entries):
    """
    Exports results as a JSON object
    """
    serialized = json.dumps(entries, indent=4, separators=(', ', ': '))
    return serialized
2de95322648d44c4189c1dc893e8c905466db0d0
106,408
import logging
import requests


def get_metrics_data(client_api, metrics):
    """Gets all data for specific metrics.

    :param client_api: API client, that handles API requests
    :type client_api: client.APIClient
    :param metrics: list of metrics to retrieve data
    :type metrics: list
    :return: list of metrics data
    :rtype: list[dict]
    """
    logging.info("Start fetching metrics from Prometheus.")
    collected = []
    for metric in metrics:
        try:
            response = client_api.get_request('query', {'query': metric})
        except requests.exceptions.RequestException as e:
            logging.error("Received error: {}".format(e), exc_info=True)
            raise
        # Prometheus returns false-positive result for non-existent
        # metrics; skip those with empty data.
        if not response['data']['result']:
            logging.warning("Metric '{0}' not found.".format(metric))
            continue
        collected.append(response)
    logging.info("{0} out of {1} metrics were successfully fetched from "
                 "Prometheus.".format(len(collected), len(metrics)))
    return collected
6335c35ce9a3299a625a416150f6b6c2bb495466
106,411
def list_from_i_to_n(i, n):
    """
    Make the inclusive list [i, i+1, ..., n].

    for example:
      list_from_i_to_n(3,7) => [3,4,5,6,7]
      list_from_i_to_n(4,6) => [4,5,6]
    """
    # list(range(...)) replaces the original's O(n^2) repeated list
    # concatenation.
    return list(range(i, n + 1))
308a30dde3f74726755343a34a78d9a9c949cb8e
106,412
def dec2hex(d):
    """return a two character hexadecimal string representation of integer d"""
    hex_string = "%02X" % d
    return hex_string
f5ad063ed6dafb82432f5664f1f3b52308c59182
106,413
import struct


def readFloatVal(byteStream):
    """Read a big-endian 32-bit float; return (bytes consumed, value)."""
    raw = byteStream.read(4)
    (value,) = struct.unpack(">f", raw)
    return 4, value
c599695370c15e14b865c2683a0eaf8c698fb4fc
106,415
def delta(i, j):
    """
    Returns 1 if i == j, else 0.

    This is used in the multiplication of Pauli matrices.

    Examples
    ========

    >>> from sympy.physics.paulialgebra import delta
    >>> delta(1, 1)
    1
    >>> delta(2, 3)
    0
    """
    return 1 if i == j else 0
f5f90a49eb366a7e4adaedcc203ba23687f29b86
106,420
def _get_attribute(node, attr):
    """Return an attribute by name, or None if not present."""
    return node.getAttribute(attr) if node.hasAttribute(attr) else None
54e6bbabc664aba0852bc0c562740e88e98b5da8
106,421
def _convert_plural_injury_to_singular(injury: str) -> str:
    """Converts a plural injury type to a singular injury type.

    For example, 'Broken Arms' becomes 'Broken Arm', or 'Ribs' becomes
    'Rib'. Empty strings are returned unchanged (the original raised
    IndexError on empty input).

    Args:
        injury (str): The injury type to convert.

    Returns:
        str: The singularized injury type.
    """
    if injury.endswith("s"):
        return injury[:-1]
    return injury
2232855c22432a18bd288be1e46ec33dc42a5bef
106,423
import math


def tolerance(n_samples):
    """
    Computes a tolerance value based on a number of samples for testing
    pseudo-random functions.
    """
    # Scale inversely with sample size: 1.2 at 1000 samples, shrinking
    # by 10x for each extra decade of samples.
    exponent = math.log10(n_samples) - 3
    return 1.2 / (10 ** exponent)
c651ee866dd0f8b3580bd57c09f8dbeb8669615d
106,429
def str_to_list(val, separator=','):
    """
    Split one string to a list by separator; None passes through.
    """
    return None if val is None else val.split(separator)
e06b74760ee204abf23627b0fc52acef201b7f1f
106,431
from typing import Tuple


def adjust_TWL_TSV_fields(raw_fields:Tuple[str,str,str,str,int,str]) -> Tuple[int,int,str,int,str,str]:
    """
    Takes the 6 raw TWL TSV fields, then
        1/ Splits the reference into C and V and converts them to integers
        2/ Gets the category and word out of the link
        3/ Drops unneeded fields
    """
    reference, _rowID, _tags, orig_TWL_words, occurrence, TWLink = raw_fields
    chapter, verse = reference.split(':')
    link_tail = TWLink[len('rc://*/tw/dict/bible/'):]
    category, word = link_tail.split('/')
    return int(chapter), int(verse), orig_TWL_words, int(occurrence), category, word
5e14b1e0f5a0a4517d888524d44dd6e08fd21168
106,437
import math


def distance(gps1, gps2):
    """
    Compute the haversine great-circle distance between two tuples of
    latitude and longitude.

    :param gps1: First tuple of (latitude, longitude).
    :param gps2: Second tuple of (latitude, longitude).
    :return: The distance in meters.

    :Example:

    >>> int(distance([48.86786647303717, 2.19368117495212], \
        [48.95314107920405, 2.3368043817358464]))
    14117
    """
    lat1 = math.radians(gps1[0])
    lon1 = math.radians(gps1[1])
    lat2 = math.radians(gps2[0])
    lon2 = math.radians(gps2[1])
    # Haversine formula; identical operation order to the original.
    # pylint: disable=locally-disabled,invalid-name
    a = (
        math.sin((lat2 - lat1) / 2.0)**2
        + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2.0)**2
    )
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return 6371000 * c
6f1173d329facaab9bfca02294a2114e71243aa1
106,451
import _asyncio


def _in_loop(loop):
    """Returns True if loop is the running event loop for the current thread."""
    try:
        running = _asyncio.get_running_loop()
    except RuntimeError:
        # No event loop is running in this thread.
        return False
    return running is loop
ada8f82953159d309071a28fced8ac340c62015a
106,460
def solution(a, b):  # O(1): at most 32 iterations
    """
    Calculate the sum of two integers a and b without using + or -.

    Emulates 32-bit two's-complement arithmetic. Python ints are
    unbounded, so the original loop never terminated for negative
    operands; masking to 32 bits fixes that while keeping positive
    results identical.

    >>> solution(2, 3)
    5
    """
    mask = 0xFFFFFFFF
    a &= mask
    b &= mask
    while b:
        carry = a & b                 # common set bits carry over
        a = (a ^ b) & mask            # sum of bits ignoring carries
        b = (carry << 1) & mask       # carry shifted into position
    # Reinterpret the 32-bit pattern as a signed integer.
    return a if a < 0x80000000 else ~(a ^ mask)
f532430438d4217509cf0ddb1d1d1de865604c90
106,461
def UriFuncForListInstanceRelatedObjects(resource):
    """UriFunc for listing instance-group related subresources.

    Returns the URI field of objects that are subresources of
    instance-groups carrying an instance field. Works for
    list-instances and instance-configs list commands.

    Args:
      resource: instance-group subresource with instance field
    Returns:
      URI of instance
    """
    instance_uri = resource.instance
    return instance_uri
46982d5d53f2bed80bf2696ebeffa182496fdb9f
106,466
def is_sensible_roll(dice):
    """Checks the dice results for sensible sizes.

    Returns a (bool, str) pair; the string holds the error message and
    is empty when the roll is sensible. dice[1] is the dice count,
    dice[2] the number of sides (only parsed when the count is OK).
    """
    if int(dice[1]) > 100:
        return False, "Attempted to roll more than 100 dice in a single group."
    if int(dice[2]) > 50000:
        return False, "Attempted to roll a dice with more than 50000 sides."
    return True, ""
b6f86e85e6bf6c63f82b520d05a03fc865f1c1da
106,467
def load_batchnorm(dump):
    """returns the BatchNorm1d or BatchNorm2d object loaded from a dump"""
    # The dump's 'type' field names a class in this module's globals.
    cls = globals()[dump["type"]]
    return cls.from_dump(dump)
ba972b1a4be97a69e822280b3a02015e22a73d74
106,469
import time


def GetAgeTupleFromRequest(request, default_days=90):
    """Check the request for start/end times and return aff4 age tuple.

    Times are microseconds since the epoch; defaults cover the last
    `default_days` days up to now.
    """
    now_us = int(time.time() * 1e6)
    window_us = 60 * 60 * 24 * 1e6 * default_days
    start = int(request.REQ.get("start_time", now_us - window_us))
    end = int(request.REQ.get("end_time", now_us))
    return (start, end)
55aec230742e98b1527916a460ed7fb34043547e
106,475
def _getPyFile(filename):
    """Return the file and '.py' filename from a filename which could
    end with .py, .pyc, or .pyo"""
    last_char = filename[-1]
    if last_char in 'oc' and filename[-4:-1] == '.py':
        return filename[:-1]
    return filename
715ca7edb4e22c8708cecf67e5e4588ab7d96dc8
106,480
def parser_short_event_Descriptor(data,i,length,end):
    """\
    parser_short_event_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    Parses a descriptor describing a short event - usually a programme,
    giving a name, longer description, and its language. Usually part
    of schedule or now & next information.

    The dict returned is:
       { "type"          : "short_event",
         "language_code" : ISO 639-2 3-letter language code,
         "name"          : String name of the event (programme),
         "text"          : Up to 255 character string description }

    (Defined in ETSI EN 300 468 specification)
    """
    language = data[i+2:i+5]
    name_len = ord(data[i+5])
    name_start = i + 6
    event_name = data[name_start:name_start+name_len]
    # The text length byte immediately follows the name field.
    text_len = ord(data[name_start+name_len])
    text_start = name_start + name_len + 1
    event_text = data[text_start:text_start+text_len]
    return {
        "type" : "short_event",
        "language_code" : language,
        "name" : event_name,
        "text" : event_text,
    }
791b3b0546165833e0f7902f9c755dc334c8e89d
106,481
def get_skills(lower: bool=False):
    """
    Returns a list of the skill names in the order listed on the
    highscores. See https://secure.runescape.com/m=hiscore_oldschool/overall

    Args:
        lower: If the skills should be lowercase or titlecase.
    """
    names = [
        "Attack", "Defence", "Strength", "Hitpoints", "Ranged", "Prayer",
        "Magic", "Cooking", "Woodcutting", "Fletching", "Fishing",
        "Firemaking", "Crafting", "Smithing", "Mining", "Herblore",
        "Agility", "Thieving", "Slayer", "Farming", "Runecraft",
        "Hunter", "Construction",
    ]
    if lower:
        return [name.lower() for name in names]
    return names
a6bd52eb49e3e47eff50dcea0b57efcf7f258db3
106,482
def distance(pt1, pt2):
    """
    Returns Manhattan distance between two points.

    A point is a tuple (x, y, name).
    """
    dx = abs(pt1[0] - pt2[0])
    dy = abs(pt1[1] - pt2[1])
    return dx + dy
8590d7d24883b44161f00fc74dc5c3de99a86714
106,485
def is_neon(number: int) -> bool:
    """Return whether the number equals the digit sum of its square."""
    digit_sum = sum(int(ch) for ch in str(number ** 2))
    return digit_sum == number
b4d29435c6e08187e6c457bac87cec25e689868e
106,486
def _run_patched(mover, patches, inputs):
    """Run the move with the given patches for the given inputs"""
    for patch in patches:
        patch.start()
    result = mover.move(inputs)
    for patch in patches:
        patch.stop()
    return result
5875244e1310dcfd2e22889c36029c872d0c4eb8
106,487
def find_points_in_cluster(merge_key, clusters, all_clusters): """ Finds all data points that belong to a merge. Removes all involved merges from clusters. :param merge_key: Key of the merge in the clusters dict :param clusters: Dictionary with merge_key as key and merged clusters as value :param all_clusters: Like clusters but also includes merges with clusters larger than cut distance :return: list of points, updated clusters """ points = [] queue = clusters[merge_key][:-1] del clusters[merge_key] while len(queue) > 0: cluster = queue.pop() if type(cluster) == int: points.append(cluster) else: queue.append(all_clusters[cluster][0]) queue.append(all_clusters[cluster][1]) clusters.pop(cluster, None) return points, clusters
db776b2ccfe5348c6ade29c3284f29f05942974a
106,489
import collections
from typing import OrderedDict


def compare_type(t):
    """Gives the order of the type for the dictionary.

    Args:
        t (type): The type to compare

    Returns:
        1 for (dict, OrderedDict), 0 otherwise
    """
    # NOTE: typing.OrderedDict is a generic alias that never compares equal
    # to collections.OrderedDict, so the real class must be checked
    # explicitly or actual OrderedDict types would score 0.  The typing
    # alias is still accepted for backward compatibility.
    if t in (dict, collections.OrderedDict, OrderedDict):
        return 1
    return 0
a982dcb44814caa7827a13d278f5141327b04a84
106,498
def contains_stack_cookie_keywords(s):
    """Check whether a string contains stack-cookie keywords.

    A match requires "cookie" plus either "stack" or "security"
    (case-insensitive).

    Examples:
        xor ecx, ebp ; StackCookie
        mov eax, ___security_cookie
    """
    if not s:
        return False
    normalized = s.strip().lower()
    if "cookie" not in normalized:
        return False
    return "stack" in normalized or "security" in normalized
76c2c27c00e1df48d13b2f7822759d9a3c43cc5a
106,499
def _unwrap_response(resp): """Get the actual result from an IAM API response.""" for resp_key, resp_data in resp.items(): if resp_key.endswith('_response'): for result_key, result in resp_data.items(): if result_key.endswith('_result'): return result return {} # PutRolePolicy has no response, for example raise ValueError(resp)
3902c3f9f78d256e697fe2a3ffa3de9a89f980be
106,503
def make_composite_overlay(*overlays):
    """Return an overlay which composites the supplied overlays in turn.

    :param overlays: overlay callables of the form f(image, transform) -> image.
    :returns: a callable that applies every overlay in order and returns
        the resulting image.
    """
    def composite(image, transform):
        result = image
        for overlay in overlays:
            result = overlay(result, transform)
        return result
    return composite
df92e162d2a8d38e088dd9e323810f7dffee2f09
106,504
import random


def build_sort_case(l, h, length, n):
    """Return a set of randomly generated sorting test cases.

    l: lowest number in each test array (inclusive)
    h: highest number in each test array (inclusive)
    length: length of each test array
    n: how many test cases to build

    Returns a list of (unsorted_array, sorted_array) tuples.
    """
    def one_case():
        values = [random.randint(l, h) for _ in range(length)]
        return values, sorted(values)

    return [one_case() for _ in range(n)]
153681fa3c8d8e85aa523d85dcfa95625f55c32a
106,506
import math


def minimum_shock_angle(m):
    """Calculate the shock angle for which the deflection angle is zero.

    Input:
        m - Mach number
    """
    # sin(mu) = 1/M defines the Mach angle.
    sin_mu = 1 / float(m)
    return math.asin(sin_mu)
6467e3512314aa1fa55966f0388a3b94ffed5060
106,513
def tidy_string(s):
    """Tidy a string by deleting braces, quotes, and backslashes."""
    # One C-level pass removes the same characters the old chained
    # .replace() calls did: { } ' " \
    return s.translate(str.maketrans("", "", "{}'\"\\"))
f153e51df189592bd4465917000f281688dc61d1
106,520
def isSolved(puzzle):
    """
    Check whether all cells in the puzzle is filled.

    Delegates to the puzzle object's ``all()`` method and returns its
    result directly.  NOTE(review): assumes ``puzzle`` is a numpy-like
    array where an empty cell is falsy (e.g. 0) — confirm against callers.
    """
    return puzzle.all()
089143ff8d0d75d0e514ae66304eb855bfb21f70
106,523
def enumerate_options(course_list):
    """Build a prompt string enumerating the courses the user may download.

    Args:
        course_list (list): a list of Course objects

    Returns:
        str: numbered course menu followed by an input prompt
             (course numbers, plus "q" to quit)
    """
    pieces = ["Courses:\n"]
    for number, course in enumerate(course_list, start=1):
        pieces.append(f"\t{number}: {course.get_name()}\n")
    pieces.append("Enter a course number, or type \"q\" to quit: ")
    return "".join(pieces)
d0cf025f30760f1b16e06c66ffa394d4a876adb6
106,528
import math


def min_len(l_len):
    """(Theoretical) minimum length of an origin list given a difference
    list of length *l_len*.

    An origin list of n elements yields n*(n-1)/2 pairwise differences,
    so the smallest n that can produce l_len differences is
    ceil((1 + sqrt(1 + 8*l_len)) / 2).

    The previous formula, ``floor(1 + sqrt(1 + 8*l_len)/2)``,
    mis-parenthesized the division (only sqrt was halved) and
    under-counted for some non-triangular lengths (e.g. l_len=4 gave 3,
    but 3 elements yield only 3 differences).  Triangular lengths are
    unaffected by this fix.
    """
    return math.ceil((1 + math.sqrt(1 + 8 * l_len)) / 2)
a4264819787da0ff695952eaa5ec164497762e83
106,529
def base10toN(num, base):
    """Convert a decimal number (base-10) to a number of any base/radix from 2-36."""
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if num == 0:
        return '0'
    prefix = ''
    if num < 0:
        prefix, num = '-', -num
    out = []
    while num:
        num, rem = divmod(num, base)
        out.append(digits[rem])
    # Digits were produced least-significant first.
    return prefix + ''.join(reversed(out))
9bd2434b259cbd7e294258377f32de6a74448b79
106,531
def get_padding(dimension_size, sectors):
    """Get the padding at each side of one image dimension so the padded
    dimension divides evenly into *sectors* sectors.

    Parameters
    ----------
    dimension_size : int
        Actual dimension size.
    sectors : int
        Number of sectors over which the image will be divided.

    Returns
    -------
    pad_before, pad_after : int, int
        Padding at each side of the image for the corresponding dimension
        (both 0 when the size already divides evenly).
    """
    remainder = dimension_size % sectors
    if remainder == 0:
        return 0, 0
    total_pad = sectors - remainder
    before = total_pad // 2
    # The extra unit of an odd pad goes on the "after" side.
    after = total_pad - before
    return before, after
4bf0e3f4483586f1a41d5b65a3a6cb5e1dbbd6dc
106,533
def _CheckScript(context): """Returns the checkScript property or a successful no-op if unspecified.""" return context.properties.get('checkScript', 'return 0')
aa0e8b5a3752f4098c9675f633f54ce3ee578612
106,537
import mimetypes


def get_content_type(filename):
    """Guess a MIME type for *filename*; fall back to application/octet-stream."""
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed if guessed else 'application/octet-stream'
ba52c9c0aa9e421e74dbfb0c19da21fff3bcad0b
106,542
import codecs


def read_file(path, uncomment_moz_langpack=False):
    """Read the resource at the given path (decoded as UTF-8).

    .inc files carry a commented-out MOZ_LANGPACK_CONTRIBUTORS entity;
    when *uncomment_moz_langpack* is True the leading "# " of that line
    is stripped before returning, so locales can translate it.
    """
    with codecs.open(path, "r", "utf-8") as handle:
        if not uncomment_moz_langpack:
            return handle.read()
        pieces = []
        for raw_line in handle:
            if raw_line.startswith("# #define MOZ_LANGPACK_CONTRIBUTORS"):
                raw_line = raw_line[2:]
            pieces.append(raw_line)
        return "".join(pieces)
2ab95fd66d3b00905d15890565951bfb8ef31682
106,544
def to_chr(x):
    """Return the single-character string for Unicode code point *x*.

    The previous implementation used ``eval("u'\\u%d'" % x)`` for
    x > 127, which substituted the *decimal* code point into a ``\\u``
    escape that Python reads as *hexadecimal* (so to_chr(1000) yielded
    U+1000 = 4096), and raised a SyntaxError for values that do not
    format to exactly four digits.  Python 3's chr() covers the whole
    Unicode range directly, and drops the eval() entirely.

    Args:
        x (int): Unicode code point (0 <= x <= 0x10FFFF).

    Returns:
        str: the character for *x*.
    """
    return chr(x)
1f2eebb014f328e3ff6184a56663db814ebfa34b
106,549
def _collect_group_names(s): """Returns the list of placeholder names found in the given string. Placeholders are of the form `{foo}`. Args: s: The string that potentially contains placeholders. Returns: A list of placeholder names found in the string, if any. """ names = [] length = len(s) end_index = 0 for i in range(length): # Don't try to capture a placeholder inside another placeholder. if i < end_index: continue ch = s[i] if ch == "{": end_index = s.find("}", i + 1) if end_index != -1: names.append(s[(i + 1):end_index]) return names
a1530769c0a94e66570a2b8f684e155617151cec
106,554
def y_generator(class_count: int, instance_count: list) -> list:
    """Generate y values for corresponding classes.

    :param class_count: number of classes (data groupings) in the dataset
    :param instance_count: number of instances per class
    :return: class labels repeated per-class, in class order
    """
    labels = []
    for cls in range(class_count):
        labels.extend([cls] * instance_count[cls])
    return labels
04ebd280e1db8635fe6c9a72298c4e21ebd821fc
106,555
def experiment_rank_by_average_normalized_score(experiment_pivot_df):
    """Creates experiment level ranking by taking the average of normalized
    per benchmark scores from 0 to 100, where 100 is the highest reach
    coverage."""
    # Scale each benchmark row so the best experiment in that row scores 100.
    row_maxima = experiment_pivot_df.max(axis='columns')
    normalized = experiment_pivot_df.div(row_maxima, axis='index') * 100
    # Average per experiment, best first.
    ranking = normalized.mean().sort_values(ascending=False)
    return ranking.rename('average normalized score')
e13fef63ba69ea5269fc5de7fd22396a65b1a3eb
106,559
def _get_ip_axis(ur_number): """ Internal function that gets the ip of the axis machine Args: ur_number: ID of robot (1,2 or 3) Returns: ip: string. """ ip = 10 * ur_number return '192.168.10.%d' % ip
b560efa1ec8354730c7096251e6dc66cf0babd8c
106,561
import collections


def group_fields_by_root(field_locations):
    """Group together field locations that hang from the same parent.

    Parameters
    ----------
    field_locations: List[FieldLocation]

    Returns
    -------
    collections.defaultdict
        Maps PageTree node numbers to a list of FieldLocation.
    """
    grouped = collections.defaultdict(list)
    for location in field_locations:
        parent = location.item[location.root]
        grouped[parent].append(location)
    return grouped
345b4c5c5f30c9806a26c3812e758341ac74fc42
106,564
import math


def poisson(n, expectedValue):
    """Calculate the probability of an integer in a Poisson distribution.

    Args:
        n (int): quantity of which you want to know the probability.
            Whole-number floats (e.g. 2.0) are accepted.
        expectedValue (float): the average value that defines the Poisson
            distribution

    Returns:
        float: probability of "n" in distribution "expectedValue";
        0 for negative or non-integer n.
    """
    # Only consider whole numbers.
    if n < 0 or n % 1 != 0:
        return 0
    # math.factorial rejects floats on Python >= 3.10, but a whole-number
    # float like 2.0 passes the n % 1 check above — cast after validating.
    n = int(n)
    return (expectedValue ** n) / math.factorial(n) * math.exp(-expectedValue)
e1a81b47aad4128021981b3c5d1d83b2b4af2865
106,565
import itertools


def range_str(values):
    """
    Take an iterable object containing integers and return a string of
    comma separated values, with runs of consecutive values collapsed
    into dash-separated ranges.

    Example:

    >>> range_str({1, 2, 3, 5, 7, 9, 10})
    '1-3,5,7,9-10'
    """
    ordered = sorted(values)
    parts = []
    idx = 0
    count = len(ordered)
    while idx < count:
        # Extend the run while the next value is exactly one greater.
        run_end = idx
        while run_end + 1 < count and ordered[run_end + 1] == ordered[run_end] + 1:
            run_end += 1
        if run_end == idx:
            # Singleton: the value has no sequential neighbors.
            parts.append(str(ordered[idx]))
        else:
            parts.append('{}-{}'.format(ordered[idx], ordered[run_end]))
        idx = run_end + 1
    return ','.join(parts)
6ffca7be706ee6a833da24821178fd08ff7e459c
106,570
def import_from(import_string):
    """Import and return an attribute given a dotted module path string
    (e.g. 'package.module.function')."""
    module_path, _, attr_name = import_string.rpartition('.')
    module = __import__(module_path, fromlist=[attr_name])
    return getattr(module, attr_name)
805ea07b9cc387754a4436dae2587944cdea25d2
106,571
def stringinlist(s, l):
    """Return True if *s* occurs as a substring of any string in *l*."""
    return any(s in item for item in l)
b95363f0d36365d75cd013160d44ddc97f36c97f
106,576
def hex2dec(s):
    """Convert a hex string to a decimal int, or None if invalid.

    >>> hex2dec('0064')
    100
    >>> hex2dec('FFFF')
    65535
    >>> hex2dec('FFZ') is None
    True
    """
    try:
        return int(s, 16)
    except ValueError:
        return None
bdc5dd1a2f266eb39395ed21dcce6fd411996c93
106,577
def frequency(dataframe, column_name):
    """Get the frequency of each unique value in a specified column.

    Args:
        dataframe (Pandas.DataFrame): The data that contains the column for
            the frequency calculation
        column_name (string): Name of the column to calculate the frequency
            for

    Returns:
        Pandas.DataFrame: A new dataframe with each unique value from the
        specified column along with the frequency, the percentage, the
        cumulative percentage, and the ccdf
    """
    freq_table = dataframe[column_name].value_counts().to_frame()
    freq_table.columns = ['frequency']
    freq_table.index.name = column_name
    freq_table = freq_table.reset_index()
    freq_table = freq_table.sort_values(column_name)
    total = freq_table['frequency'].sum()
    freq_table['percentage'] = freq_table['frequency'] / total
    freq_table['cumulative'] = freq_table['frequency'].cumsum() / total
    freq_table['ccdf'] = 1 - freq_table['cumulative']
    return freq_table
20f57437d9c7b78bcdc22eba9814f9b6fc3d6b92
106,580
def has_numbers(inputString):
    """Return True if the string contains at least one decimal digit."""
    return any(map(str.isdigit, inputString))
f16e80590087b666f66113503b680a23bb040027
106,581
import json


def get_entry(file_contents):
    """Parse stache credentials JSON and build the (key, url) request pair.

    Arguments:
        file_contents: JSON text storing the read key and endpoint, in the
            format::

                {
                    "X-STACHE-READ-KEY": "stache_read_key",
                    "endpoint": "stache_endpoint"
                }

            Older files use the key "X-STACHE-KEY" instead.

    Returns:
        (key, url) tuple: key is a header dict {"X-STACHE-KEY": ...} and
        url is the full stache endpoint URL.
    """
    creds = json.loads(file_contents)
    # Prefer the current key name; fall back to the legacy one.
    raw_key = creds.get("X-STACHE-READ-KEY") or creds["X-STACHE-KEY"]
    headers = {"X-STACHE-KEY": raw_key}
    url = f"https://stache.arizona.edu{creds['endpoint']}"
    return headers, url
075d0b63570f167024f7f1681f11cd6e23ba412a
106,584