content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import torch


def isValidDevice(device: str):
    """Check whether *device* names a valid torch device.

    Args:
        device (str): Device string to check (e.g. "cpu", "cuda:0").

    Returns:
        bool: True if ``torch.device`` accepts the string, False otherwise.
    """
    try:
        torch.device(device)
        return True
    except (RuntimeError, ValueError, TypeError):
        # torch raises RuntimeError (ValueError/TypeError on some versions)
        # for malformed device strings; the original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        return False
79e6d42942759b2ca1294c2c5e6ad30ba5720950
86,628
from typing import Counter


def states_and_counts_from_annots(annots):
    """Derive ``states`` and ``counts`` from ``crowsetta.Annotation``s,
    for building a transition matrix of label-to-label probabilities.

    Parameters
    ----------
    annots : list
        of ``crowsetta.Annotation``s whose ``Sequence.label`` attributes
        are used to count transitions between syllable labels.

    Returns
    -------
    states : list
        sorted unique labels occurring in the annotations.
    counts : collections.Counter
        keys are (from label, to label) transitions, values are the
        number of occurrences of that transition.
    """
    all_labels = [lbl for annot in annots for lbl in annot.seq.labels.tolist()]
    states = sorted(set(all_labels))

    counts = Counter()
    for annot in annots:
        seq_labels = annot.seq.labels.tolist()
        for pair in zip(seq_labels, seq_labels[1:]):
            counts[pair] += 1

    return states, counts
a6ce48b77af9f641428023d7ca2c790e7b682880
86,630
def complememt(dna):
    """Return the reverse complement of a DNA sequence string.

    *dna* is assumed to already be lower case; 'n' (unknown base) maps
    to itself.
    """
    pairs = {'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'n': 'n'}
    # Walk the sequence backwards, complementing base by base.
    return ''.join(pairs[base] for base in reversed(dna))
495ff6a93600ea2a54c44df0da182539f50b7673
86,631
def taxid(entry):
    """Return the NCBI taxon id of *entry* as an integer.

    Expects ``entry["taxonId"]`` of the form ``"NCBITaxon:<id>"``.
    """
    parts = entry["taxonId"].split(":", 1)
    prefix, ident = parts
    assert prefix == "NCBITaxon"
    return int(ident)
00fe879444e9587132cfe697a949c77b9002672d
86,634
def _fibonacci(n): """Fibonacci base function (naive recursive algorithm) Exponential Time Args: n: The nth number of the fibonacci sequence Returns: An integer of the nth number in the fibonacci sequence """ if n <= 2: # Base case f = 1 else: # Recursive call f = _fibonacci(n - 1) + _fibonacci(n - 2) return f
64b99ef3fc20008937193157cd432da719bc435a
86,636
def _serialize_decimal(val): """Create a JSON encodable value of a Decimal object.""" return float(val)
7ffa552cb69aeed404c300ed10a363c627593937
86,637
def with_constant_class(the_class):
    """
    Create a mixin class whose ``constant_class`` attribute is *the_class*.

    Allows setting a constant class for a constants container outside the
    container itself, e.g.:

    .. code-block:: python

        class FOO(with_constant_class(CustomConstant), Constants):
            A = CustomConstant()
            B = CustomConstant()

    .. code-block:: python

        >>> FOO.constant_class
        <class '__main__.CustomConstant'>
    """
    mixin = type('ConstantsMixin', (), {'constant_class': the_class})
    return mixin
ed090ddb56f5ed979fb7124192ae842c1b6936b4
86,642
def is_ptr_variable_type(type):
    """Determine whether *type* is a pointer variable type (both a
    variable type and a pointer type)."""
    if not type.isVar():
        return False
    return type.isPtr()
5aa2ee46fb5d232920034aa9d0616530728f834c
86,644
def address_to_raw(address):
    """Convert a colon-separated MAC address string to bytes,
    least-significant octet first."""
    octets = [int(part, 16) for part in address.split(":")]
    octets.reverse()
    return bytes(octets)
f73728958be4b9748f4c9e1d6152639742e21e7c
86,647
def get_meld_set(leaf_node):
    """Collect the ``meld`` of every node on the path from *leaf_node*
    up to the root, leaf first."""
    melds = []
    current = leaf_node
    while current:
        melds.append(current.meld)
        current = current.parent
    return melds
a4726fdda9670e674f65229c90aaeb5c35a8696a
86,649
def kinetic_energy(momenta, masses):
    """Compute the total kinetic energy, KE = sum(p^2 / (2 m)).

    Helper for the standard formula, which is used frequently but is not
    particularly recognizable or easy to read inline.

    Args:
        momenta (Matrix[momentum, shape=(*,3)]): atomic momenta
        masses (Vector[mass]): atomic masses

    Returns:
        Scalar[energy]: kinetic energy of these atoms
    """
    p_squared = momenta * momenta
    # Broadcast per-atom masses over the 3 momentum components.
    return 0.5 * (p_squared / masses[:, None]).sum()
3ecfbca88f9f1a7f547594b06ff4c6b1cbcc76d2
86,651
def removesuffix(s, suffix):
    """
    Remove *suffix* from the end of *s*, if present.

    Equivalent to ``str.removesuffix`` without requiring Python 3.9.

    Fixes an edge case in the naive ``s[:-len(suffix)]`` slice: when
    *suffix* is empty, ``s[:-0]`` means ``s[:0]`` and returned the empty
    string instead of *s* unchanged.
    """
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
35e8a8d556101cac7009a26dfe1359dca8c7b803
86,660
def _filter_repeating_nonalnum(phrase, length):
    """Reject phrases containing *length* consecutive non-alphabetic tokens.

    Example: 'phrase $$$' with length=3 is rejected.

    NOTE(review): each element is tested via ``t.is_alpha`` — *phrase* is
    presumably an iterable of spaCy-like tokens rather than a plain
    string; confirm against callers.

    Returns ``None`` when a run of *length* non-alphabetic tokens is
    found, otherwise returns *phrase* unchanged.
    """
    if len(phrase) > 0:
        # Countdown of how many more consecutive non-alpha tokens are allowed.
        alnum_len = length
        for t in phrase:
            if not t.is_alpha:
                alnum_len -= 1
            else:
                # Any alphabetic token resets the run.
                alnum_len = length
            if alnum_len == 0:
                return None
    return phrase
723507022fd66fba3df8d708df4740e9aee08920
86,661
def label_to_name(self, class_id):
    """Look up the human-readable class name for *class_id*."""
    info = self.id_to_class_info[class_id]
    return info['name']
07a6962d3ae3c8dc8bc657b4e01530a75d3a96a0
86,662
def attacks(attack_time, rest_time, cur_time):
    """
    Return True if attack occurs, and False otherwise.

    The dog cycles: attack for *attack_time* minutes, then rest for
    *rest_time* minutes, repeating forever. Minute t falls in the attack
    phase exactly when 0 < t mod cycle <= attack_time, so the original
    O(cur_time / cycle) search loop is replaced by an O(1) modulo test;
    results are identical, including the boundary conventions
    (cur_time <= 0 and exact cycle multiples are "not attacked").

    :param attack_time: number of minutes dog attacks
    :param rest_time: number of minutes dog rests
    :param cur_time: time person arrives
    :return: True if person is attacked, False otherwise
    """
    if cur_time <= 0:
        # Matches the original: arriving at or before time 0 is safe.
        return False
    phase = cur_time % (attack_time + rest_time)
    return 0 < phase <= attack_time
790222e46418a1e8ee5f7801825431f9485ea28c
86,666
import math


def condense_list(big_list, condense_size=100):
    """
    Condense a list to roughly *condense_size* elements by averaging
    consecutive chunks.

    Fixes two defects in the original: a trailing partial chunk was
    divided by the full chunk size (under-weighting its average), and an
    empty input crashed with a zero range step.

    :param big_list: List of numbers to condense
    :param condense_size: Int, the target size of the returned list
    :return: List of chunk averages
    """
    if not big_list:
        return []
    size = len(big_list)
    # math.ceil returns an int on Python 3; float() keeps true division on 2.
    chunk_size = int(math.ceil(float(size) / condense_size))
    condensed = []
    for start in range(0, size, chunk_size):
        chunk = big_list[start:start + chunk_size]
        # Divide by the actual chunk length so a short final chunk
        # averages correctly.
        condensed.append(sum(chunk) / len(chunk))
    return condensed
f46c53eb4d4f657b47017ead3fca29edff233a61
86,669
import base64


def get_client_id_and_secret(authorization):
    """
    Extract the client ID and secret from a Basic ``Authorization`` header.

    Fixes secret truncation: the original split the decoded credentials
    on every ':', but RFC 7617 allows the password itself to contain
    ':' — only the first ':' separates the ID from the secret.

    Args:
        authorization (str): The authorization header.

    Returns:
        str: The client ID.
        str: The client secret.
        str: The error message.
    """
    if authorization is None:
        return None, None, "Authorization header not found."
    if " " not in authorization:
        return None, None, "Authorization header format error."
    parts = authorization.split(" ")
    if parts[0] != "Basic":
        return None, None, "Authorization header format error."
    try:
        decoded = base64.b64decode(parts[1]).decode("utf-8")
    except Exception as e:
        return None, None, e.__str__()
    if ":" not in decoded:
        return None, None, "Authorization header format error."
    # Split only on the first ':' so the secret may contain colons.
    client_id, secret = decoded.split(":", 1)
    if secret == "":
        return client_id, None, None
    return client_id, secret, None
a7f68a85eeac3d684a803ace134e04dd06ecbe6d
86,672
import math


def humanize_size(n):
    """Convert a file size in bytes to a friendly human readable form.

    Args:
        n: int. Size in bytes.

    Returns:
        str. Human-friendly size. Values below 1 KB are formatted with
        no decimals ('512 B'); larger values get one decimal place and a
        K/M/G/T/P/E/Z/Y prefix.

    #### Examples
    ```python
    humanize_size(1024)
    ## '1.0 KB'
    ```
    """
    exp = 0
    b = 1024
    u = 'B'
    # Unit prefixes; the leading '' handles plain bytes.
    pre = [''] + [p + '' for p in 'KMGTPEZY']
    # r: index of the 1024-power bucket (clamped to the largest prefix);
    # f: format template. abs(r % (-r - 1)) is 0 when r == 0 (bytes,
    # no decimals) and 1 for every r >= 1 (one decimal place).
    r, f = min(int(math.log(max(n * b ** exp, 1), b)), len(pre) - 1), '{:,.%if} %s%s'
    h = (f % (abs(r % (-r - 1)), pre[r], u)).format(n * b ** exp / b ** float(r))
    return h
081904fe5a54e6369e0d206ab7e8c3d7a912229f
86,673
import torch


def select_device(device: str = "") -> torch.device:
    """Select the device that the model and image will be allocated on.

    Args:
        device (str, optional): name of device (cpu, cuda, ..). Defaults to "".

    Returns:
        device (torch.device): "cuda:0" when CUDA is requested and
        available, otherwise "cpu".
    """
    if not isinstance(device, str):
        # Accept torch.device instances as well.
        device = device.type
    wants_cpu = device.lower() == "cpu"
    use_cuda = not wants_cpu and torch.cuda.is_available()
    return torch.device("cuda:0" if use_cuda else "cpu")
15f032370f88f64fa046336dc6c040b9dd6583f6
86,676
import json


def decode(data, default=None):
    """
    Parse JSON from a string, falling back to *default* on failure.

    Args:
        data: JSON data to decode.
        default: Value to return if decoding fails. Defaults to an
            empty dictionary.
    """
    fallback = {} if default is None else default
    try:
        return json.loads(data)
    except ValueError:
        return fallback
9815f2f250437e950ec4d9a40bc77e488861ff8d
86,677
def _get_type_name(type_): # type: (type) -> str """Return a displayable name for the type. Args: type_: A class object. Returns: A string value describing the class name that can be used in a natural language sentence. """ name = repr(type_) if name.startswith("<"): name = getattr(type_, "__qualname__", getattr(type_, "__name__", "")) return name.rsplit(".", 1)[-1] or repr(type_)
57ed386d36edaf216bdeaccb368ff4d072cac792
86,678
def extract_transformations(events: list) -> list:
    """Extract the transformation types from the json object 'events',
    skipping entries typed "UNIMPORTANT"."""
    kept = []
    for event in events:
        kind = event["type"]
        if kind != "UNIMPORTANT":
            kept.append(kind)
    return kept
88d9f76244e91d2747543ec760a681e1698c191f
86,695
def get_querystring_for_page(request, page_number, querystring_key, default_number=1, prefix="?"):
    """
    Build a querystring pointing to *page_number*, prefixed by *prefix*
    (e.g.: "?page=2").

    The default page number is omitted from the querystring entirely, as
    is any leftover "querystring_key" parameter; an empty querydict
    yields "".
    """
    params = request.GET.copy()
    params[querystring_key] = page_number
    if page_number == default_number:
        # Page 1 (the default) needs no explicit querystring entry.
        del params[querystring_key]
    if "querystring_key" in params:
        del params["querystring_key"]
    if not params:
        return ""
    return "%s%s" % (prefix, params.urlencode())
a6c6a415e233c488da2ec08f6d5a22d85ea02d69
86,700
def get_args_pos(fparams) -> int:
    """Position of the varargs parameter, >= 0 if present, else -1.

    fparams is inspect.signature(f).parameters for some function f.

    Doctests:
    >>> import inspect
    >>> def f(a, b, c, x=8, **kwargs): pass
    >>> get_args_pos(inspect.signature(f).parameters)
    -1
    >>> def ff(a, b, *other_args, **kwargs): pass
    >>> get_args_pos(inspect.signature(ff).parameters)
    2
    >>> def fff(*args): pass
    >>> get_args_pos(inspect.signature(fff).parameters)
    0
    """
    for position, param in enumerate(fparams.values()):
        if param.kind == param.VAR_POSITIONAL:
            return position
    return -1
da534cfd22601ff199d2951c2e4f4ac84e70ea54
86,702
import math


def f2(x):
    """Compute and return f2(x) = ln(x + 2.2)."""
    shifted = x + 2.2
    return math.log(shifted)
bcd9be4a3f68f8c69fdf815a68dad1e38c193522
86,705
def mocked_no_results(*args, **kwargs):
    """Stand-in scraper returning an empty list — the same result as
    when nothing could be scraped from the HTML."""
    return list()
ab61b3cbf7da3829b10dcd99791342d13fd69469
86,706
def sum_of_powers(numbers, power):
    """Sum each number raised to *power*.

    Ex: sum_of_powers([2, 3, 4], 2) = 2^2 + 3^2 + 4^2 = 29
    """
    total = 0
    for value in numbers:
        total += value ** power
    return total
706f9a81eb83c4d9ba259480db8ca9907c17d5a9
86,713
def convert_to_abmag(value, name):
    """
    Convert magnitude to AB magnitude by adding a per-band constant.

    Parameters
    ----------
    value : float
        Value of the band
    name : str
        Name of the band as stated in the GSC column name. Options are:
        2MASS: tmassJMag, tmassHMag, tmassKsMag
        SDSS: SDSSuMag, SDSSgMag, SDSSrMag, SDSSiMag, SDSSzMag
        GSC: JpgMag, FpgMag, NpgMag

    Raises
    ------
    KeyError
        If *name* is not one of the supported bands.

    NOTE(review): the original docstring listed "IpgMag" as a GSC
    option, but the constants table defines "NpgMag" instead, so
    "IpgMag" raises KeyError — confirm the intended column name against
    the GSC schema.
    """
    # Additive AB-magnitude correction per band.
    mag_constants = {
        'tmassJMag': 0.90,
        'tmassHMag': 1.37,
        'tmassKsMag': 1.85,
        'SDSSuMag': 0.0,
        'SDSSgMag': 0.0,
        'SDSSrMag': 0.0,
        'SDSSiMag': 0.0,
        'SDSSzMag': 0.0,
        'JpgMag': -0.055,
        'FpgMag': 0.24,
        'NpgMag': 0.48,
    }
    abmag = value + mag_constants[name]
    return abmag
7783e9b8a2968e0be2da54e1781e9732f7292e9e
86,716
import re


def sanitize(phrase):
    """Sanitize words by removing punctuation.

    NOTE(review): the character class '[!-():?.]' contains the RANGE
    '!-(' (codepoints 0x21-0x28), so it also strips " # $ % & ' in
    addition to the literal characters ) : ? . — if a literal hyphen
    was intended it must be escaped or placed last ('[!():?.-]').
    Confirm intent before changing, as callers may rely on the wider
    removal.
    """
    return re.sub('[!-():?.]','',phrase)
79c20b88120144cc88fcd8934b9cc1277cb95e84
86,718
from typing import OrderedDict


def kvp_convert(input_coll):
    """
    Normalize a collection of scalars and/or 2-tuples into an OrderedDict.

    Tuples map `first_tuple_element` -> `second_tuple_element`; bare
    scalars map to None. Passing an OrderedDict returns it unchanged
    (the function is idempotent).

    :param input_coll: An iterable with string and/or 2-tuple elements
    :returns: collections.OrderedDict
    """
    if isinstance(input_coll, OrderedDict):
        return input_coll
    pairs = []
    for item in input_coll:
        if isinstance(item, tuple):
            pairs.append((item[0], item[1]))
        else:
            pairs.append((item, None))
    return OrderedDict(pairs)
e4cce681ac3b7a772a89a5ec905ed9af3272ebcc
86,719
def oxygen_cost_v(v):
    """Daniels' pace-power relationship (AKA speed-to-vdot), assumed to
    be the same for all runners.

    Args:
        v (float): velocity in m/min.

    Returns:
        float: oxygen cost to cover distance in mL/kg/min.
    """
    linear_coeff = 0.182258
    quadratic_coeff = 0.000104
    intercept = -4.60
    # Same evaluation order as the published quadratic fit.
    return linear_coeff * v + quadratic_coeff * v ** 2 + intercept
685ba9c4fdef898f312f2cfbd0aacb374b541ea1
86,720
import math


def get_lat_len_to_metre_factors_at(lat):
    """Return factors converting one degree of latitude and of longitude
    at latitude *lat* to metres.

    Series coefficients from "understanding terms in length of degree
    formula":
    https://gis.stackexchange.com/questions/75528/understanding-terms-in-length-of-degree-formula/75535#75535

    NOTE: *lat* is passed directly to ``math.cos``, so it is expected in
    radians.
    """
    # Latitude series coefficients.
    lat_c0, lat_c2, lat_c4, lat_c6 = 111132.92, -559.82, 1.175, -0.0023
    # Longitude series coefficients.
    lon_c1, lon_c3, lon_c5 = 111412.84, -93.5, 0.118

    latlen = lat_c0 \
        + (lat_c2 * math.cos(2 * lat)) \
        + (lat_c4 * math.cos(4 * lat)) \
        + (lat_c6 * math.cos(6 * lat))
    longlen = (lon_c1 * math.cos(lat)) \
        + (lon_c3 * math.cos(3 * lat)) \
        + (lon_c5 * math.cos(5 * lat))
    return (latlen, longlen)
4574d03fe99f3906bf51a3256c664c4d2bdda87a
86,721
def authenticate(func):
    """Decorator for handler methods that require a logged in user.

    The wrapped method receives the verified user as its first argument
    after ``self``; unauthenticated requests are redirected to /signup
    and the handler itself is never called.
    """
    def wrapper(self, *args, **kwargs):
        user = self.verify()
        if user is None:
            self.redirect('/signup')
            return
        return func(self, user, *args, **kwargs)
    return wrapper
03fbe9efb3944bb773fb9efb23856d8fcbfa1df3
86,722
def allowed_file(filename, extensions=frozenset({'csv'})):
    """
    Check if a filename contains an allowable extension.

    The default is an immutable frozenset to avoid the shared
    mutable-default-argument pitfall; behavior is otherwise unchanged.

    Parameters
    ----------
    filename : str
        The filename to check.
    extensions : collection of str
        The allowable (lower-case) file extensions.

    Returns
    -------
    allowed : bool
        True if allowable extension, False otherwise.
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in extensions
3d69412b59e84659d38276d7fbb2e71822e54a48
86,723
def _getcomponent(self, component, clsname=None): """Get single component by name. Parameters ---------- component : str Component name. clsname : str or None Optional name of the class for component lookup. If `None` then instance level components are searched. """ components = self._getcomponents(clsname) try: return components[component] except KeyError: if not clsname: clsname = self.__class__.__name__ raise AttributeError(f"'{clsname}' does not have '{component}' component")
c5589e6db0235fcd30f5a8a76febe9ac4453a63e
86,725
def ends_paragraph(s):
    """Return True if *s* is a blank line (empty or whitespace only)."""
    return s.strip() == ""
c3ad94d3a924373cd06922f1a632864acd3128fd
86,728
def get_skill_entry(name, skills_data) -> dict:
    """Find the entry named *name* in ``skills_data['skills']`` and
    return it, or an empty dict when absent."""
    entries = skills_data.get('skills', [])
    matches = (entry for entry in entries if entry.get('name') == name)
    return next(matches, {})
784c203e638ef798a7c811e5b5f783146902420e
86,730
import codecs


def read_file(filename, encoding="UTF-8"):
    """
    Read a whole text file.

    Uses a ``with`` block so the handle is closed even if reading fails
    (the original leaked the handle on error), and corrects the return
    documentation: ``read()`` yields one string, not a list of lines.

    :param filename: string path of the file to read
    :param encoding: string: the encoding of the file to read (standard: `UTF-8`)
    :return: str: the entire contents of the file
    """
    with codecs.open(filename, encoding=encoding) as handle:
        return handle.read()
81966b9633b7ac2548b092373bdd7cab92278aba
86,731
def map_to_unit_interval(x, lo=0., hi=1.):
    """Linearly map a value in [lo, hi] onto [0, 1]."""
    span = hi - lo
    return (x - lo) / span
7c7c3562ac8f92e2dfccb618026603d34e630458
86,737
def add_query_params(url, params):
    """
    Take a string url and a dictionary of parameters and generate a url.

    Fixes handling of URLs that already carry a querystring: the
    original appended '?' whenever the last character was not '?',
    producing invalid URLs like "host?a=1?b=2"; existing parameters are
    now extended with '&'.

    Arguments
    ---------
    url: (str) the base url
    params: (dict) the url parameters to add to it

    Examples
    --------
    >>> add_query_params("api.revibe.tech", {"uid": 2})
    'api.revibe.tech?uid=2'
    >>> add_query_params("api.revibe.tech?uid=2", {"cid": "49gb2"})
    'api.revibe.tech?uid=2&cid=49gb2'
    """
    if "?" not in url:
        url += "?"
    elif url[-1] not in "?&":
        # Already has parameters: join with '&' instead of a second '?'.
        url += "&"
    param_string = "&".join(f"{key}={value}" for key, value in params.items())
    return url + param_string
8e865cdbcdc28ba29df2164929b52a54510c5e64
86,738
def get_rat_name_folder(s):
    """Return the first path component of *s* containing a known
    rat-name fragment (Su, Ca, LR, CS, CR, LS); None when no component
    matches."""
    name_fragments = ("Su", "Ca", "LR", "CS", "CR", "LS")
    for component in s.split("/"):
        if any(fragment in component for fragment in name_fragments):
            return component
76fb117a507a97c8c4daafc750613a3762965dee
86,740
def is_sha1(instring):
    """Check if *instring* is a 40-hex-digit sha1 hash.

    A trailing "-f" suffix is stripped before checking.
    """
    candidate = instring[:-2] if instring.endswith("-f") else instring
    if len(candidate) != 40:
        return False
    try:
        int(candidate, 16)
        return True
    except ValueError:
        return False
0727a3115ea122af9acfa8121ce6f78da794f622
86,742
def list2map(listoffilenames, delimiter):
    """
    Convert a list of filenames to a map.

    :param listoffilenames: list of filenames
    :param delimiter: common separator used in filenames
    :return: dict keyed by the filename prefix before *delimiter*, with
        the complete filename as value
    """
    return {fname.split(delimiter)[0]: fname for fname in listoffilenames}
c68440d5a70e5cd6d2ba1d19fd0674826d32e87e
86,743
import re


def str_list_to_tuple_str_series(col, regex_pattern=r'[A-Z]\d+'):
    """
    Convert each string in Series *col* into a tuple of regex tokens.

    regex_pattern determines string tokens. Fixes two fragilities: the
    pattern is now a raw string (the non-raw '[A-Z]\\d+' relied on the
    invalid escape '\\d' surviving, which raises a warning on modern
    Python), and the first element is fetched positionally via ``iloc``
    so a Series whose index does not start at 0 no longer KeyErrors.
    """
    first = col.iloc[0] if hasattr(col, "iloc") else col[0]
    if not isinstance(first, str):
        print("error: str expected, instead {} found.".format(type(first)))
        return col
    pattern = re.compile(regex_pattern)
    return col.apply(pattern.findall).apply(tuple)
0c29b31614f037675733d6018d9b666fff195200
86,745
import requests
import html


def fetch(url: str):
    """Fetch *url* and return its HTML body with entities unescaped."""
    response = requests.get(url)
    return html.unescape(response.text)
11fc4b4bd3d9de47ce341588f7bbdc453a6aa008
86,752
import torch


def get_context_vector(encoded_sents, target, future=False, use_cuda=False):
    """
    Build the context vector for the sentence at index *target* for
    state classification, by mean-pooling the encodings of the sentences
    before it (or after it when *future* is True).

    Args:
        encoded_sents (Tensor, [num_sents, batch, encoder dim])
        target (int) : Index of the target sentence (starts at 0)
        future (bool) : If true, use the vectors from the future instead
            of the past
        use_cuda (bool) : place the zero fallback tensor on the GPU
    Ret:
        context vector (Tensor [batch, encoder dim]); all zeros when no
        context exists on the requested side.
    """
    no_past = (target == 0 and not future)
    no_future = (future and target == encoded_sents.shape[0] - 1)
    if no_past or no_future:
        zeros = torch.zeros(encoded_sents[0, :, :].shape)
        return zeros.cuda() if use_cuda else zeros

    if future:
        context = encoded_sents[target + 1:, :, :]  # [sents, batch, encoder dim]
    else:
        context = encoded_sents[:target, :, :]  # [sents, batch, encoder dim]
    return torch.mean(context, dim=0)  # [batch, encoder dim]
1d13c19d673f862aeee39b320442368f619e78f1
86,755
def watch_url(video_id):
    """Construct a sanitized YouTube watch url, given a video id.

    :param str video_id: A YouTube video identifier.
    :rtype: str
    :returns: Sanitized YouTube watch url.
    """
    return f'https://youtube.com/watch?v={video_id}'
18d883369f113f2084d44fc13f5ae18d49cb8dd3
86,756
def get_fno(obj):
    """
    Best-effort file descriptor for *obj*:

    * ``None`` passes through as ``None``.
    * An integer is returned unchanged.
    * An object with a callable ``fileno`` method returns ``fileno()``.

    Raises TypeError for anything else.
    """
    if obj is None:
        return None
    if isinstance(obj, int):
        return obj
    fileno = getattr(obj, "fileno", None)
    if callable(fileno):
        return fileno()
    raise TypeError("Expected None, int or fileobject with fileno method")
18c62c29604cbcf95cece22d3f287d454422b334
86,760
def count(fasta):
    """Count sequences in an open FASTA file handle.

    Tallies header lines (">"), then seeks back to the start of the
    file so the caller can re-read it.

    Parameters:
        fasta (file pointer): An open file handle corresponding to a
            FASTA file.

    Returns:
        count (int): Total number of sequences in the file.
    """
    total = sum(1 for line in fasta if line.startswith(">"))
    fasta.seek(0)
    return total
a715a2594ec8e57b7fefb11b66cce71ee68480b0
86,762
def is_control_char(c):
    """Return True if 'c' is a control character.

    c (a one-character string, or an integer code for compatibility
    with old Python versions) is considered a control character when
    its code is below 32 — excluding tab, CR and LF — or above 255
    (outside the extended ASCII set). An ASCII compatible character set
    is assumed.
    """
    try:
        code = ord(c)
    except TypeError:
        # Already an integer code point.
        code = c
    if code in (9, 13, 10):  # "\t", "\r", "\n" are allowed whitespace
        return False
    return code < 32 or code > 255
aedd5edad7e54d6eccee25f81332bd3ad17108c5
86,766
def millions(x, pos=0):
    """Tick formatter: expects value and tick position, returns the
    value formatted for thousands ('1.0K') or millions ('1.0M').

    The thousands/millions boundary is decided by the printed length of
    the value; *pos* is ignored.
    """
    within_thousands = len(str(x)) <= 8
    if within_thousands:
        return '%1.1fK' % (x * 1e-3)
    return '%1.1fM' % (x * 1e-6)
899af224b604e84575fafa89f2511376d0943791
86,767
import hashlib
import json


def get_filter_hash(filter_dict):
    """
    Produce a stable MD5 hex digest for a fixed set of filter values.

    Keys are sorted before serialization so logically-equal dicts hash
    identically.

    see also:
    https://www.doc.ic.ac.uk/~nuric/coding/how-to-hash-a-dictionary-in-python.html
    """
    serialized = json.dumps(filter_dict, sort_keys=True).encode()
    return hashlib.md5(serialized).hexdigest()
da528fbee09c0d623e1977c93f494cb3e71122f4
86,768
def is_tree(item):
    """Check if the map cell *item* is a tree ('#')."""
    return "#" == item
e525f361d146d256a6b4d5842350866fb6899c71
86,769
def build_axlabel(base, units):
    """
    Generate an axis label of the form 'Base [units]'.

    Parameters
    ___________
    base: str
        Axis name
    units: str
        Axis units; when empty, only the name is used

    Returns
    ________
    label: Complete axis label
    """
    if units:
        return base + ' [' + units + ']'
    return base
aee5060d2eecab7f0545bc5cd154ecb107094012
86,771
import copy


def swap(s, i, j):
    """
    Swap the positions of the given atoms.

    s: structure_class
    i: int
        The index of the first atom.
    j: int
        The index of the second atom.

    Returns
    -------
    s: structure_class (mutated in place and returned)
    """
    atoms = s.structure
    # Shallow-copy both positions so neither atom aliases the other's data.
    pos_i = copy.copy(atoms[i].position)
    pos_j = copy.copy(atoms[j].position)
    atoms[i].position = pos_j
    atoms[j].position = pos_i
    return s
a5e16a48b92470fd0d462be17bee4eb6bc920c58
86,774
def string_ellipse(string, maxlen):
    """Clamp the string to be no longer than the maximum length.

    Overlong strings are written as "... <tail>" where <tail> is the
    final part of the string, so the result is exactly *maxlen* long.

    :param string: The input string.
    :param maxlen: The maximum length of the string.
    :raises ValueError: when truncation is needed but maxlen <= 4
        (no room for the "... " prefix).
    """
    if len(string) <= maxlen:
        return string
    if maxlen <= 4:
        raise ValueError("maxlen too small")
    tail = string[4 - maxlen:]
    return "... " + tail
56ebe5aa7a54e3f5e9e4a49632768e6906aeade2
86,776
def jinja_filter_bbox_overlaps(bounds, geometry_col_name, srid=3857):
    """
    Render a SQL predicate testing whether a geometry's boundary
    intersects the bounding box.

    Note that the usual GIS meaning of "overlaps" is that the boundaries
    of the box and polygon intersect but not the interiors, so plain
    ``st_overlaps`` is false when either shape fully contains the other.
    For boundary testing we do want boundaries that lie completely
    within the bounding box, so ``st_contains(bbox, geom)`` is ORed in —
    while deliberately NOT matching the reverse case of the bbox being
    fully inside the geometry (e.g. a whole country boundary).
    """
    lower_left = 'ST_MakePoint(%.12f, %.12f)' % (bounds[0], bounds[1])
    upper_right = 'ST_MakePoint(%.12f, %.12f)' % (bounds[2], bounds[3])
    box2d = 'ST_MakeBox2D(%s, %s)' % (lower_left, upper_right)
    bbox = 'ST_SetSrid(%s, %d)' % (box2d, srid)
    return (
        '((%(col)s && %(bbox)s) AND ('
        ' st_overlaps(%(col)s, %(bbox)s) OR'
        ' st_contains(%(bbox)s, %(col)s)'
        '))'
        % dict(col=geometry_col_name, bbox=bbox)
    )
2e509e0ba2bf75f3d00df837b884aabdd28699a0
86,777
def dms2dd(degrees, minutes, seconds):
    """
    Utility function to convert DMS (degrees, minutes, seconds) to a
    decimal value.

    The minute/second contributions are subtracted for negative degrees
    so the magnitude grows away from zero.

    :param degrees: degrees value
    :param minutes: minutes value
    :param seconds: seconds value
    :return: decimal (float) value
    """
    if degrees >= 0.0:
        return degrees + (minutes / 60.0) + (seconds / 3600.0)
    return degrees - (minutes / 60.0) - (seconds / 3600.0)
5ddb31695f6281f1023040f4949906e8b4fa279a
86,780
def filesizeformat(bytes):
    """
    Formats the value like a 'human-readable' file size (i.e. 13 KB,
    4.1 MB, 102 bytes, etc).

    Fixes the error handling: ``float("abc")`` raises ValueError, which
    the original TypeError-only handler let escape; both now fall back
    to "0 bytes".
    """
    try:
        bytes = float(bytes)
    except (TypeError, ValueError):
        return u"0 bytes"
    KB, MB, GB = 1024, 1024 ** 2, 1024 ** 3
    if bytes < KB:
        return "%(size)d bytes" % {'size': bytes}
    if bytes < MB:
        return "%.1f KB" % (bytes / KB)
    if bytes < GB:
        return "%.1f MB" % (bytes / MB)
    return "%.1f GB" % (bytes / GB)
5af90b23c98c095a7065becc27000173c1b08708
86,791
import pickle


def get_stop_words(k=200):
    """Get stop words specific to the corpus.

    :param k: the threshold for counting as stop word (how many of the
        highest-frequency candidates are kept).
    :return: a stop word set.
    """
    with open('./data/stop_words_candidates.pkl', 'rb') as handle:
        candidates = pickle.load(handle)
    # Keep only the word of each (word, frequency) pair among the top k;
    # the original shadowed the parameter k with the loop variable.
    return set(word for word, _freq in candidates[:k])
ff1bb74526977de1a4954b25e80ab7ccef57daad
86,793
from typing import Dict
from typing import List


def get_taskmodel_and_task_names(task_to_taskmodel_map: Dict[str, str]) -> Dict[str, List[str]]:
    """Get mapping from task model name to the list of task names
    associated with that task model.

    Args:
        task_to_taskmodel_map (Dict[str, str]): map from task name to
            task model name.

    Returns:
        Dict[str, List[str]] map of task model names to (insertion-
        ordered) lists of task names using that task model.
    """
    taskmodel_and_task_names: Dict[str, List[str]] = {}
    for task_name, taskmodel_name in task_to_taskmodel_map.items():
        # setdefault replaces the manual "if key not in dict" dance.
        taskmodel_and_task_names.setdefault(taskmodel_name, []).append(task_name)
    return taskmodel_and_task_names
3983a723a2659ef5a3c428b84b0faab9d17b0140
86,797
import time


def get_ticks() -> int:
    """Return a monotonic timestamp in nanoseconds.

    This is ``time.perf_counter_ns()`` passed straight through, so the
    value is measured from an undefined reference point and only
    differences between calls are meaningful.

    NOTE(review): the original docstring claimed the count is "since
    get_ticks() was first called", but no first-call offset is recorded
    here — confirm whether callers rely on that.

    Returns
    -------
    Monotonic time in nanoseconds.
    """
    return time.perf_counter_ns()
dd0e2578e2d0dd7994d2ae2f4645f74b6ed0986b
86,798
import torch


def euclidean_reward(target_mel, pred_mel, reward_min=None, reward_max=None, verbose=0):
    """Negative Euclidean norm of the difference between the two
    flattened inputs, optionally clamped to [reward_min, reward_max].

    Fixes the cap checks: the original used truthiness
    (``if reward_min:``), which silently ignored a cap of 0;
    ``is not None`` honors it. Clamping now uses ``torch.clamp`` instead
    of rebuilding a tensor from a mixed [tensor, scalar] list.

    Returns a (1, 1) tensor.
    """
    diff = torch.flatten(target_mel) - torch.flatten(pred_mel)
    reward = -torch.norm(diff, p=2).view(1, 1)
    if reward_min is not None:
        reward = torch.clamp(reward, min=reward_min)
    if reward_max is not None:
        reward = torch.clamp(reward, max=reward_max)
    if verbose > 1:
        print("REWARDS: Euclidean reward: {:.6f}".format(reward.item()))
    return reward
f01a11a8c2fa51f09dc5529dfc605b93a0021b98
86,799
def determineIndentationAndTrailingWS(text):
    """Get the indentation used in this document and whether there is
    any trailing whitespace.

    The text is analyzed to find the most used indentations.
    The result is -1 if tab indents are most common.
    A positive result means spaces are used; the amount
    signifies the amount of spaces per indentation.
    0 is returned if the indentation could not be determined.

    The second return value is the number of lines with trailing ws.
    """
    text = text[:32768]  # Limit search for large files

    # create dictionary of indents, -1 means a tab
    indents = {}
    indents[-1] = 0

    # Running count of lines carrying trailing whitespace.
    trailing = 0

    lines = text.splitlines()
    lines.insert(0, "")  # so the lines start at 1
    for i in range(len(lines)):
        line = lines[i]

        # remove indentation
        lineA = line.lstrip()
        lineB = line.rstrip()
        lineC = lineA.rstrip()
        indent = len(line) - len(lineA)

        # Any difference after rstrip means the line had trailing ws.
        if len(lineB) < len(line):
            trailing += 1

        line = lineC
        if line.startswith("#"):
            continue
        else:
            # remove everything after the #
            line = line.split("#", 1)[0].rstrip()
        if not line:
            # continue of no line left
            continue

        # a colon means there will be an indent
        # check the next line (or the one thereafter)
        # and calculate the indentation difference with THIS line.
        if line.endswith(":"):
            if len(lines) > i + 2:
                line2 = lines[i + 1]
                tmp = line2.lstrip()
                if not tmp:
                    # Next line is blank; fall through to the one after.
                    line2 = lines[i + 2]
                    tmp = line2.lstrip()
                if tmp:
                    ind2 = len(line2) - len(tmp)
                    ind3 = ind2 - indent
                    if line2.startswith("\t"):
                        indents[-1] += 1
                    elif ind3 > 0:
                        # NOTE(review): a first sighting of a width is
                        # initialized to 1 and then immediately
                        # incremented, so it effectively counts as 2
                        # votes — confirm whether that weighting is
                        # intentional.
                        if ind3 not in indents:
                            indents[ind3] = 1
                        indents[ind3] += 1

    # find which was the most common tab width.
    indent, maxvotes = 0, 0
    for nspaces in indents:
        if indents[nspaces] > maxvotes:
            indent, maxvotes = nspaces, indents[nspaces]

    # print "found tabwidth %i" % indent
    return indent, trailing
2ee55507693211ad474a61767536a42fc0518128
86,801
def amcheck(lst_p, pattern, subset=True):
    """
    Anti-monotonicity check: is the GP *pattern* (or its inverse) a
    subset — or superset when ``subset=False`` — of any already
    existing GP?

    :param lst_p: list of existing GPs
    :param pattern: GP to be checked
    :param subset: check if it is a subset
    :return: True if superset/subset, False otherwise
    """
    forward = set(pattern.get_pattern())
    inverse = set(pattern.inv_pattern())
    relate = set.issubset if subset else set.issuperset
    for existing in lst_p:
        existing_set = set(existing.get_pattern())
        if relate(forward, existing_set) or relate(inverse, existing_set):
            return True
    return False
3b12b864765d9fd16ed8513a91d6c6587babda5f
86,803
import types from typing import List from typing import Tuple import linecache def _get_tb_lines(tb: types.TracebackType) -> List[Tuple[str, int, str]]: """Get the filename and line number and line contents of all the lines in the traceback with the root at the top. """ res = [] opt_tb = tb while opt_tb is not None: lineno = opt_tb.tb_frame.f_lineno filename = opt_tb.tb_frame.f_code.co_filename line = linecache.getline(filename, lineno).strip() res.append((filename, lineno, line)) opt_tb = opt_tb.tb_next res.reverse() return res
0ab40df31e39c5216e684d49ee94ad0638a72cd1
86,814
def contains(graph, bag, bags):
    """Indicate whether `bag` is (eventually) in `bags`, either directly
    or inside any of the bags they contain per `graph`."""
    if bag in bags:
        return True
    return any(contains(graph, bag, graph[b]) for b in bags)
8b7d71a1f425190d9d26080a8057cfd1bf970c26
86,819
import textwrap


def format_title(title):
    """Format clipboard text for display in history view list.

    Common leading whitespace is removed and tabs are collapsed to
    single spaces.

    :param title: Title to format.
    :type title: str
    :return: Formatted text.
    :rtype: str
    """
    dedented = textwrap.dedent(title)
    return dedented.replace('\t', ' ')
4a5ef9eab95bc472132112c7e5431d238cd7a5b6
86,822
import pathlib
import tempfile


def get_tempdir() -> pathlib.Path:
    """Return (creating it if needed) the temporary directory used by
    genshin.py."""
    target = pathlib.Path(tempfile.gettempdir()) / "genshin.py"
    target.mkdir(exist_ok=True, parents=True)
    return target
d4698586cbd9bbe452a99de87a5361b6691d9b3b
86,824
def empty_form_data(formset, index=None):
    """Return a form data dictionary for a "new" form in a formset.

    Given a formset and an index, return a copy of the empty form data.
    If index is not provided, the index of the *first* empty form will be
    used as the new index.

    :param formset: a Django formset instance.
    :param index: optional integer index for the new form; ``None`` means
        "append after the existing forms".
    :return: dict mapping prefixed field names to the empty form's values.
    """
    # BUG FIX: the original used ``index or len(formset)``, which wrongly
    # discarded an explicit index of 0 (falsy); compare against None instead.
    index = str(len(formset) if index is None else index)
    empty = formset.empty_form
    result = {}
    for field in empty.fields:
        key = empty.add_prefix(field).replace('__prefix__', index)
        result[key] = empty[field].value()
        # Fields rendered with a hidden initial value need that data too.
        if empty.fields[field].show_hidden_initial:
            initial_key = empty.add_initial_prefix(field).replace('__prefix__', index)
            result[initial_key] = empty[field].value()
    return result
8e6c26523ce477179d89a84c3e5d77046b374b65
86,825
def _table_sort_by(table, sort_exprs): """ Sort table by the indicated column expressions and sort orders (ascending/descending) Parameters ---------- sort_exprs : sorting expressions Must be one of: - Column name or expression - Sort key, e.g. desc(col) - (column name, True (ascending) / False (descending)) Examples -------- >>> import ibis >>> t = ibis.table([('a', 'int64'), ('b', 'string')]) >>> ab_sorted = t.sort_by([('a', True), ('b', False)]) Returns ------- sorted : TableExpr """ result = table.op().sort_by(table, sort_exprs) return result.to_expr()
4fbe1a81e6312389785f2fa78d0876c3056a5a16
86,833
def default_record(table, ignore_fields=None):
    """Return a dict representing a record from the table with all fields
    set to their default values.

    Args:
        table: gluon.dal.Table instance
        ignore_fields: list of strings, or string. Field names to exclude
            from the returned result.
            None: include all field names of the table.
            list: list of field names to exclude.
            'common': shorthand for ['id', 'created_on', 'updated_on']

    Returns:
        dict
    """
    if ignore_fields == 'common':
        ignore_fields = ['id', 'created_on', 'updated_on']
    excluded = ignore_fields if ignore_fields is not None else []
    return {name: table[name].default
            for name in table.fields
            if name not in excluded}
9cbe6f3c973f94ef2b16cba2a28d5b72aabd7974
86,844
def linear_scale(array, in_range=(0, 1), out_range=(0, 255)):
    """
    Linearly map values from in_range to out_range.

    Inputs:
      array: scalar or array-like values to translate
      in_range: The starting range [default: (0, 1)]
      out_range: The output range [default: (0, 255)]

    Outputs:
      new_array: A translated array
    """
    in_lo, in_hi = in_range
    out_lo, out_hi = out_range
    slope = (out_hi - out_lo) / (in_hi - in_lo)
    return (array - in_lo) * slope + out_lo
db02559061c86cdc03906b348ab5c19314a2f873
86,848
import fcntl


def unlock_file(fp, close=False):
    """
    Unlock a file pointer.

    If ``close`` is True the file pointer is closed after unlocking,
    even when the unlock itself raised. Always returns True.
    """
    try:
        fcntl.flock(fp, fcntl.LOCK_UN)
    finally:
        # Close unconditionally when requested, mirroring try/finally
        # semantics of the original implementation.
        if close:
            fp.close()
    return True
312719e9f97b0b0242cba35fe6f05601e3d92bc1
86,851
def ngram_slices(i, n, l):
    """
    Given index i, n-gram width n and array length l, return slices for
    all n-grams containing the ith element.
    """
    # First possible start is n-1 before i (clamped to 0); last start must
    # leave room for a full n-gram inside the array.
    lo = max(i - n + 1, 0)
    hi = min(i + 1, l - n + 1)
    return [slice(start, start + n) for start in range(lo, hi)]
489d6b36a960f55a03aa8e812d60f4f9add4d7d1
86,855
import yaml


def read_manifest(manifest_file: str) -> dict:
    """
    Read the yaml manifest and return parsed dict content.

    :param manifest_file: path to the YAML manifest file
    :return: parsed manifest content as a dict
    """
    with open(manifest_file, 'r') as handle:
        return yaml.load(handle, Loader=yaml.FullLoader)
149a6fa4f7348bf800c104abf1dd3d7875cfb638
86,864
def is_field(field, value=None):
    """Inspect the class name of a wrapped form field.

    If ``value`` is given, return True when the field's lower-cased class
    name equals it; otherwise return the lower-cased class name itself.

    :param field: bound-field wrapper exposing an inner ``field`` object.
    :param value: optional lower-case class name to compare against.
    :return: bool when ``value`` is given, otherwise str.
    """
    name = field.field.__class__.__name__.lower()
    if value is not None:
        # ``True if x else False`` was redundant; the comparison is a bool.
        return name == value
    return name
71a060c2086ebfcd679a445721975777d388ff4a
86,865
def create_filenames(msg_id, base_dir):
    """Return the request and response filenames and associated paths.

    :return: [request_filename, request_path, response_filename, response_path]
    """
    names = []
    for kind in ('request', 'response'):
        filename = msg_id + '.' + kind
        names.append(filename)
        names.append(base_dir + '/' + filename)
    return names
2af6b4261ff0e6e528e4fe72b35c86cba2870aa3
86,866
def bytes_to_int(src):
    """
    Reinterpret byte array as a big-endian unsigned integer.

    Arguments
    ---------
    src : bytes
        Byte array representation of a number.

    Returns
    -------
    int
        The integer value of that number (0 for empty input).
    """
    # int.from_bytes is the built-in, C-level equivalent of the manual
    # shift/OR accumulation loop.
    return int.from_bytes(src, 'big')
b5583e03c67d4fe467f29d46cc5ed1114d526a52
86,867
def merge_list(list_x, list_y):
    """
    Merge two lists, keeping first-occurrence order and dropping
    duplicated items.
    """
    # List membership (not a set) is kept deliberately so unhashable
    # items continue to work, as in the original.
    merged = []
    for item in list(list_x) + list(list_y):
        if item not in merged:
            merged.append(item)
    return merged
d5c031c5849c2daa03cf1443fb6ca1efe05c567f
86,871
def get_last_version(documents):
    """
    Helper function to get the last version of the list of documents
    provided.

    :param documents: List of documents to check.
    :type documents: [cla.models.model_interfaces.Document]
    :return: 2-item tuple containing (major, minor) version number;
        (0, -1) when no document was found.
    :rtype: tuple
    """
    # Lexicographic tuple comparison reproduces the original
    # major-then-minor update rule.
    best = (0, -1)
    for document in documents:
        candidate = (document.get_document_major_version(),
                     document.get_document_minor_version())
        if candidate > best:
            best = candidate
    return best
aef0e8484e9d54f5608a8052f99891b663bd5374
86,872
def timedelta_to_str(aTimedelta):
    """
    Convert a timedelta to a string in the form DD:HH:MM:SS.

    BUG FIX: the original used Python-2 style ``/`` division; under
    Python 3 that is true division, producing float "hours" and wrong
    minute/second values (e.g. 3700s rendered as 1:0:0 instead of
    1:1:40). Integer ``divmod`` restores the intended arithmetic.
    """
    days = aTimedelta.days
    hours, remainder = divmod(aTimedelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%d:%d:%d' % (days, hours, minutes, seconds)
9aef036edaf295f0f120de92028ce219b3e449e0
86,876
def df_describe(df, percentiles=None, include=None, exclude=None):
    """Describe a pandas DataFrame.

    Thin wrapper around ``DataFrame.describe`` forwarding the
    ``percentiles``, ``include`` and ``exclude`` options unchanged.
    """
    return df.describe(
        percentiles=percentiles,
        include=include,
        exclude=exclude,
    )
0519d9f8332be13d42755febcbc10a128dd74dfb
86,879
import inspect


def signature(obj):
    """
    Safely return function Signature object (PEP 362).

    inspect.signature was introduced in 3.3, however backports are
    available. In Python 3.3, it does not support all types of callables,
    and should not be relied upon. Python 3.4 works correctly.

    Any exception calling inspect.signature is ignored and None is
    returned.

    @param obj: Function to inspect
    @type obj: callable
    @rtype: inspect.Signature or None
    """
    try:
        return inspect.signature(obj)
    except (AttributeError, TypeError, ValueError):
        # TypeError covers non-callable objects (e.g. ints), which the
        # docstring promises to swallow but the original clause missed.
        return None
38fb03c7968803e41b4e8f1b19d454dfac3767c6
86,881
def make_flagsandlength(length: int) -> list:
    """
    Convert a length value into the two-byte Flags and Length pair, high
    byte first.

    :param length: the length to convert; should be a 12-bit value
    :return: the list with the two bytes
    """
    # High byte: fixed 0x7 flag nibble OR'd with the top 4 length bits.
    high = (0x7 << 4) | ((length >> 8) & 0xF)
    low = length & 0xFF
    return [high, low]
4a60a13da6564ca4a4bd72a8590b638247b8d3c3
86,882
def _find_used(activity, predicate): """Finds a particular used resource in an activity that matches a predicate.""" for resource in activity['used']: if predicate(resource): return resource return None
9d0465b60a036a0cd11ecb424aa3c0d869d1e28e
86,883
def pagination_response_items(pagination, route, items):
    """Return a pagination response dict for the given items."""
    next_url = (route + "?page={}".format(pagination.next_num)
                if pagination.has_next else None)
    prev_url = (route + "?page={}".format(pagination.prev_num)
                if pagination.has_prev else None)
    return {
        'has_next': pagination.has_next,
        'has_prev': pagination.has_prev,
        'items': items,
        'next_url': next_url,
        'prev_url': prev_url,
        'total': pagination.total,
        'pages': pagination.pages,
    }
09e34929efff865007e15b11d2c2df945cabdaaf
86,884
def character_tokenizer(sequence):
    """
    Split ``sequence`` into a list of characters.

    NOTE(review): falsy input (e.g. "" or None) yields None rather than
    an empty list — the original returned None implicitly in that case,
    and that behavior is preserved here.
    """
    if not sequence:
        return None
    return list(sequence)
8ab4b94d500c8bf81e1e5531c2caa0db1ceb6acc
86,898
import pathlib


def read_fasta(filepath):
    """
    Read the fasta file

    Assumes a FASTA file formatted::

        >some sequence name
        AAAAAAA...
        AAAAAAA...
        AAAA

    .. note:: If more than one sequence is present, only the first is
        returned

    :param filepath: The FASTA formatted text file containing a single sequence
    :returns: The sequence name, the sequence
    """
    header = None
    chunks = []
    with pathlib.Path(filepath).open('rt') as handle:
        for raw in handle:
            line = raw.strip()
            if not line:
                continue
            if line.startswith('>'):
                if header is not None:
                    # A second header marks the next sequence; stop here.
                    break
                header = line[1:]
            else:
                chunks.append(line)
    return header, ''.join(chunks).upper()
97c45e9e61b2759b43dda66cde406c2cf0047b1a
86,900
def tuplify(df, name):
    """Pads a hierarchical column name with enough empty strings to be
    used as a column specification for a hierarchical DataFrame.

    For example:
        tuplify(df, ('roi_atoms', 'Fz')) = ('roi_atoms', 'Fz', '', '')
    """
    levels = df.columns.nlevels
    # Exact ``type`` check kept from the original (tuple subclasses such
    # as namedtuples are still wrapped).
    key = name if type(name) is tuple else (name,)
    return key + ('',) * (levels - len(key))
ed9e43732c4c61ce2a8a0c414db0e9fd740383e6
86,901
def chunkarray(array: list, size: int):
    """Split a list into consecutive chunks of at most ``size`` items."""
    chunks = []
    for start in range(0, len(array), size):
        chunks.append(array[start:start + size])
    return chunks
36a747a881de7d911565a917581143468cf79b18
86,903
def _prepare_body_update_trunk(prop_diff): """Prepare body for PUT /v2.0/trunks/TRUNK_ID.""" return {'trunk': prop_diff}
edf2d71373bdbc4503402f75cade89ba717f473a
86,905
def _get_fieldspec(dtype): """ Produce a list of name/dtype pairs corresponding to the dtype fields Similar to dtype.descr, but the second item of each tuple is a dtype, not a string. As a result, this handles subarray dtypes Can be passed to the dtype constructor to reconstruct the dtype, noting that this (deliberately) discards field offsets. Examples -------- >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr [(('a', 'A'), '<i8'), ('b', '<f8', (3,))] >>> _get_fieldspec(dt) [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))] """ if dtype.names is None: # .descr returns a nameless field, so we should too return [('', dtype)] else: fields = ((name, dtype.fields[name]) for name in dtype.names) # keep any titles, if present return [ (name if len(f) == 2 else (f[2], name), f[0]) for name, f in fields ]
6251d786f1a949959ae71cc001b180a8872f2972
86,906
def aggregate_permuted_network(observed_networks):
    """Generate one aggregate permuted network from multiple observed
    networks.

    This handles the case where multiple observed networks are generated
    (e.g. from models produced by different random seed initializations of
    an unsupervised feature construction algorithm): each observed network
    is permuted individually and the permutations are folded into a single
    aggregate network.

    Parameters
    -----------
    observed_networks : list(CoNetwork)
        the observed networks, one per random-seed initialization of the
        same unsupervised feature construction algorithm

    Returns
    -----------
    CoNetwork, the aggregate permuted network
    """
    aggregate = None
    for network in observed_networks:
        permuted = network.permute_pathways_across_features()
        # Truthiness (not an ``is None`` check) kept from the original.
        if aggregate:
            aggregate.aggregate(permuted)
        else:
            aggregate = permuted
    return aggregate
d56ebab767a32942a9d292277dbea591f00eee27
86,907
def get_labels(path):
    """
    Get labels from a text file.

    :param path: path to text file
    :return: list of labels, one per stripped line
    """
    # The with-statement closes the file; the original's explicit
    # f.close() inside the block was redundant and has been removed.
    with open(path, encoding='utf-8', errors='ignore') as f:
        return [line.strip() for line in f]
7c43d679be2f4ff415b03141efc3c25ef708fa84
86,908
def remove_duplicates(text, separator):
    """
    Remove duplicate entries from a separator-separated list.

    FIX: the original round-tripped through ``set``, which makes the
    output order arbitrary (hash-dependent). ``dict.fromkeys`` removes
    duplicates while keeping the order of first occurrence, so the result
    is deterministic.
    """
    entries = text.split(separator)
    return separator.join(dict.fromkeys(entries))
75456765b97f20e41646bbb7b97dee195ef404bd
86,911
def _descuento(cantidad: int, total: float) -> float: """Calcula el descuento a aplicar. Si la cantidad de helados es mayor a 7, el descuento es del 10% de lo que se compro; de lo contrario no se aplica descuento. :param cantidad: Cantidad de helados :type cantidad: int :param total: Total de la compra :type total: float :return: Descuento a aplicar :rtype: float """ if cantidad >= 7: return total * 0.1 return 0
e9538a4c87a4d8ddab4ceeac8b6c9f4c724abdc8
86,913
import operator

# SECURITY FIX: the original built a Python expression string and ran it
# through eval(), which executes arbitrary code if the instruction line is
# untrusted. A fixed table of comparison operators preserves behavior for
# the supported conditions without that risk.
_COMPARISONS = {
    '>': operator.gt,
    '<': operator.lt,
    '>=': operator.ge,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
}


def evaluateCondition(instructionLine, dict):
    """Evaluate the condition for a line of instruction.

    The last three whitespace-separated tokens are interpreted as
    ``<register> <comparison> <number>`` and evaluated against the
    register values in ``dict``.

    :param instructionLine: instruction text ending in a condition.
    :param dict: mapping of register name -> value. (The parameter name
        shadows the builtin but is kept for interface compatibility.)
    :return: bool result of the comparison.
    :raises KeyError: if the operator or register is unknown.
    """
    register, sign, num = instructionLine.split()[-3:]
    value = float(num) if '.' in num else int(num)
    return _COMPARISONS[sign](dict[register], value)
c6648e892fc9a03e2164a148f7c7eb15af2347c0
86,916
from typing import Iterator
from typing import Mapping

import json
import time


def json_failure_handler(results: Iterator[Mapping]) -> str:
    """A failure handler which returns results in a JSON-formatted response.

    Args:
        results: The results of all checks which were executed for a
            checker. Each result dictionary is guaranteed to have the
            keys: 'check', 'message', 'passed', 'timestamp'.

    Returns:
        The checker response, formatted as JSON.
    """
    payload = {
        'status': 'failure',
        'timestamp': time.time(),
        'results': results,
    }
    return json.dumps(payload)
13d75e5615c39737abb5bcd7dcc6f2514452ab9e
86,918
from typing import Optional


def time_str_to_ms(time: str) -> Optional[int]:
    """
    Convert a time string in 00:00.000 format to milliseconds.

    Returns None when the string does not match the expected format.
    """
    try:
        minutes, rest = time.split(":")
        seconds, millis = rest.split(".")
        return int(minutes) * 60_000 + int(seconds) * 1000 + int(millis)
    except ValueError:
        # Raised by a bad split arity or a non-numeric component.
        return None
dcec96b7c7ff9dae7c766743fd6fff05eaef6e88
86,923