Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import hashlib


def hash_string(string_to_hash: str) -> str:
    """
    Returns the sha256 hexdigest of string_to_hash.

    :param string_to_hash: Input to the sha256 hash function
    :type string_to_hash: str
    :return: Hexdigest of string_to_hash.
    :rtype: str
    """
    return hashlib.sha256(string_to_hash.encode("utf-8")).hexdigest()
318f58168f216e2573f7833634107679b44962df
395,308
def get_all_parents(go_objs):
    """Return a set containing all GO Term parents of multiple GOTerm objects."""
    go_parents = set()
    for go_obj in go_objs:
        go_parents |= go_obj.get_all_parents()
    return go_parents
889ade2998510a97ae29edb7a3572a8fbe0055e9
196,352
def find_all_0_128(files):
    """
    Finds all the files whose filenames contain "_0-".

    :param files: list of tuples to files [(path, filename)]
    :return: list of matching tuples [(path, filename)]
    """
    match = []
    for i in files:
        if "_0-" in i[1]:
            match.append(i)
    return match
7cba218df04613ae8d3d07fd3984d6fbc5c866c4
505,229
import math


def isclose(a, b):
    """
    Checks approximate equality of floating-point numbers a, b.

    We add 1 to the numbers, since if a ~= b ~= 0, then relative tolerance
    will be zero.
    """
    return math.isclose(a + 1, b + 1)
3e3443a87b223dcf077b219f9308784b8c03f87f
182,665
def remove_whitespace(text):
    # type: (str) -> str
    """strips all white-space from a string"""
    if text is None:
        return ""
    return "".join(text.split())
747538de63b11e49d498b2f4ccb8286975019ec8
699,049
from typing import List
from typing import Dict


def remap_list(target_list: List[int], mapping: Dict[int, int]) -> List[int]:
    """
    Take a list of atom indices and remap them using the given mapping.
    """
    return [mapping[x] for x in target_list]
8672f531490fbee6442eaca72b53df10c8844a14
656,611
def parse_tva_id_Descriptor(data, i, length, end):
    """\
    parse_tva_id_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    Parses a descriptor that carries one or more TVA IDs along with associated
    state for each. The returned dict is:

        { "type" : "tva_ids",
          "ids"  : [ { "id" : number, "running_status" : number },
                     { "id" : number, "running_status" : number },
                     ...
                   ]
        }

    (Defined in ETSI TS 102 323 specification)
    """
    ids = []
    n = 0
    while n < length:
        id = (ord(data[i + 2 + n]) << 8) + ord(data[i + 3 + n])
        rs = ord(data[i + 4 + n]) & 0x7
        ids.append({"id": id, "running_status": rs})
        n = n + 3
    return {"type": "tva_ids", "ids": ids}
39c62b0430c6dfb6a37a66dda61fb94bdee73ebd
145,400
def make_feature_dict(feature_sequence):
    """A feature dict is a convenient way to organize a sequence of Feature
    objects (which you have got, e.g., from parse_GFF).

    The function returns a dict with all the feature types as keys. Each value
    of this dict is again a dict, now of feature names. The values of this
    inner dict are lists of features.

    An example makes this clear. Let's say you load the C. elegans GTF file
    from Ensembl and make a feature dict:

    >>> worm_features_dict = HTSeq.make_feature_dict( HTSeq.parse_GFF(
    ...     "test_data/Caenorhabditis_elegans.WS200.55.gtf.gz" ) )

    (This command may take a few minutes to deal with the 430,000 features
    in the GTF file. Note that you may need a lot of RAM if you have millions
    of features.)

    Then, you can simply access, say, exon 0 of gene "F08E10.4" as follows:

    >>> worm_features_dict[ 'exon' ][ 'F08E10.4' ][ 0 ]
    <GenomicFeature: exon 'F08E10.4' at V: 17479353 -> 17479001 (strand '-')>
    """
    res = {}
    for f in feature_sequence:
        if f.type not in res:
            res[f.type] = {}
        res_ftype = res[f.type]
        if f.name not in res_ftype:
            res_ftype[f.name] = [f]
        else:
            res_ftype[f.name].append(f)
    return res
d8a89ab51625f7ea2b71be1f078eb1d610031cbb
448,718
def standardize_punctuation(text: str) -> str:
    """Standardizes punctuation in the text.

    Args:
        text: Input text.

    Returns:
        Punctuation-standardized text.
    """
    text = text.replace("’", "'")
    text = text.replace("`", "'")
    text = text.replace("“", '"')
    text = text.replace("”", '"')
    return text
2bd142fe27e9846b5b4c3840d96b55bf4c83b4f9
572,637
def isShowcasable(pixels, duration, tomodify="duration"):
    """
    Calculate the speed the mouse would need to reach, and return True if it
    is considered feasible, or a string describing what is wrong.
    """
    MINSPEED = 40.0
    MAXSPEED = 2000.0
    speed = pixels / duration
    if (
        MINSPEED < speed < MAXSPEED or pixels == 0
    ):  # pixels = 0 means the capture is made without any computed move
        return True
    elif speed < MINSPEED:
        if tomodify == "duration":
            change = round(pixels / (MINSPEED + 1))
        else:
            change = duration * MINSPEED
        return f"Mouse move would be too slow: try {tomodify} : {change}"
    else:
        if tomodify == "duration":
            change = round(pixels / (MAXSPEED - 1))
        else:
            change = duration * MAXSPEED
        return f"Mouse move would be too fast: try {tomodify} : {change}"
9a33a8d59fe2245de7768d20d296c0258a92ebf3
458,314
def get_resource_for_path(path, root, codebase):
    """
    Return a resource in ``codebase`` that has a ``path`` relative to the
    ``root`` Resource.

    For example, say we start from this:

        path: this/is/that
        therefore segments [this, is, that]
        root: /usr/foo

    We would have these iterations:

        iteration1
            root = /usr/foo
            segments = [this, is, that]
            seg this
            segments = [is, that]
            children = [/usr/foo/this]
            root = /usr/foo/this

        iteration2
            root = /usr/foo/this
            segments = [is, that]
            seg is
            segments = [that]
            children = [/usr/foo/this/is]
            root = /usr/foo/this/is

        iteration3
            root = /usr/foo/this/is
            segments = [that]
            seg that
            segments = []
            children = [/usr/foo/this/is/that]
            root = /usr/foo/this/is/that

        finally return root as /usr/foo/this/is/that
    """
    segments = path.strip('/').split('/')
    while segments:
        seg = segments.pop(0)
        children = [c for c in root.children(codebase) if c.name == seg]
        if len(children) != 1:
            return
        else:
            root = children[0]
    return root
5e6c93a9193323938594e08a622d769edcc73c0a
308,586
from pathlib import Path


def get_output_filename(output_folder: str, repository_type: str,
                        repository_name: str, filename: str) -> Path:
    """Returns the output filename for the file fetched from a repository."""
    return (
        Path(output_folder)
        / Path(repository_type.lower())
        / Path(Path(repository_name).name)
        / Path(Path(filename).name)
    )
23b806f98265b45b799dbcc177760d5ceb8248fb
708,404
import torch


def neigh_square(position_diff: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    """
    Square-shaped neighborhood scaling function based on center-wise diff
    `position_diff` and radius `sigma`.

    Parameters
    ----------
    position_diff : torch.Tensor
        The positional difference around some center.
    sigma : torch.Tensor
        The scaling radius.
    """
    v = (position_diff).abs().max(-1)[0]
    # v = pnorm(position_diff, p=2)
    return torch.exp(torch.neg(v.sqrt() / sigma))
abebe4bbcbb348d45b396867b833fcb0714cb0ca
646,578
import math


def _angle_between_vectors(vector_1, vector_2):
    """
    Calculates the angle in between the 2 rank vectors.

    Uses dot product to calculate the cosine of the angle, then math.acos to
    convert into an angle.

    Args:
        vector_1, vector_2: Type-Dict
            The angle is calculated in between these 2 vectors. The keys of
            the vector are the axes of the vector, and values are the length
            in that axis.

    Returns:
        The angle in between the 2 vectors in degrees.
    """
    dot_product = 0
    magnitude1 = 0
    magnitude2 = 0
    for x in vector_1.keys():
        dot_product = dot_product + vector_1[x] * vector_2[x]
        magnitude1 = magnitude1 + vector_1[x] * vector_1[x]
        magnitude2 = magnitude2 + vector_2[x] * vector_2[x]
    cosine_angle = dot_product / (math.sqrt(magnitude1 * magnitude2))
    angle = math.acos(cosine_angle)
    # angle in degrees
    angle = angle * 180 / math.pi
    return angle
957eb4a4748384e5962c7703f9d6af11c5d3c32e
506,221
from typing import List
from typing import Dict


def get_instances_from_ip(ip: str, instance_descriptions: List[Dict]) -> List[Dict]:
    """Filter AWS instance_descriptions based on PrivateIpAddress

    :param ip: private IP of AWS instance.
    :param instance_descriptions: list of AWS instance description dicts.
    :returns: list of instance description dicts"""
    instances = [instance for instance in instance_descriptions
                 if instance['PrivateIpAddress'] == ip]
    return instances
ffc6f356c520f1f0f2865b359926091b6df064d5
169,131
def _get_text_alignment(point1, point2):
    """Get the horizontal and vertical text alignment keywords for text placed
    at the end of a line segment from point1 to point2

    args:
        point1 - x,y pair
        point2 - x,y pair

    returns:
        ha - horizontal alignment string
        va - vertical alignment string"""
    x1, x2, y1, y2 = point1[0], point2[0], point1[1], point2[1]
    if x1 < x2:
        ha = 'left'
    else:
        ha = 'right'
    if y1 < y2:
        va = 'bottom'
    else:
        va = 'top'
    return ha, va
317860030bf86750207bc891c236ad2618c686b1
12,839
def get_all(client, query_params):
    """Requests the list of clusters."""
    return client.get_clusters(**query_params)
06b24d515fd4afc26d2f7802c9e2f2dd14907a60
217,734
import copy


def _filter_for_staff(staff_num, elem):
    """Determines whether the element belongs to the given staff.

    Elements normally have a <staff> child, with a 1-indexed staff number.
    However, elements in <attributes> may use a "number" XML attribute instead
    (also 1-indexed).

    Args:
        staff_num: The 0-indexed staff index.
        elem: The XML element (a descendant of <measure>).

    Returns:
        A copied element with staff information removed, or None if the
        element does not belong to the given staff.
    """
    staff = elem.find('staff')
    new_elem = copy.deepcopy(elem)
    if staff is not None:
        for subelem in new_elem:
            if subelem.tag == 'staff':
                if subelem.text == str(staff_num + 1):
                    # Got the correct "staff" tag.
                    return new_elem
                else:
                    # Incorrect "staff" value.
                    return None
    # The "number" XML attribute can refer to the staff within <attributes>.
    if 'number' in new_elem.attrib:
        if new_elem.attrib['number'] == str(staff_num + 1):
            del new_elem.attrib['number']
            return new_elem
        else:
            # Incorrect "number" attribute.
            return None
    # No staff information, element should be copied to all staves.
    return copy.deepcopy(elem)
4641d836dad0e10523c384b4367759914851a27d
364,012
def utf8len(strn):
    """Length of a string in bytes.

    :param strn: string
    :type strn: str
    :return: length of string in bytes
    :rtype: int
    """
    return len(strn.encode("utf-8"))
d43215730e1bfbb9beb593033b6d8339b45cce2b
38,578
import math


def is_pentagonal(n):
    """
    According to Wikipedia, to test whether a positive integer x is a
    pentagonal number you can check that

        n = (sqrt(24*x + 1) + 1) / 6

    is a natural number. The number x is pentagonal if and only if n is a
    natural number.

    Source: https://stackoverflow.com/questions/37390233/python-is-pentagonal-number-check

    :param n: number to test
    :return: True if n is pentagonal
    """
    digit = (math.sqrt(24 * n + 1) + 1) / 6
    return digit.is_integer()
90d7974193fc3e19d87e2ae0b3f19095a11ab5a3
271,862
def get_email_key(response_id, requester=False):
    """
    Returns a formatted key for an email.
    Intended for storing the body of an email.

    :param response_id: id of the response
    :param requester: will the stored content be emailed to a requester?
    :return: the formatted key
        Ex. 1_requester
            1_agency
    """
    return '_'.join((str(response_id), 'requester' if requester else 'agency'))
485fc7779c59bceb5c908c24b8859f5bb24a7265
215,418
def props_of_this_event(event, ts_tuple, run):
    """
    Find the set of propositions that correspond to an event.

    Parameters
    ----------
    event: A tuple
        An event is a tuple of the form (agent_no, run_pos). This argument can
        either be a tuple of events (simultaneous events) or a tuple of
        integers (single event).
    ts_tuple: A tuple of transition system objects
        In this tuple ts_tuple[i] corresponds to the transition system that
        models agent i.
    run: A list of tuples.
        A run on the team transition system. Length of the list is the length
        of the run, and the i^th tuple run[i] gives the state of the team at
        the i^th position of the run. Length of this tuple is equal to the
        number of the agents and the j^th element run[i][j] of this tuple
        gives the state of the j^th agent at the i^th position of the run.

    Returns
    -------
    props: The set of propositions.
    """
    # Construct a serialized list of events
    events_ser = [ee for ee in event]
    # Construct the set of propositions of the states of the agents
    props = set()
    for e in events_ser:
        # NOTE: run[pos][agent] = state_label
        state = run[e.pos][e.agent]
        props |= ts_tuple[e.agent].g.node[state].get('prop', set())
    return props
a0abab647967e1f777aa47797af0e18f61cba6bd
651,962
def compare_equal(compare_data):  # pylint: disable=redefined-outer-name
    """
    Returns a function which checks that a given data is equal to the stored
    reference.
    """
    return lambda data, tag=None: compare_data(lambda x, y: x == y, data, tag)
45215759bb4aac2a9fe304e75c40e3ff70acd787
687,725
import re


def _get_join_tables_and_columns(clause):
    """Parses a join clause like `<t1>.<c1>=<t2>.<c2>` into tables and columns."""
    pattern = re.compile(r"^(.*)\.(.*?)\s*=\s*(.*)\.(.*)$")
    match = pattern.match(clause)
    assert match, clause
    return match.group(1), match.group(2), match.group(3), match.group(4)
8a23bcab38f359b6fba787b1196485edf3f51ed5
219,490
from pathlib import Path


def is_media_file(media_file_extensions: list, file_name: str) -> bool:
    """
    If the file extension is in a list of media_file_extensions, then return
    True (this is a media file), else return False
    """
    file_extension = Path(file_name).suffix
    if file_extension in media_file_extensions:
        return True
    return False
51fa8b96eddbd47418450aaf492fd63e86d693ab
417,384
import json


def _read_session_cache(filepath):
    """Returns the JSON contents of CACHE_DIR/SESSION_FILENAME."""
    try:
        with open(filepath, 'r') as file:
            result = json.load(file)
        if isinstance(result, dict):
            return result
    except:  # noqa
        # If we cannot parse the cache for any reason, treat it as if the cache is empty
        pass
    return {}
bfe4ee84d00498958b3d052f39889f6fdd5e34fb
172,033
def nCr(n, r):
    """Calculate ways of selecting *r* members out of a collection of *n* members.

    :param n: Size of the collection
    :param r: Number of members to be selected from the collection
    :returns: n!/[r!(n-r)!]

    .. note::
        :sup:`n` C :sub:`r` is typically read as *n combination r*. Order of
        members *is not* important in the combination. E.g. There are
        :sup:`4` C :sub:`2` = 6 ways of selecting two members out of a
        collection (A, B, C, D) => AB, AC, AD, BC, BD, CD.
    """
    result = 0
    if n >= 0 and r >= 0 and r <= n:
        num = 1
        denom = 1
        for i in range(r):
            num *= (n - i)
            denom *= (i + 1)
        result = num / denom
    return result
a4bd8de9e4b3339ef2c23c2fd7731106d89b5fe7
646,071
def _merge_tables(d1, d2):
    """
    Merge dictionaries

    Args:
        d1 (dict): first dict to merge
        d2 (dict): second dict to merge
    """
    for key, l in d2.items():
        if key in d1:
            for item in l:
                if item not in d1[key]:
                    d1[key].append(item)
        else:
            d1[key] = l
    return d1
2f1fabcd9ef7ce2f8f53405e267561c88002f457
21,124
def cpu_format(cpu, ratio=1.0):
    """Convert number of cpu to cpu cycle.

    Args:
        cpu: Number of cpu.
        ratio: The percent that can be used.

    Returns:
        Formatted string of cpu cycle if cpu is valid, None otherwise.
    """
    try:
        cpu = float(cpu)
    except (TypeError, ValueError):
        return None
    else:
        return "%dm" % int(ratio * cpu * 1000)
98d88c6670076508472f732e6bc7bcf008b76c24
376,283
def _bits_to_bytes_len(length_in_bits):
    """
    Helper function that returns the number of bytes necessary to store the
    given number of bits.
    """
    return (length_in_bits + 7) // 8
3421909f7018f8860ee3143dca6936e33d63af65
82,350
def segment_kmers(string: str, k: int = 1) -> list:
    """Segment a string in k-mers: segments of length k.

    Note that the final segment might be shorter than k.

    >>> segment_kmers('abcdefghijklmno', k=2)
    ['ab', 'cd', 'ef', 'gh', 'ij', 'kl', 'mn', 'o']
    >>> segment_kmers('abcdefghijklmno', k=3)
    ['abc', 'def', 'ghi', 'jkl', 'mno']
    >>> segment_kmers('abcdefghijklmno', k=4)
    ['abcd', 'efgh', 'ijkl', 'mno']

    Parameters
    ----------
    string : str
        The string to segment
    k : int, optional
        The length k of the segments, by default 1

    Returns
    -------
    list
        A list of segments
    """
    segments = []
    for i in range(0, len(string), k):
        segment = string[i:i + k]
        segments.append(segment)
    return segments
301c475cff86e59789dffbf1ab8b9737057f4df0
226,742
def pad(number: int, zeros: int) -> str:
    """Converts a number to a string with padding with zeros"""
    return "0" * (max(zeros - len(str(number)), 0)) + str(number)
56503f6cc47a6205e89efbc29a4cd5652cca4cb4
562,653
import torch


def to_4D_image(image: torch.Tensor) -> torch.Tensor:
    """Convert `image` to 4D tensors (b, c, h, w)

    Args:
        image (torch.Tensor): 2/3/4D image tensor
            2D: (h, w)
            3D: (c, h, w)
            4D: (b, c, h, w)

    Returns:
        torch.Tensor: 4D image tensor
    """
    ndims = len(image.shape)
    assert ndims in [2, 3, 4], \
        f"only supports 2/3/4D images while {ndims}-D are given."
    if ndims == 2:
        return image[None, None, :, :]  # b, c
    elif ndims == 3:
        return image[None, :, :, :]  # b
    else:
        return image
56c7638e9dbc78205478678180beda730d5b5e36
600,687
def make_all_seqs(l):
    """ Makes all possible sequences, including Ns, of length l """
    if l > 8:
        print("Warning - large barcodes detected!")
        print("It may be faster to use option '--dont_build_reference'!")
    nts = ['A', "C", "G", "T", "N"]
    all_seqs = nts
    for i in range(l - 1):
        new_seqs = []
        for seq in all_seqs:
            for nt in nts:
                new_seqs.append(seq + nt)
        all_seqs = new_seqs
    return all_seqs
875152c28bddee735ca1157de15746008fbfd53d
61,085
def draw_specific_samples(param, parameters, samples):
    """Return samples for a given parameter

    param: str
        parameter that you wish to get samples for
    parameters: nd list
        list of all parameters stored in the result file
    samples: nd list
        list of samples for each parameter
    """
    ind = [i.index(param) for i in parameters]
    return [[k[ind] for k in l] for l in samples]
24922620a6ca276321a19416b90e0fb880879e02
590,476
def reorder_columns(df, order_dict=None):
    """Orders columns according to ``order_dict``.

    Can be used to order columns according to hierarchical constraints.
    Consider the tree where a parent is the sum of its children. Let a node's
    label be its BFS traversal order, with the root as 0. Use ``order_dict``
    to map column names to these node labels, to get the dataframe in BFS
    traversal order, matching the structure of the tree.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input data frame.
    order_dict : `dict` [`str`, `float`] or None
        How to order the columns. The key is the column name, the value is its
        position. Columns are returned in ascending order by value from left
        to right. Only columns specified by ``order_dict`` are included in the
        output. If None, returns the original ``df``.

    Returns
    -------
    reordered_df : `pandas.DataFrame`
        ``df`` with the selected columns reordered.
    """
    if order_dict is not None:
        order_tuples = list(order_dict.items())
        order_tuples = sorted(order_tuples, key=lambda x: x[1])
        order_names = [x[0] for x in order_tuples]
        df = df[order_names]
    return df
06ab6a7aa91b4985df83fce4626f6a6d9b1932e0
557,351
def is_valid(glyph_str):
    """
    Validates if glyph_str is alphanumeric and contains unique chars

    Parameters
    ----------
    glyph_str : string
        glyph alphabet to be used for number encoding

    Returns
    -------
    True when:
        glyph string is alphanumeric
        Each char occurs only once
    """
    uniq = True
    if len({x for x in glyph_str}) != len(glyph_str):
        uniq = False
    return uniq and glyph_str.isalnum()
2d1790224313d164e8271b46959c08cfe5fe197f
85,698
from typing import List


def url_create_blacklist(url_information: dict) -> List[dict]:
    """
    Create blacklist for url command.

    Args:
        url_information(dict).

    Returns:
        Blacklist(list).
    """
    blacklist_information = []
    blacklists = url_information.get('blacklists', {})
    for bl_name, bl_status in blacklists.items():
        blacklist_information.append({'Name': bl_name, 'Status': bl_status})
    return blacklist_information
71a2822b8e951056258c802c7ec711f1c4c71248
207,845
def get_filters(content):
    """
    Get filter names from the token's content.

    WARNING: Multiple filters can be used simultaneously, e.g.:
        {{ some_list|safeseq|join:", " }}

    :content: String; the token's content
    :returns: a list of filter names
    """
    filters = []
    split_content = content.split('|')
    for item in split_content[1:]:
        if ':' in item:
            item = item[:item.index(':')]
        filters.append(item)
    return filters
c85be646425d6bee9b97b447cf9185d60a8cf24f
323,943
def _get_ftype_from_filename(fn, ext_unit_dict=None):
    """
    Returns the boundary flowtype and filetype for a given ModflowFlwob
    package filename.

    Parameters
    ----------
    fn : str
        The filename to be parsed.
    ext_unit_dict : dictionary, optional
        If the arrays in the file are specified using EXTERNAL, or older
        style array control records, then `f` should be a file handle. In
        this case ext_unit_dict is required, which can be constructed using
        the function :class:`flopy.utils.mfreadnam.parsenamefile`.

    Returns
    -------
    flowtype : str
        Corresponds to the type of the head-dependent boundary package for
        which observations are desired (e.g. "CHD", "GHB", "DRN", or "RIV").
    ftype : str
        Corresponds to the observation file type (e.g. "CHOB", "GBOB",
        "DROB", or "RVOB").
    """
    ftype = None

    # determine filetype from filename using ext_unit_dict
    if ext_unit_dict is not None:
        for key, value in ext_unit_dict.items():
            if value.filename == fn:
                ftype = value.filetype
                break
    # else, try to infer filetype from filename extension
    else:
        ext = fn.split(".")[-1].lower()
        if "ch" in ext:
            ftype = "CHOB"
        elif "gb" in ext:
            ftype = "GBOB"
        elif "dr" in ext:
            ftype = "DROB"
        elif "rv" in ext:
            ftype = "RVOB"

    msg = f"ModflowFlwob: filetype cannot be inferred from file name {fn}"
    if ftype is None:
        raise AssertionError(msg)

    flowtype_dict = {
        "CHOB": "CHD",
        "GBOB": "GHB",
        "DROB": "DRN",
        "RVOB": "RIV",
    }
    flowtype = flowtype_dict[ftype]

    return flowtype, ftype
6b76b4797312f045c8bd6673d3aa74128cafbde9
625,031
def drop_postfix(s, pattern):
    """
    Remove postfix pattern of a string.

    :param s: string
    :param pattern: string pattern
    """
    if s.endswith(pattern):
        return s[:-len(pattern)]
    return s
fae11e47e329304a5eda2c70b98948e9d3097e87
454,160
import torch


def _cross_entropy(y_hat, y_true, class_weights=None, epsilon=1e-7):
    """Semi-supervised cross entropy loss function.

    Args:
        y_hat: prediction tensor with size (N, C), where C is the number of classes
        y_true: label tensor with size (N, C). A sample won't be counted into
            loss if its label is all zeros.
        class_weights: class weights tensor with size (C,)
        epsilon: numerical stability term

    Returns:
        cross_entropy: cross entropy loss computed only on samples with labels
    """
    device = y_hat.device
    # clamp all elements to prevent numerical overflow/underflow
    y_hat = torch.clamp(y_hat, min=epsilon, max=(1 - epsilon))
    # number of samples with labels
    labeled_samples = torch.sum(y_true.sum(dim=1) > 0).float()
    if labeled_samples.item() == 0:
        return torch.tensor(0.).to(device)
    ce = -y_true * torch.log(y_hat)
    if class_weights is not None:
        ce = ce * class_weights.unsqueeze(0).float()
    return torch.sum(ce) / labeled_samples
65744075c2d4db34791bbbd50b53badeacb3d342
595,586
import click


def output_error(text):
    """Return a click-styled error message string."""
    return click.style("ERROR: {}".format(text), fg="bright_red", bold=True)
4a3a7f2d94c18e4aa38d330690aadace2bffe216
585,074
def deepget(data: dict, keys: list[str]):
    """
    deepget nested keys from a dictionary.

    Raises a KeyError on the first missing sub-key.
    """
    k = keys.pop(0)
    if keys:
        return deepget(data[k], keys)
    else:
        return data[k]
8809ce784eefd0d580bbdb0c7e008a45b216cdea
112,629
def gvcf_output_enabled(options):
    """Returns True if we should be generating gVCF output."""
    return bool(options.gvcf_filename)
b5e34c58182a0225bdfe964985d11536597f92cf
163,147
from pathlib import Path


def get_mount_directory_for_volume_name(volume_name: str) -> Path:
    """
    Get directory under which the volume will be mounted
    """
    return Path('/Volumes') / volume_name
06bd620ca030be4a91e23add2fd2f18e33e3cb42
418,732
import math


def yrlygrowth(total_growth, years):
    """
    Determine the annual growth from the growth over an arbitrary time span.
    """
    return math.exp(math.log(total_growth) / years)
5763cb27a9289395e3bc3885d68e76ef855fe887
67,751
def render_published_by_badge(request, surface):
    """Returns an HTML snippet with a badge about who published a given surface."""
    return {
        'surface': surface,
        'is_publisher': request.user == surface.publication.publisher
    }
1713e0d2b0246353414c8512a6d4a2e2ce5aff0a
558,581
def _propertyContainerElementTypeName(container, name):
    """Return name of the type of a particular element

    Parameters
    ----------
    container : `lsst.daf.base.PropertySet` or `lsst.daf.base.PropertyList`
        Container including the element
    name : `str`
        Name of element
    """
    try:
        t = container.typeOf(name)
    except RuntimeError as e:
        # KeyError is more commonly expected when asking for an element
        # from a mapping.
        raise KeyError(str(e))
    for checkType in ("Bool", "Short", "Int", "Long", "LongLong",
                      "UnsignedLongLong", "Float", "Double", "String",
                      "DateTime", "PropertySet", "Undef"):
        if t == getattr(container, "TYPE_" + checkType):
            return checkType
    return None
8363da7777b9dc86a8060fed617cad0677d2a81b
518,654
import hashlib


def calc_hash(value: str) -> str:
    """
    calculate the hash to be used for `after_catalog_created` and `before_pipeline_run`

    :param value: string value to calculate the hash for
    :return: hash string
    """
    return hashlib.sha1(value.encode("UTF-8")).hexdigest()[:8]
b4dbe9e65107d3adb34e7e3fbbae0f731ac3ff50
400,163
def read_attribute_key_value(tag, content, date_parser, values_dict, set_attributes_to_read):
    """
    Reads an attribute from the line of the log

    Parameters
    --------------
    tag
        Tag
    content
        Full content of the line
    date_parser
        Date parser
    values_dict
        Dictionary of keys/values already met during the parsing
    set_attributes_to_read
        Names of the attributes that should be parsed. If None, then all the
        attributes are parsed.

    Returns
    --------------
    key
        Key of the attribute
    value
        Value of the attribute
    """
    key = content[1]
    value = None

    if set_attributes_to_read is None or key in set_attributes_to_read:
        if tag.startswith("string"):
            value = content[3]
        elif tag.startswith("date"):
            value = date_parser.apply(content[3])
        elif tag.startswith("int"):
            value = int(content[3])
        elif tag.startswith("float"):
            value = float(content[3])
        else:
            value = content[3]

        # limits the number of different instantiations of the same key
        if key in values_dict:
            key = values_dict[key]
        else:
            values_dict[key] = key

        # limits the number of different instantiations of the same value
        if value in values_dict:
            value = values_dict[value]
        else:
            values_dict[value] = value

    return key, value
d3fba7b13474367e4fc9ab4af3611e5068ad70a8
522,766
import struct


def _StToNum(S):
    """
    Convert S to a number.

    :param S: The (big-endian) bytestring to convert to an integer
    :type S: bytes
    :return: An integer representation of the bytestring (rightmost chr == LSB)
    :rtype: int
    """
    return struct.unpack('>L', S)[0]
34fa90e7ef9cef9c46d9a423f85e517f5436fb58
685,144
def first_child_node(node):
    """Returns the first child node of the given node."""
    children = list(node.get_children())
    assert children, 'there are no children in node {}'.format(node)
    return children[0]
8457fe4d7014b1cb50fac8e929e143de0f72424e
446,152
import re


def removeEmptyLine(code):
    """Removes empty lines in a code string"""
    return re.sub(r'^$\n', '', code, flags=re.MULTILINE)
48f1541832933b1ad4ee840627bc3439fae9e365
198,891
import torch


def rotmat(a, b):
    """
    Adapted from http://www.netlib.org/templates/matlab/rotmat.m
    Compute the Givens rotation matrix parameters for a and b.
    """
    c = torch.zeros_like(a)
    s = torch.zeros_like(a)
    temp = torch.zeros_like(a)

    mask = (b.abs() > a.abs())
    temp[mask] = a[mask] / b[mask]
    s[mask] = 1.0 / torch.sqrt(1.0 + temp[mask]**2)
    c[mask] = temp[mask] * s[mask]

    mask = (b.abs() <= a.abs())
    temp[mask] = b[mask] / a[mask]
    c[mask] = 1.0 / torch.sqrt(1.0 + temp[mask]**2)
    s[mask] = temp[mask] * c[mask]

    mask = (b == 0)
    c[mask] = 1.0
    s[mask] = 0.0

    # if b==0.0:
    #     c = 1.0
    #     s = 0.0
    # elif b.abs()>a.abs():
    #     temp = a / b
    #     s = 1.0 / torch.sqrt(1.0+temp**2)
    #     c = temp * s
    # else:
    #     temp = b / a
    #     c = 1.0 / torch.sqrt(1.0+temp**2)
    #     s = temp * c

    return c, s
9170c832ccf4e0203b22938e39a4eca67c3f3a4f
586,445
def has_two_primary(primaries, chain):
    """
    Determine if there are two, or more, primary currencies in the chain
    """
    return len([entry for entry in chain if entry in primaries]) > 1
7141c9c4e1a40b8bf52d17a189262c943b3818c0
142,508
import requests


def get_latest_version_number() -> str:
    """
    Gets the latest version number of the monolithcaching package from the PyPI server.

    Returns: (str) the latest published version of the module
    """
    req = requests.get("https://pypi.org/pypi/monolithcaching/json")
    return req.json()["info"]["version"]
12c291049ec873c4d68cc36b9356d481d0ceb090
673,412
import json


def format_json(batched):
    """Format raw data into json format"""
    aggregated_events = []
    for event in batched:
        aggregated_events.append(json.dumps(event))
        aggregated_events.append("\n")
    return aggregated_events
9d335a01540ee7b95dfd094bbd5433543b4ef599
112,092
def _indent_of(s):
    """Return the prefix of s that is whitespace."""
    return s[:len(s) - len(s.lstrip(" \t"))]
a91ec607fd9706128d1db682816b157b22a938b1
573,313
def check_not_equal_within_3_sigma(generated_value, expected_value, sigma, **kwargs):
    """Check that generated_value is not equal to expected_value, but within 3 sigma range."""
    if generated_value == expected_value:
        return False
    return abs(generated_value - expected_value) <= 3 * sigma
1f8eff94b6ad9e3e89b787321c9adc681e773c9c
337,955
def _get_first_endpoint(endpoints, region):
    """Find the first suitable endpoint in endpoints.

    If there is only one endpoint, return it. If there is more than one
    endpoint, return the first one with the given region. If there are no
    endpoints, or there is more than one endpoint but none of them match the
    given region, raise LookupError.
    """
    if len(endpoints) == 1:
        return endpoints[0]
    else:
        for candidate_endpoint in endpoints:
            if candidate_endpoint["region"] == region:
                return candidate_endpoint
    raise LookupError("No suitable endpoint found")
f7bf17020663b333319781d6e9386dce129f994e
396,756
def distance(f1, f2):
    """\
    Distance between 2 features. The integer result is always positive or zero.
    If the features overlap or touch, it is zero.

    >>> from intersecter import Feature, distance
    >>> distance(Feature(1, 2), Feature(12, 13))
    10
    >>> distance(Feature(1, 2), Feature(2, 3))
    0
    >>> distance(Feature(1, 100), Feature(20, 30))
    0
    """
    if f1.end < f2.start:
        return f2.start - f1.end
    if f2.end < f1.start:
        return f1.start - f2.end
    return 0
c6eb726a21748cadde6d0a26feed1ae716ea318d
310,994
def get_remote_addr(req, resp, resource, params) -> str:
    """
    Returns the remote address of the request to be used as a key for the limit.
    See https://falcon.readthedocs.io/en/stable/api/request_and_response.html#falcon.Request.remote_addr

    Do NOT use it when you have reverse proxies in front of your application,
    as this only shows the last IP, i.e. the IP of the reverse proxy that sent
    the request to your application. In such a case you should pick up the IP
    from the `req.access_route` list.
    """
    return req.remote_addr
81b7e71bb182ccfa53e793da77e6f86436df357e
160,715
from pathlib import Path


def file_exists(filename: str, throw_error: bool = False) -> bool:
    """
    Check if the passed filename is an actual file

    Args:
        filename (str): The filename to check
        throw_error (bool): If True, will throw an error if the file doesn't
            exist. Defaults to False.

    Returns:
        bool: True if the file exists, false otherwise

    Throws:
        FileNotFoundError: If filename is not a file and throw_error is set to true
    """
    is_file = Path(filename).is_file()
    if throw_error and not is_file:
        raise FileNotFoundError(f"{filename} is not a file.")
    return is_file
9df829d9af8cb1f4e37ba4ac1d7a28b0fad74ff8
233,453
def prefixes(s: str):
    """
    Returns the prefixes of the string given as a parameter
    """
    return (s[:i] for i in range(len(s) + 1))
9408a961dd2784dbb9f5c404ec0c6a35a9a25304
625,319
def is_mono(t):
    """
    Returns True if all values of _t_ are equal and False otherwise.
    """
    for i in t:
        if i != t[0]:
            return False
    return True
4b7322f64cb3e421e22319756f2ccf981ceb22ec
612,266
def flash_class(category):
    """Convert flash message category to CSS class for Twitter Bootstrap alert

    :param category: Category of flash message
    :type category: str
    :return: CSS class for category
    :rtype: str
    """
    if category == 'error':
        return 'danger'
    return category
ea03fe4ba14a32a32498b96016bf9a0f479c9662
604,240
def boundary_is_solid(grid):
    """
    Helper method to test if the maze is sane.

    Algorithms should generate a maze with a solid boundary of walls.

    Args:
        grid (np.array): maze array

    Returns:
        boolean: is the maze boundary solid?
    """
    # first row
    for c in grid[0]:
        if c == 0:
            return False
    # other rows
    for row in grid[1:-1]:
        if row[0] == 0 or row[-1] == 0:
            return False
    # last row
    for c in grid[grid.shape[0] - 1]:
        if c == 0:
            return False
    return True
b47f7cb607237811f1cc2aa253968f15c5fc7132
301,398
def normalized(P):
    """Return a stochastic matrix (all rows sum to 1), proportional to P"""
    return (P.T / P.sum(axis=1)).T
db812e3bfffa1634c0ffa2a3ea1b919d76928965
132,262
def rel_index(elem, lst, default=None):
    """`lst.index(elem) / len(lst)` with fallback."""
    try:
        return lst.index(elem) / len(lst)
    except ValueError:
        if default is None:
            raise
        return default
27c86151641b16896078427e731adb12d6722da0
437,401
def get_display_text_for_event(event):
    """
    Takes an event from a collection exercise and returns a version of it
    that can be displayed to the user. If the string isn't found in the map,
    it just returns the string the function was given.

    :param event: A name of an event from a collection exercise
    :type event: str
    :return: A version of that event that can be displayed to the user
    """
    mapping = {
        "mps": "MPS (Main print selection)",
        "go_live": "Go live",
        "return_by": "Return by",
        "reminder": "First reminder",
        "reminder2": "Second reminder",
        "reminder3": "Third reminder",
        "nudge_email_0": "First nudge email",
        "nudge_email_1": "Second nudge email",
        "nudge_email_2": "Third nudge email",
        "nudge_email_3": "Fourth nudge email",
        "nudge_email_4": "Fifth nudge email",
        "exercise_end": "Exercise end",
    }
    return mapping.get(event, event)
9634391bda3de743a76b221b42aa7ad529df04d9
588,616
def query_create_index(table, index, col_name):
    """
    Generate query to create index with name 'index' on column 'col_name'
    in table 'table'
    """
    return 'CREATE INDEX ' + index + ' ON ' + table + '(' + col_name + ')'
eed317ef29b744c990e4de38a16f61a9976b9dfe
140,565
from typing import Tuple


def get_wires_of_gate(gate: Tuple):
    """Get the indices of the bits that the gate acts on

    Args:
        gate (qiskit.QuantumGate): Quantum gate

    Returns:
        list: list of bit indices
    """
    list_wire = []
    for register in gate[1]:
        list_wire.append(register.index)
    return list_wire
e98fffc2a0b77e4d396c5d4cebe6484c033205b0
601,086
def average(series):
    """
    implements the average of a pandas series from scratch

    suggested functions:
        len(list)
        sum(list)

    you should get the same result as calling .mean() on your series
    https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.mean.html

    See numpy documentation for implementation details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
    """
    return sum(series) / len(series)
1b18c722d277c9edda8af386e59f56c17cff9019
115,247
def _running_locally(coreapi_url, jobs_api_url):
    """Check if tests are running locally."""
    return not (coreapi_url and jobs_api_url)
5d8c1a53658f1beb544063be677ce324ce9f3bd3
160,327
def tAdjust(sTime):
    """Takes a string representing time.

    If it has only one or two digits, adds ':00'; otherwise it only replaces
    the '.' separator between hours and minutes with ':'.
    Returns the modified string.
    """
    if len(sTime) <= 2:
        return sTime + ":00"
    return sTime.replace('.', ':')
0396676786410e1db374c255ef0cf6820bdd609b
208,265
def Tn(n):
    """ Triangular numbers."""
    return n * (n + 1) // 2
62b683668949359e892ebe89989a1d02d9124b05
301,060
import sqlite3


async def db_check(bot, msg, cursor, table: str):
    """
    This function is a coroutine.

    Checks if table exists.

    :param bot: Bot instance
    :param msg: Message
    :param cursor: Database cursor
    :param table: Table name
    :return: Bool
    """
    try:
        cursor.execute('SELECT 1 FROM {}'.format(table))
        return True
    except sqlite3.Error:
        await bot.send_message(msg.channel,
                               "Table {} is not initialized.\n\n"
                               "Hint: Use `Kurisu, db init` to perform database initialization.".format(table))
        cursor.close()
        return False
6f032f2ac0f2d9f1d0e49890cd8db51e24c85c72
641,259
def mapToRange(val, src, dst):
    """
    Map the given value from the range of src to the range of dst.
    """
    return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
769dd6f6c52b0c8cdc2b0358457820c0ed15a1f8
697,165
def det(tensor):
    """Detach a torch Tensor to a cpu numpy version

    Args:
        tensor (torch.Tensor): A tensor to be detached and turned into an ndarray

    Returns:
        (ndarray): The same data as an ndarray
    """
    return tensor.cpu().detach().numpy()
d37c9ede3fd7e40fa56bb6ddbf42d83b94c1787b
618,315
def afunc(array, power):
    """ return the powers of a list of numbers """
    return [pow(number, power) for number in array]
6177c112cfa3148b2ca6044e909bc2246316bcec
335,153
def good_astrom_func(table):
    """Require the star to have good astrometry."""
    return table['ruwe'] < 1.4
580dbf5e1095384277bb561a8e54fe3429ad9810
37,086
from typing import Dict
from typing import Any


def get_value_len(dct: Dict[Any, Any]) -> Dict[Any, int]:
    """Get length of each dictionary value.

    Args:
        dct: Python `dict()` object.

    Returns:
        `dict()` object.

    Example:
        >>> status_codes = {
                200: ['google.com', 'something.net'],
                404: ['notfoundweb.com'],
            }
        >>> get_value_len(dct=status_codes)
        >>> {
                200: 2,
                404: 1,
            }
    """
    counter = {}
    for key, nested_container in dct.items():
        counter[key] = len(nested_container)
    return counter
9f8cd9e6d08bd624c1666eba691dea7fc40e6767
627,691
def get_m2m_changes(current_qs, new_qs):
    """
    Get difference between states of a many to many relation.

    :arg django.db.models.QuerySet `current_qs`: objects from the current
        state of the relation.
    :arg django.db.models.QuerySet `new_qs`: objects from the future state
        of the m2m relation.

    :returns: A tuple with 2 lists of added and removed items from the m2m
    """
    add_items = new_qs.exclude(
        pk__in=current_qs.values_list('pk', flat=True)
    )
    remove_items = current_qs.exclude(
        pk__in=new_qs.values_list('pk', flat=True)
    )
    return list(add_items), list(remove_items)
d0e51c36b6447dde99b3c37244a7f644cd31d38d
505,059
def is_float(dtype):
    """Return True if datatype dtype is a float kind"""
    return ('float' in dtype.name) or dtype.name in ['single', 'double']
3c6301e6d89fb8d825ac4181ca02b5cf95028066
696,041
def tokenize_text(src):
    """ Split up the source text into single-character tokens, replacing
    spaces with the special token <space>.

    :param src: source text.
    :return token list.
    """
    tokens = list()
    for cur in src.split():
        tokens += list(cur)
        tokens.append(u'<space>')
    return u' '.join(tokens[:-1])
75fcf0304acbd133b86c4de335e1b2110985d48e
399,677
def get_proportion(part, whole):
    """
    Get proportion between part and whole; if whole is zero, returns 0 to be
    used as 0%
    """
    if whole == 0:
        return 0
    return float(part) / float(whole)
28325cfcb8fce5cbe6643bc1d8d800232626fd33
535,491
def is_polarization_problem(output_path):
    """
    Check the presence of polarization errors.
    """
    with open(output_path) as thefile:
        lines = thefile.read().split('\n')
    for line in lines:
        if "POLARIZATION: Iteration to find the polarization" in line:
            return True
    return False
2a8980d59dc65c3ab73c04174b881ff88fc5cf00
596,078
import inspect


def isposonly(param: inspect.Parameter) -> bool:
    """Returns whether the given parameter is a `*args`-style (var-positional) parameter"""
    return param.kind == inspect.Parameter.VAR_POSITIONAL
1c1183eac11d0d0a1b4c67befeb76d0bad6f3540
324,505
def year_month(df, col_with_dt):
    """A function to extract year and month from Pandas datetime format"""
    year_only = df[col_with_dt].dt.year
    month_only = df[col_with_dt].dt.month
    df['year_only'] = year_only
    df['month_only'] = month_only
    return df
b54752511320d56199c7cebe2cf2c63ac972656e
176,131
def linearize_dict(kwargs, separator=".") -> dict:
    """linearizes `kwargs` dictionary using the provided `separator`

    Parameters
    ----------
    kwargs : dict
        dictionary of keys linearized into a flat dictionary
    separator: str, default='.'
        defines the separator to be applied on the final dictionary keys

    Returns
    -------
    dict
        flat dictionary with keys names using a separator

    Examples
    --------
    >>> from magpylib._src.defaults.defaults_utility import linearize_dict
    >>> from pprint import pprint
    >>> mydict = {
    ...     'line': {'width': 1, 'style': 'solid', 'color': None},
    ...     'marker': {'size': 1, 'symbol': 'o', 'color': None}
    ... }
    >>> flat_dict = linearize_dict(mydict, separator='.')
    >>> pprint(flat_dict)
    {'line.color': None,
     'line.style': 'solid',
     'line.width': 1,
     'marker.color': None,
     'marker.size': 1,
     'marker.symbol': 'o'}
    """
    assert isinstance(kwargs, dict), "kwargs must be a dictionary"
    assert isinstance(separator, str), "separator must be a string"
    dict_ = {}
    for k, v in kwargs.items():
        if isinstance(v, dict):
            d = linearize_dict(v, separator=separator)
            for key, val in d.items():
                dict_[f"{k}{separator}{key}"] = val
        else:
            dict_[k] = v
    return dict_
0f9bb607132e486c7d855ebf1f6b336a61d32183
522,110
def get_AllAlternativeImages(self, page=True, region=True, line=True, word=True, glyph=True):
    """
    Get all the ``pc:AlternativeImage`` in a document

    Arguments:
        page (boolean): Get images on ``pc:Page`` level
        region (boolean): Get images on ``pc:*Region`` level
        line (boolean): Get images on ``pc:TextLine`` level
        word (boolean): Get images on ``pc:Word`` level
        glyph (boolean): Get images on ``pc:Glyph`` level

    Returns:
        a list of :py:class:`AlternativeImageType`
    """
    ret = []
    if page:
        ret += self.get_AlternativeImage()
    for this_region in self.get_AllRegions(['Text']):
        if region:
            ret += this_region.get_AlternativeImage()
        for this_line in this_region.get_TextLine():
            if line:
                ret += this_line.get_AlternativeImage()
            for this_word in this_line.get_Word():
                if word:
                    ret += this_word.get_AlternativeImage()
                for this_glyph in this_word.get_Glyph():
                    if glyph:
                        ret += this_glyph.get_AlternativeImage()
    return ret
ff45f9ffb1af2e06f831e9be7721af39c5ed14ce
65,733
from typing import Iterable


def is_iterable_but_not_string(obj):
    """
    Determine if an object has iterable, list-like properties.
    Importantly, this function *does not* consider a string to be list-like,
    even though Python strings are iterable.
    """
    return isinstance(obj, Iterable) and not isinstance(obj, str)
e20fa2e08519f635d179aa2123e4b8672a6c0af4
308,666
import copy


def get_object_state(obj, ignore_attributes=()):
    """Returns a dictionary with the state of the attributes of an object.

    Note that this is not general. For example, it will throw an error for
    classes that define a __slots__ field (like namedtuples).

    Args:
        obj: a Python object
        ignore_attributes: list of attributes to ignore when getting the object state

    Returns:
        state: a dictionary representation of the object state.
    """
    state = {}
    for k, v in obj.__dict__.items():
        if k not in ignore_attributes:
            state[k] = copy.deepcopy(v)
    for k in ignore_attributes:
        if not hasattr(obj, k):
            raise ValueError("Ignored attribute `%s` does not exist in object." % k)
    return state
42377b5ae20bd4ceaa4f58cec3681b6ddc655eb6
645,959
def block_comments_begin_with_a_space(physical_line, line_number):
    """There should be a space after the # of block comments.

    There is already a check in pep8 that enforces this rule for inline
    comments.

    Okay: # this is a comment
    Okay: #!/usr/bin/python
    Okay: # this is a comment
    K002: #this is a comment
    """
    MESSAGE = "K002 block comments should start with '# '"

    # shebangs are OK
    if line_number == 1 and physical_line.startswith('#!'):
        return

    text = physical_line.strip()
    if text.startswith('#'):  # look for block comments
        if len(text) > 1 and not text[1].isspace():
            return physical_line.index('#'), MESSAGE
6996573e2f5988f8952e13d13fe2437ba7913f79
122,949
def intersect(lists):
    """ Return the intersection of all lists in "lists". """
    if len(lists) == 0:
        return lists
    if len(lists) == 1:
        return lists[0]
    finalList = set(lists[0])
    for aList in lists[1:]:
        finalList = finalList & set(aList)
    return list(finalList)
c5f5fe0a93b2787f68bdcdb4466f461a12a7ec53
407,546
def transpose(matrix):
    """return the transpose of a matrix stored as a 9-element tuple"""
    return (matrix[0], matrix[3], matrix[6],
            matrix[1], matrix[4], matrix[7],
            matrix[2], matrix[5], matrix[8])
25c6f6ac09ff1d0305102721963d53282e9b80f3
637,558
import math


def add_aggregate_info(site_and_date_info, percentage, sorted_names):
    """
    Function is used to add an 'aggregate metric' that summarizes all of the
    data quality issues for a particular site on a particular date.

    NOTE: This function DOES NOT take the weighted value of all of these
    metrics. This is merely to attach the aggregate statistic.

    NOTE: This is for the DICTIONARY with the date as the first set of keys.

    :param site_and_date_info (dict): dictionary with key:value of
        date:additional dictionaries that contain metrics for each HPO's data
        quality by type
    :param percentage (boolean): used to determine whether or not the number
        is a simple record count (e.g. duplicates) versus the percentage of
        records (e.g. the success rate for each of the tables)
    :param sorted_names (lst): list of the names that should have an
        aggregate statistic analyzed (e.g. avoiding 'average' statistics)
    :return: site_and_date_info (dict): same as input parameter but now each
        site and date has an added aggregate statistic.
    """
    for date in site_and_date_info.keys():
        date_report = site_and_date_info[date]
        date_metric, num_iterated = 0, 0
        for site in sorted_names:
            table_metrics = date_report[site]
            date_metric, num_iterated = 0, 0
            for table in table_metrics.keys():
                stat = table_metrics[table]
                if not math.isnan(stat):
                    date_metric += stat
                    num_iterated += 1
        # NOTE: 'AGGREGATE INFO' SHOULD NOT BE USED FOR
        # THE PERCENTAGE METRIC. THIS IS BECAUSE THE
        # FIRST 'AGGREGATE INFO' DOES NOT WEIGHT SITES
        # BY THEIR RELATIVE CONTRIBUTIONS (BY # OF ROWS).
        if percentage and num_iterated > 0:
            date_metric = date_metric / num_iterated
        elif percentage and num_iterated == 0:
            date_metric = float('NaN')
        date_report['aggregate_info'] = date_metric
    return site_and_date_info
71eb0b8d33bcbf9ad04f53621d7959995112bc47
47,906
def get_valid_image_ranges(experiments):
    """Extract valid image ranges from experiments, returning None if no scan"""
    valid_images_ranges = []
    for exp in experiments:
        if exp.scan:
            valid_images_ranges.append(exp.scan.get_valid_image_ranges(exp.identifier))
        else:
            valid_images_ranges.append(None)
    return valid_images_ranges
ddfcb091fabb0c70f7a6a3fce8b43396574a797c
695,510
import requests


def get_user_key(api_key: str, username: str, password: str) -> str:
    """Login the user and returns his ``user_key``

    This key can be cached, since it is only invalidated by the next (api) login.

    Arguments:
        api_key {str} -- the api_key of the application.
        username {str} -- the username of the user that wants to login.
        password {str} -- the password of the user that wants to login.

    Returns:
        user_key {str} -- The ``user_key`` of the user that logged in.

    Raises:
        ValueError: Bad API request, use POST request, not GET
        ValueError: Bad API request, invalid api_dev_key
        ValueError: Bad API request, invalid login
        ValueError: Bad API request, account not active
        ValueError: Bad API request, invalid POST parameters
    """
    r = requests.post("https://pastebin.com/api/api_login.php", data={
        "api_dev_key": api_key,
        "api_user_name": username,
        "api_user_password": password
    })
    try:
        r.raise_for_status()
    except requests.HTTPError:
        raise ValueError(r.text)
    return r.text
92dd0cedfd5364378d5381b0a64844e7f789f62d
18,164