content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import yaml


def read_yaml(yaml_file):
    """
    Read a YAML file.

    Parameters
    ----------
    yaml_file : str or PathLike
        Path to the YAML file.

    Returns
    -------
    content : dict
        Dictionary with the content of the YAML file.
    """
    with open(yaml_file, "r") as handle:
        # FullLoader resolves standard YAML tags without allowing
        # arbitrary Python object construction.
        return yaml.load(handle, Loader=yaml.FullLoader)
8feeb20c8128e99d855d7f42ccaf8f0b72538a0a
108,501
def sanitize(text):
    """Escape characters that have a special meaning in LaTeX.

    :param text: plain string to embed in a LaTeX document
    :return: string safe to use in LaTeX body text
    """
    # Replace backslashes with a placeholder first: the braces inside the
    # "\textbackslash{}" replacement must not themselves be escaped by the
    # loop below (the previous code produced "\textbackslash\{\}", which
    # renders stray literal braces).  NUL is effectively impossible in
    # LaTeX source, so it is a safe sentinel.
    placeholder = '\x00'
    text = text.replace('\\', placeholder)
    for c in '$%^{}#_&':
        text = text.replace(c, '\\' + c)
    text = text.replace(placeholder, '\\textbackslash{}')
    # Allow some more line breaks to happen.
    text = text.replace('/', '\\slash{}')
    text = text.replace('=', '=\\allowbreak{}')
    text = text.replace('-', '-\\allowbreak{}')
    return text
76f1ef50bac2e6da57a9226a2b594f5ce684e13f
108,502
from collections import Counter


def ngram_counter(list_of_token, n=2):
    """Convert a list of ordered tokens into n-gram counts.

    | Argument
    |   list_of_token: a list of ordered tokens to be processed.
    | Parameter
    |   n: number of continuous tokens to group.
    | Output
    |   counts of space-joined n-grams (type: collections.Counter)

    :raises ValueError: if fewer than ``n`` tokens are supplied.
    """
    if len(list_of_token) < n:
        # ValueError subclasses Exception, so callers catching the old
        # generic Exception are unaffected.
        raise ValueError("Can't get {}-gram from input of length {}".format(n, len(list_of_token)))
    # Import Counter from collections (typing.Counter is a deprecated alias).
    return Counter(' '.join(list_of_token[index:index + n])
                   for index in range(len(list_of_token) - n + 1))
29fccd9a2225a4e0c4899b991c8b0d1ad496c4cb
108,506
def Dmap(tree):
    """
    Butcher's function D(t).  Represents differentiation.
    Defined by D(t)=0 except for D('T')=1.
    """
    return 1 if tree == 'T' else 0
9c3cfd252adbb55422dfb4e8a7623661263bb803
108,511
def findElementRecursively(obj, elem_name):
    """Recursively find hierarchy of element in nested object.

    Returns the first (sub-)object whose ``__dict__`` contains a field
    named ``elem_name``, or None when no such object exists.  Attributes
    whose names end with "_" are never descended into.
    """
    if not hasattr(obj, "__dict__"):
        # Plain values (ints, strings, ...) have no named fields to search.
        return None
    fields = obj.__dict__
    if elem_name in fields:
        # Required hierarchy level was reached.
        return obj
    for attr_name in fields:
        if attr_name.endswith("_"):
            continue
        found = findElementRecursively(obj.__getattribute__(attr_name), elem_name)
        if found is not None:
            return found
    return None
76e5365da42d68fbfbb7f16b366331395f18dcef
108,514
def ratio(num, den):
    """Return num/den as a float, or 0.0 when den is zero."""
    numerator = float(num)
    denominator = float(den)
    try:
        return numerator / denominator
    except ZeroDivisionError:
        return 0.0
27207dac6592c733898971c9ed59c64a4244c6b5
108,520
import yaml


def save_yaml(val):
    """Serialize a value or structure to a YAML string.

    :param val: Value or struct to save
    :type val: None | int | float | str | unicode | list | dict
    :return: The yamlified string
    :rtype: str | unicode
    """
    dumped = yaml.dump(val)
    return dumped
3a432952b9be1610a2baf2c21187ece5bbb7d33a
108,526
import math


def getHeading(lat_diff, long_diff):
    """Return directional heading (0=North) given lat and long diffs.

    Args:
        lat_diff: (float) difference in latitude
        long_diff: (float) difference in longitude

    Returns:
        Heading in degrees, in [0, 360).
    """
    # Angle measured from East, truncated to whole degrees
    # (math.degrees(a) == a * 180 / pi).
    angle_east = int(math.degrees(math.atan2(lat_diff, long_diff)))
    heading = 90 - angle_east
    return heading + 360 if heading < 0 else heading
1f0207c8172c6fd06a92f0460a87ec664b76b7bc
108,535
def linear_search(numbers: list, item: int) -> int:
    """
    Linear search over a list.

    Parameters
    ----------
    numbers : list
        The numbers list
    item : int
        The element to search

    Returns
    -------
    int
        Index of the first occurrence of ``item``, or -1 when absent.
    """
    position = 0
    for candidate in numbers:
        if candidate == item:
            return position
        position += 1
    return -1
9c79b0debc4a5355eac0b7122a438587bbf5f4f0
108,537
def get_all(qradarAppliance, check_mode=False, force=False):
    """Retrieve a list of all server hosts in the deployment.

    ``check_mode`` and ``force`` are accepted for interface compatibility
    but are not consulted here.
    """
    # GET /system/servers returns the deployment's server hosts.
    return qradarAppliance.invoke_get("Get server hosts", "/system/servers")
7df3f1fc2d6f955b2b94b7c0be82e6f9f631b597
108,538
def _compute_ngrams_py(word, min_n, max_n):
    """Get the list of all possible ngrams for a given word.

    Parameters
    ----------
    word : str
        The word whose ngrams need to be computed.
    min_n : int
        Minimum character length of the ngrams.
    max_n : int
        Maximum character length of the ngrams.

    Returns
    -------
    list of str
        Sequence of character ngrams.
    """
    # FastText wraps every word in angle-bracket markers before slicing.
    extended_word = '<' + word + '>'
    longest = min(len(extended_word), max_n)
    return [
        extended_word[start:start + size]
        for size in range(min_n, longest + 1)
        for start in range(len(extended_word) - size + 1)
    ]
8577165294827920709e82cab43f8745f89a2901
108,543
import textwrap
import pprint


def pprint_msg(dic, prefix=' '):
    """Return a string that pretty-prints *dic*, indented for logging.

    Usage:
        logger.info(pprint_msg(arbitrary_object))
    """
    formatted = pprint.pformat(dic)
    return "\n" + textwrap.indent(formatted, prefix=prefix)
2896a7389c27dc3a6dcae0bcd56bbaf74493b51f
108,548
def to_pair(seq):
    """Convert a FastaEntry-like object to a simple (header, sequence) pair."""
    return seq.header, seq.seq
794ae198b48fe8001731e48a5267858f7b4cbfa0
108,549
def complete_sentence(learn, start_phrase, n_words=30, n_samples=3, temp=.75):
    """Generate text from a given prompt with a fastai language model.

    Parameters
    ----------
    learn: fastai Learner
    start_phrase: str
        The prompt (start of a sentence) the model should complete.
    n_words: int
        Number of words to generate after the given prompt.
    n_samples: int
        Number of sample sentences to generate.
    temp: float
        Sampling temperature passed through to ``learn.predict``.

    Returns
    -------
    list[str]:
        Each item is one completed sentence.
    """
    samples = []
    for _ in range(n_samples):
        samples.append(learn.predict(start_phrase, n_words, temperature=temp))
    return samples
0eed175367915fbc123de04824ba60140ba7c983
108,550
import torch


def _get_relation_types(dataset,):
    """
    Classify relations into 1-N, M-1, 1-1, M-N

    Bordes, Antoine, et al. "Translating embeddings for modeling
    multi-relational data." Advances in neural information processing
    systems. 2013.

    :return: dictionary mapping from int -> {1-N, M-1, 1-1, M-N}
    """
    # One row per relation.  Columns 0/1 accumulate label mass and key
    # counts from the po->s index, columns 2/3 from the sp->o index, and
    # columns 4/5 hold the derived "M" / "N" flags computed below.
    relation_stats = torch.zeros((dataset.num_relations(), 6))
    for index, p in [
        (dataset.index("train_sp_to_o"), 1),
        (dataset.index("train_po_to_s"), 0),
    ]:
        # prefix is the (s, p) resp. (p, o) key tuple, so prefix[p]
        # appears to select the relation id in either case -- TODO confirm
        # against the dataset's index layout.
        for prefix, labels in index.items():
            relation_stats[prefix[p], 0 + p * 2] = labels.float().sum()
            relation_stats[prefix[p], 1 + p * 2] = (
                relation_stats[prefix[p], 1 + p * 2] + 1.0
            )
    # Flag a relation as "M" (resp. "N") when the average number of labels
    # per key on the corresponding side exceeds 1.5.
    relation_stats[:, 4] = (relation_stats[:, 0] / relation_stats[:, 1]) > 1.5
    relation_stats[:, 5] = (relation_stats[:, 2] / relation_stats[:, 3]) > 1.5
    result = dict()
    for i, relation in enumerate(dataset.relation_ids()):
        result[i] = "{}-{}".format(
            "1" if relation_stats[i, 4].item() == 0 else "M",
            "1" if relation_stats[i, 5].item() == 0 else "N",
        )
    return result
45e6a3d6c0ed6740b1f735f2ab7832fdabd3094f
108,553
def getdistribfunc(distrib, funcname):
    """Return the ``distrib.funcname`` function for recognized funcnames."""
    supported = ("cdf", "isf", "pdf", "pmf", "ppf", "sf", "rvs")
    if funcname not in supported:
        raise ValueError("Unsupported scipy.stats function name '%s'" % funcname)
    # All supported names are plain attribute lookups on the distribution.
    return getattr(distrib, funcname)
3931be2411d7a458381bb08a8f3fbc125c944975
108,558
def replace_simple_tags(string, from_tag="italic", to_tag="i", to_open_tag=None):
    """
    Replace tags such as <italic> with <i>.

    This does not validate markup.  ``to_open_tag`` (when given) overrides
    the opening replacement entirely; an empty ``to_tag`` strips the tags.
    """
    open_old = "<" + from_tag + ">"
    self_close_old = "<" + from_tag + "/>"
    close_old = "</" + from_tag + ">"
    if to_open_tag:
        string = string.replace(open_old, to_open_tag)
    elif to_tag:
        string = string.replace(open_old, "<" + to_tag + ">")
        string = string.replace(self_close_old, "<" + to_tag + "/>")
    else:
        string = string.replace(open_old, "")
        string = string.replace(self_close_old, "")
    if to_tag:
        string = string.replace(close_old, "</" + to_tag + ">")
    else:
        string = string.replace(close_old, "")
    return string
f5607032e7d8b6efd57e3f30856579ecff1bf110
108,560
from typing import Dict
from typing import Any
from pathlib import Path


def get_history_dir(config: Dict[str, Any]) -> Path:
    """
    Return the directory where all the runs are stored.

    :param config: config mapping holding a "history_data_dir" entry
    :return: path to folder
    """
    # Wrap in Path so the declared return type is honoured even when the
    # config stores the directory as a plain string.
    return Path(config["history_data_dir"])
3f52f36323a52702f998e686c3223e795a9b22c7
108,563
def get_table_schema(conn, table):
    """Get column name and type of given table.

    Args:
        conn: a database connection; this function leaves it open.
        table: table name or db.table.

    Returns:
        Tuple of (field_name, field_type) tuples.
    """
    schema = conn.get_table_schema(table)
    return schema
5df898348f039ecd70e1c1cd31270c2601dded33
108,571
def _extract_region(host):
    """Extract the region label from an Amazon S3 host, or None if absent."""
    labels = host.split(".")
    candidate = labels[1]
    if candidate == "dualstack":
        # "dualstack" hosts carry the region one label further right.
        candidate = labels[2]
    # Hosts like s3.amazonaws.com embed no region at all.
    return None if candidate == "amazonaws" else candidate
119fb50fa20f27c9620cb609c3fdb7206af15978
108,572
def filter_none(lst):
    """Return a copy of *lst* with all None elements removed."""
    return [item for item in lst if item is not None]
5e7da53c27296f81d120763853d9b42d2144a6e6
108,579
import operator


def _sortPullRequests(pull_requests):
    """Helper function for _formatPullRequests().

    Sort rows by pull request number (index 3) and then by comment creation
    date (index 9).

    GIVEN:  pull_requests (list) -- nested list of pull requests and comments
    RETURN: a new list sorted by PR number and comment creation date
    """
    sort_key = operator.itemgetter(3, 9)
    return sorted(pull_requests, key=sort_key)
0c38419bcc9c3828fcc7dda00f5808f1c200d738
108,580
def pad(base, fill, count, right = False):
    """Pad *base* with *fill* until its length reaches *count*.

    Pads on the left by default, on the right when ``right`` is True.
    A multi-character *fill* may overshoot *count* (kept from the original
    loop semantics).
    """
    padded = base
    while len(padded) < count:
        padded = padded + fill if right else fill + padded
    return padded
3a2f55e10e967fdfbcfa4317fe284b504738e7be
108,583
def _named_idx(idx):
    """Map 0/1/2 to the axis name 'x'/'y'/'z'; raise ValueError otherwise."""
    if 0 <= idx <= 2:
        return ('x', 'y', 'z')[idx]
    raise ValueError('idx must be between 0 and 2, got %d' % idx)
01db76043bed0aa8290aab31f9d011e038935bfc
108,584
def create_gcp_connector(api, configuration, api_version, api_exception, name, service_account):
    """ Creates a GCP connector.

    :param api The Deep Security API exports.
    :param configuration The configuration object to pass to the API client.
    :param api_version The API version to use.
    :param api_exception The Deep Security API exception module.
    :param name The name of the GCP connector.
    :param service_account The GCP service account used by the GCP connector.
    :return The created GCP connector object which contains the created GCP
        connector ID information, or None when the API raised.
    """
    # Create a GCP connector object
    api_instance = api.GCPConnectorsApi(api.ApiClient(configuration))
    gcp_connector = api.GCPConnector()

    # Set the GCP connector properties
    gcp_connector.name = name
    gcp_connector.service_account = service_account

    try:
        # Call create_gcp_connector API to create the GCP connector
        api_response = api_instance.create_gcp_connector(gcp_connector, api_version)
        return api_response
    except api_exception as e:
        # Fixed: the message previously referred to a non-existent
        # "create_google_connector" method.
        print("An exception occurred when calling GCPConnectorsApi.create_gcp_connector: %s\n" % e)
8073be5f8f3ece27655b1e30a7602b1b4500b3f5
108,587
def addChildNode(node, name, obj=None):
    """
    Use this to build paths to your plugin's endpoints.

    :param node: The parent node to add the child node to.
    :param name: The name of the child node in the URL path.
    :type name: str
    :param obj: The object to place at this new node, or None if this child
        should not be exposed as an endpoint, instead just used as an
        intermediary hidden node.
    :type obj: object or None
    :returns: The node that was created.
    """
    # Test specifically against None, as documented: a falsy-but-real
    # object (0, "", an empty container) is still a legitimate endpoint.
    if obj is not None:
        setattr(node, name, obj)
        return obj
    hiddenNode = type('', (), dict(exposed=False))()
    setattr(node, name, hiddenNode)
    return hiddenNode
7ac31e965dfa4f33c40040677ace07fb9439e3d1
108,593
def create_blob_client(blob_service_client, container_name, blob_file):
    """
    Create a blob-specific client.

    :param blob_service_client: type: azure.storage.blob.BlobServiceClient
    :param container_name: type str: Name of the container of interest
    :param blob_file: blob entry from ContainerClient.list_blobs
    :return: blob_client for the requested blob
    """
    return blob_service_client.get_blob_client(container=container_name,
                                               blob=blob_file)
9715a3fb9df3daaf2836122e0993f1ff88b3a893
108,596
def _pattern_common(**params):
    """Shared preprocessing for pattern_match and pattern_count.

    Args:
        params (kwargs):
            pattern (str or list): pattern to be sought in data (obligatory)
            metric (str): 'identity', 'euclid', 'taxi', 'sup' or 'inf'
            radius (number): the similarity cutoff (non-negative)

    Returns:
        (pattern as list, pattern length, radius, metric name)

    Raises:
        NameError when 'pattern' is not given,
        TypeError if 'pattern' is neither string nor list,
        ValueError if 'radius' is negative or unsupported metric used.
    """
    if 'pattern' not in params:
        raise NameError("No pattern provided.")
    raw_pattern = params['pattern']
    if isinstance(raw_pattern, list):
        pattern = raw_pattern
    elif isinstance(raw_pattern, str):
        pattern = list(raw_pattern)
    else:
        raise TypeError("The pattern should be either list or a string.")
    radius = params.get('radius', 0)
    if radius < 0:
        raise ValueError("Similarity radius cannot be negative.")
    metric = params.get('metric', 'identity')
    if metric not in ('identity', 'taxi', 'euclid', 'sup', 'inf'):
        raise ValueError("Unsupported distance metric.")
    return pattern, len(pattern), radius, metric
dc8e7d20ca05ab7574f974354fca35215c8b1008
108,599
def __is_utf8(rule_string):
    """
    Check whether the rule string contains only utf-8 encodable characters.

    :param rule_string: the string representation of the yara rule
    :return: True if the string encodes cleanly to utf-8
    """
    try:
        rule_string.encode('utf-8')
        return True
    except UnicodeEncodeError:
        # Lone surrogates and similar artifacts cannot be encoded.
        return False
90d9842334b0e989d152577c840807380a0e4a31
108,602
def deepcopy_nested_dict(nested_dict_to_deepcopy: dict):
    """
    Two-level copy of a nested dictionary, e.g. {k1:{...}, ..., kN:{...}}.

    :param nested_dict_to_deepcopy: The nested dictionary to copy
    :return: A new dict whose inner dicts are also fresh (shallow) copies
    """
    copied = {}
    for key, inner in nested_dict_to_deepcopy.items():
        # Every value must itself be a plain dict (two-level structure).
        assert type(inner) is dict
        copied[key] = inner.copy()
    return copied
09e3c0d4eecf88613b25a7344ad3ca9f4ea8c23b
108,605
def CalcMetricMatrix(inData, metricFunc):
    """generates a metric matrix

    **Arguments**

      - inData: a list of clusters (or anything with a GetPosition() method)

      - metricFunc: the function to be used to generate the matrix

    **Returns**

      the metric matrix as returned by *metricFunc*
    """
    # Materialize the positions as a list: the old Py3 map object was lazy
    # and single-use.  Also drops the unused nObjs/res locals.
    positions = [cluster.GetPosition() for cluster in inData]
    return metricFunc(positions)
805c740ca906ec08632f2b6ebf2d232cf6988757
108,608
import re


def is_email_valid(email):
    """
    Checks if an email address follows the conventional pattern of
    username@domain.extension (with an optional second extension,
    e.g. user@host.co.uk).

    :param email: given email address
    :return: True if the given email address is valid, False otherwise
    :rtype: bool
    """
    # The previous pattern contained a top-level alternation, so strings
    # such as ".abc.de" (no username, no "@") were accepted.  The whole
    # address must now match one anchored expression.
    pattern = r"[a-zA-Z0-9][\w\-.]+@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}(?:\.[a-zA-Z]{1,3})?"
    return re.fullmatch(pattern, email) is not None
e38abbce85768d83de1ffe0286c7d69378fb806d
108,611
def hmean(iterable):
    """Return the harmonic mean of the given values."""
    values = iterable if isinstance(iterable, list) else list(iterable)
    reciprocal_sum = sum(1.0 / v for v in values)
    return float(len(values)) / reciprocal_sum
c841028b6d616525d568a1252e82e8045c70c9ad
108,614
def get_long_description(path):
    """Return the full text contents of the file at *path*."""
    with open(path) as handle:
        contents = handle.read()
    return contents
e029281c72d46dfe1b10258d2449cade36ae205a
108,621
import collections


def get_rare_char_info(char_to_lang_map, shared_lang_threshold):
    """Returns a tuple of:
    - a set of 'rare_chars' (those used threshold langs or fewer),
    - a mapping from each locale with rare chars to the set of its rare chars."""
    rare_chars = set()
    locs_with_rare_chars = collections.defaultdict(set)
    for cp, langs in char_to_lang_map.items():
        if len(langs) > shared_lang_threshold:
            # Shared widely enough to not count as rare.
            continue
        rare_chars.add(cp)
        for lang_tag in langs:
            locs_with_rare_chars[lang_tag].add(cp)
    return rare_chars, locs_with_rare_chars
72739275e074b836d0b2d9a6f6086ed18ed20cfa
108,623
def welcome_prompt(name):
    """Prompts the user with the game opening banner. If the player chooses
    to begin, the method returns True. Any other input will return False.

    Parameters:
    name -- the variable for storing the player's name in

    NOTE(review): the ``name`` argument is immediately overwritten by the
    ``input()`` call below, so the value passed in is never used -- confirm
    whether callers expect the entered name to be returned instead.
    """
    print("Welcome, Traveler.")
    # The parameter is shadowed by whatever the player types here.
    name = input("What do they call you? ")
    print("Ah. {name}, is it? Pity you've wandered here.".format(name=name))
    print("As you will discover very soon, you are a cat.")
    print("There are many trials before you.")
    answer = input("Feeling curious? [Y/N]: ").upper()
    # Re-prompt until the reply starts with Y or N (case-insensitive).
    while len(answer) == 0 or answer[0] not in ['Y', 'N']:
        print("Taking a nap on the keyboard, are you?")
        answer = input("Please enter either [Y]es or [N]o: ").upper()
    if answer[0] == 'Y':
        print("Good luck.")
        return True
    else:
        print("Ah. Maybe another day, then.")
        print("After all, Destiny does not beckon every kitten that meows.")
        print("Come back when you're older and more curious.")
        return False
a60107001c41969abc50c23c21a105453b1d7921
108,626
def binary_to_decimal(number):
    """Convert a binary numeral string to its decimal integer value."""
    decimal_value = int(number, 2)
    return decimal_value
76369e12481cb77224cec799dcdfa33e65226665
108,629
def comment(commentstr=''):
    """Wrap *commentstr* in an HTML comment, stripping any "--" sequences
    (which are not allowed inside HTML comments)."""
    safe = commentstr.replace('--', '')
    return '<!-- ' + safe + ' -->'
1dab75f200b02b70238f8113287faeb85794f0a7
108,631
import torch


def _add_rician_noise(dat, noise_prct=0.1):
    """Adds rician noise to a tensor as:
    dat = dat + n_real + i*n_img, where n_real, n_img ~ N(0, std**2)
    dat = magnitude(dat)

    Parameters
    ----------
    dat : tensor
        Input data
    noise_prct : float, default=0.1
        Amount of noise to add, as a percentage of input max value

    Returns
    ----------
    dat : tensor
        Noisy data
    std : float
        Noise standard deviation (note: computed as noise_prct * dat.max(),
        so at runtime this is a 0-dim tensor rather than a plain float)
    """
    # Noise std is scaled to the input's dynamic range.
    std = noise_prct * dat.max()
    # Magnitude of (dat + real noise) + i*(imaginary noise); the two
    # randn_like draws are independent.  Keep the two calls in this exact
    # order -- reproducibility under a fixed seed depends on it.
    dat = ((dat + std*torch.randn_like(dat))**2 +
           (std*torch.randn_like(dat))**2).sqrt()
    return dat, std
e3b39ea648df28940ee0ba52cab38e40dba9cb18
108,632
import copy


def _enforce_hierarchy(dupe_dict, values, hierarchy):
    """ Enforce a general hierarchy of which structures to keep, based on
    the list of values and their importance.

    Parameters:
        dupe_dict (dict): the dictionary keyed by the index of unique
            structures that holds lists of duplicates for that structure.
        values (list): the list of values for each structure on which to
            enforce the hierarchy.
        hierarchy (list): the order in which to consider the values, e.g.
            `['ICSD', 'OQMD']` will promote ICSD structures over OQMD.

    Returns:
        dict: the reshuffled dictionary of duplicates.

    """
    # Sanity check: `values` must provide one entry for every index that
    # appears either as a key or inside any duplicate set.
    max_val = max(list(dupe_dict.keys()) + [val for t in dupe_dict.values() for val in t])
    if len(values) - 1 != max_val:
        raise RuntimeError("Number of hierarchy values does not much number of items: {} vs {}"
                           .format(len(values)-1, max_val))
    new_dupe_dict = copy.deepcopy(dupe_dict)
    swapped = []
    for i in new_dupe_dict:
        if not list(new_dupe_dict[i]):
            # No duplicates recorded for this representative; nothing to do.
            continue
        # Walk the hierarchy in priority order; the first member (the
        # current key i or one of its duplicates) whose value matches
        # becomes the preferred representative for this group.
        for value in hierarchy:
            found = False
            for k in [i] + list(new_dupe_dict[i]):
                if values[k] == value:
                    swapped.append((i, k))
                    found = True
                    break
            if found:
                break
    # Re-key each duplicate group under its preferred member, folding the
    # old key into the duplicate set (merging when the new key already
    # exists in the dict).
    for i, k in swapped:
        if i != k:
            if k in new_dupe_dict:
                new_dupe_dict[k].update([ind for ind in new_dupe_dict[i] if ind != k] + [i])
            else:
                new_dupe_dict[k] = set([ind for ind in new_dupe_dict[i] if ind != k] + [i])
            del new_dupe_dict[i]
    return new_dupe_dict
913a42743992be979c8c70154ab880b5d7c1e6cf
108,633
import string


def to_constant(s: str) -> str:
    """Returns a new str in CONSTANT_CASE, given any str.

    Examples:
    >>> to_constant('to_constant')
    'TO_CONSTANT'
    >>> to_constant('Meals, Entrees, and Side Dishes')
    'MEALS_ENTREES_AND_SIDE_DISHES'
    >>> to_constant('American Indian/Alaska Native Foods')
    'AMERICAN_INDIANALASKA_NATIVE_FOODS'
    """
    # Only ASCII letters and underscores survive; everything else is dropped.
    allowed = set(string.ascii_letters) | {'_'}
    s = s.replace(' ', '_')
    s = ''.join(ch.upper() for ch in s if ch in allowed)
    assert s.isidentifier(), s
    return s
664c297c10c9b28be2530facba26ec27bafcafa1
108,634
def list_strings(string1='', string2='', string3='', string4=''):
    """
    Put strings in a list.

    Parameters
    ----------
    string1 : string
    string2 : string
    string3 : string
    string4 : string

    Returns
    -------
    string_list : list of strings

    Examples
    --------
    >>> from mindboggle.guts.utilities import list_strings
    >>> string1 = 'a b c'
    >>> string2 = 'd e f'
    >>> string3 = ''
    >>> string4 = 'j k l'
    >>> string_list = list_strings(string1, string2, string3, string4)
    >>> string_list
    ['a b c', 'd e f', 'j k l']
    """
    # Bug fix: each argument is now checked itself (the original tested
    # isinstance(string1, str) for string2/string3/string4 as well).
    string_list = []
    for s in (string1, string2, string3, string4):
        if s and isinstance(s, str):
            string_list.append(s)
    return string_list
13ea667b0c93e3c12981551553fc472d9b93d9a2
108,638
def column_parser(text_column):
    """Return a parser for one labeled-data csv row.

    The produced parser assumes the label sits at index 0 of the row and
    the text at index *text_column*, and returns an (int label, text) pair.
    """
    def parse(row):
        label = int(row[0])
        return label, row[text_column]
    return parse
e6b1afcf0e01be7f2671c196277b5d2083ac2623
108,639
def in_commands(substr, commands):
    """Return True when *substr* occurs in any command's name (index 1)."""
    for cmd in commands:
        if substr in cmd[1]:
            return True
    return False
1ebece7b16a60c33975024f2bed10bb641bb02ad
108,641
def compare(name, first, second, bfr):
    """Ensure a shell block opener is paired with its correct closer.

    *first*/*second* carry begin/end offsets into *bfr*; the text at those
    spans is the opening and closing keyword respectively.
    """
    opener = bfr[first.begin:first.end]
    closer = bfr[second.begin:second.end]
    # Valid closers for each opening keyword.
    valid_closers = {
        "if": ("fi",),
        "select": ("done",),
        "for": ("done",),
        "while": ("done",),
        "until": ("done",),
        "case": ("esac",),
    }
    return closer in valid_closers.get(opener, ())
c93659b3fa43caa0bb70db3a1ab763b0aaa98c25
108,642
def make_mongo_url(user, pwd, url, db):
    """Build a mongodb:// connection string.

    :param user: user
    :param pwd: password
    :param url: host (and optional port)
    :param db: database name
    :return: mongo url string
    """
    # str.join keeps the original strictness: non-str parts raise TypeError.
    parts = ["mongodb://", user, ":", pwd, "@", url, "/", db]
    return "".join(parts)
b2695edf083fae408c8d7151d3881626fb45efae
108,646
def get_commit_statuses(api, urn, ref):
    """Return combined commit statuses for *ref*.

    Uses the aggregated status endpoint:
    https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
    ref can be a sha, tag or a branch name (e.g. "master").
    """
    path = f"/repos/{urn}/commits/{ref}/status"
    combined = api("get", path)
    return combined.get("statuses", [])
23b276b9beee6e11d551016032485fa8e230b322
108,647
def get_resolwe(*resources):
    """Return the single Resolwe connection shared by the given resources.

    Raise an error if there is more than one (or none at all).
    """
    connections = {resource.resolwe for resource in resources}
    if len(connections) != 1:
        raise TypeError('All input objects must be from the same `Resolwe` connection.')
    (connection,) = connections
    return connection
7cb271b94317f07b87478351070042cae75548c3
108,649
def rgb24_to_rgb16(red, green, blue):
    """
    Convert 24-bit RGB color components to a 16-bit (5-6-5) RGB color.

    :param red: The RED component in the RGB color.
    :type red: int
    :param green: The GREEN component in the RGB color.
    :type green: int
    :param blue: The BLUE component in the RGB color.
    :type blue: int
    :return: A 16-bit RGB color.
    :rtype: int
    """
    # Rounded rescale (with half-range bias) rather than plain bit truncation.
    r5 = round((0x1F * (red + 4)) / 0xFF)
    g6 = round((0x3F * (green + 2)) / 0xFF)
    b5 = round((0x1F * (blue + 4)) / 0xFF)
    return (r5 << 11) | (g6 << 5) | b5
a86e94d43087bdb32c0aac6c81fe6bb241c8147c
108,651
async def resolve_delete_user(_root, info, id):
    """Resolver function for deleting a user object."""
    registry = info.context["registry"]
    user = await registry.get(id)
    await registry.delete(user.id)
    return True
ca1a7ffc0c3684f8e21301073fc512a630a1e0f3
108,653
import string


def _consume_whitespace(line, start=0):
    """Return the index of the next non-whitespace character at or after
    *start*; returns len(line) when there is none."""
    index = start
    while index < len(line):
        if line[index] not in string.whitespace:
            return index
        index += 1
    return len(line)
78646408c497a687e5122f9daffc42a920b29d61
108,654
import pickle


def load_worker(worker_file):
    """Load a pickled worker object from *worker_file*."""
    with open(worker_file, 'rb') as handle:
        return pickle.load(handle)
1e6c306eeeaa459450ecb3b282c3540e6ca5a365
108,659
def is_even(num):
    """Return True when *num* is evenly divisible by two."""
    remainder = num % 2
    return remainder == 0
5e4d081640562c68740e294d693f2c84417d7894
108,660
def get_strictness_label(strictness):
    """Get the alert box/label coloring based on strictness."""
    levels = {
        'low': 'info',
        'medium': 'warning',
        'high': 'danger',
        'veryhigh': 'danger',
    }
    # Unknown strictness values fall back to the neutral style.
    return levels.get(strictness, 'default')
7cb5e8ab37ce54bd4f94a8d3a844aa0f5be61435
108,663
def reset_params_skorch(regressor):
    """
    Manually reset the parameters in each layer of a skorch regressor model.

    Parameters
    ----------
    regressor : skorch.NeuralNetRegressor
        The neural net regressor (wrapped PyTorch model) whose parameters
        are reset in place.

    returns : None
    """
    module = regressor.get_params()["module"]
    module.zero_grad()
    for child in module.children():
        # Only layers that implement reset_parameters are reinitialised.
        if hasattr(child, 'reset_parameters'):
            child.reset_parameters()
    return None
fc163654d35c18c9b334a94c27dbb0d9ace4a615
108,665
def Ttr(x):
    """Equation for the triple point of an ammonia-water mixture.

    Parameters
    ----------
    x : float
        Mole fraction of ammonia in mixture [mol/mol]

    Returns
    -------
    Ttr : float
        Triple point temperature [K]

    Raises
    ------
    NotImplementedError : If input isn't in limit 0 ≤ x ≤ 1

    References
    ----------
    IAPWS, Guideline on the IAPWS Formulation 2001 for the Thermodynamic
    Properties of Ammonia-Water Mixtures,
    http://www.iapws.org/relguide/nh3h2o.pdf, Eq 9
    """
    if not 0 <= x <= 1:
        raise NotImplementedError("Incoming out of bound")
    # Four composition ranges, each with its own fitted polynomial.
    if x <= 0.33367:
        return 273.16*(1-0.3439823*x-1.3274271*x**2-274.973*x**3)
    if x <= 0.58396:
        return 193.549*(1-4.987368*(x-0.5)**2)
    if x <= 0.81473:
        return 194.38*(1-4.886151*(x-2/3)**2+10.37298*(x-2/3)**3)
    return 195.495*(1-0.323998*(1-x)-15.87560*(1-x)**4)
e29839ef15a012fae150e0bfb6b877af99875e49
108,668
def concatenate_list_data(char_list):
    """Concatenate the string forms of the list's elements into one word.

    INPUT:  translated character list
    OUTPUT: a single string representing a word
    """
    return ''.join(str(element) for element in char_list)
7811a3c9c36a988f81291b78ce567864c243314f
108,671
def options(request):
    """Return the command-line options stored on the pytest config."""
    config = request.config
    return config.option
3471e19301ad586923ec96c4cd8da07d9a4a538c
108,675
from typing import Generator
import re


def split_lines(string: str) -> Generator[str, None, None]:
    """Split *string* into lines, skipping empty ones; surrounding spaces
    are removed from each line."""
    return (
        match.group(0).strip()
        for match in re.finditer(r".*(?:$|\n)", string)
        if match.group(0).strip()
    )
521882cb40826fe9eb6d84349aaba1decbbfa52b
108,676
def flatten_list(lst):
    """Flatten one level of nesting: list items are expanded in place,
    everything else is kept as-is.

    :param lst: list possibly containing sub-lists
    """
    result = []
    for element in lst:
        if isinstance(element, list):
            result += element
        else:
            result.append(element)
    return result
9287c4701f789cc772763ea684549e82952113c9
108,678
def file_to_class_name(f_name: str) -> str:
    """Turn a snake_case scan file name into a CamelCase class name."""
    words = f_name.split('_')
    return ''.join(word.title() for word in words)
40684691577b147059b6f3bfa65f2b90d069912f
108,681
def get_coords(object_, points):
    """Return the coordinates paired with *object_* in *points*.

    *points* is an iterable of (coords, object) pairs.  Raises
    StopIteration when the object is not present (same as the original
    next()-based lookup).
    """
    matches = (coords for coords, candidate in points if candidate == object_)
    return next(matches)
052de5611d085603f3a18efaa4a4201d2d1cf0b8
108,682
def inter_over_union(interval_1, interval_2):
    """Intersection over union for two 1-D intervals."""
    start_1, end_1 = interval_1
    start_2, end_2 = interval_2
    intersection = max(0, min(end_1, end_2) - max(start_1, start_2))
    if intersection > 0:
        union = max(end_1, end_2) - min(start_1, start_2)
    else:
        # Disjoint intervals: IoU is 0 regardless; use total length as union.
        union = (end_1 - start_1) + (end_2 - start_2)
    return intersection / union
e7d2c724cef4317a73a6562d8187c06ed6ffd5a7
108,684
import colorsys


def rgb_to_hs(rgbstr):
    """Convert a '#RRGGBB' color string to a hue/saturation dict.

    The reported saturation is derived from the HSV value channel:
    0.5 + (255 - v) / 512, not the HSV saturation itself.
    """
    red, green, blue = bytes.fromhex(rgbstr[1:])
    hue, _sat, value = colorsys.rgb_to_hsv(red, green, blue)
    adjusted_saturation = 0.5 + (255 - value) / 512
    return {'hue': hue, 'saturation': adjusted_saturation}
9597056e1c30198fe73f57a000da82c91aec3d3c
108,685
def requires(filename):
    """Returns a list of all pip requirements.

    :param filename: the pip requirement file (usually 'requirements.txt')
    :return: list of modules
    :rtype: list
    """
    with open(filename, 'r') as pipreq:
        # Keep only non-empty lines that are not comments.
        return [
            stripped
            for stripped in (raw.strip() for raw in pipreq)
            if stripped and not stripped.startswith('#')
        ]
02ab292c4b475b8c41256150676c7d459416c8ac
108,686
def get_wdl_boolean_string(boolean):
    """WDL's read_boolean expects lowercase 'true'/'false' strings;
    Python's default str(bool) capitalization doesn't work."""
    text = str(boolean)
    return text.lower()
0482aa1f3d4234859fa03cbe053eb97dc5c09479
108,694
def Lipinski(calc, exp, low, high):
    """
    Input:
        listLike calculated and experimental data you want to compare
        float low and high limits for the test
    returns:
        number of correctly predicted values (in or outside range)
        number of false positives
        number of false negatives
    """
    correct = falsePos = falseNeg = 0
    for c, e in zip(calc, exp):
        calc_in_range = low <= c <= high
        exp_in_range = low <= e <= high
        if calc_in_range == exp_in_range:
            # Both in or both out of range: the prediction is correct.
            correct += 1
        elif calc_in_range:
            falsePos += 1
        else:
            falseNeg += 1
    return correct, falsePos, falseNeg
396dede616355999aa7535f3085cc6330ea554cb
108,699
def S(Lt, l):
    """
    Calculates the S coefficient.

    Parameters
    ----------
    Lt : float
        The length way of liquid, [m]
    l : float
        The length way of liquid of one slot mix, [m]

    Returns
    -------
    float
        The S coefficient, [dimensionless]
    """
    coefficient = Lt / l
    return coefficient
0df571c0b597dd05c3e516919dd11a27f0451d3c
108,701
def _flatten_serializer_errors_to_list(serializer_errors):
    """Flatten DRF serializer validation errors into a list with one
    'field: detail,detail' string per field."""
    return [
        f"{field_name}: {','.join(str(detail) for detail in details)}"
        for field_name, details in serializer_errors.items()
    ]
f7604edff451dd14d57cbb63d722230bc194587a
108,702
def test_tosolr_index_update_errors(basic_exporter_class, record_sets,
                                    new_exporter, setattr_model_instance,
                                    assert_records_are_indexed,
                                    assert_records_are_not_indexed):
    """
    When updating indexes via a ToSolrExporter, if one record causes an
    error during preparation (e.g. via the haystack SearchIndex obj), the
    export process should: 1) skip that record, and 2) log the error as a
    warning on the exporter. Other records in the same batch should still
    be indexed.
    """
    # Arrange: an ItemsToSolr exporter in the 'waiting' state.
    records = record_sets['item_set']
    expclass = basic_exporter_class('ItemsToSolr')
    invalid_loc_code = '_____'
    exporter = new_exporter(expclass, 'full_export', 'waiting')

    # Patched prepare hook: raises for the sentinel location code so exactly
    # one record fails during index preparation.
    def prepare_location_code(obj):
        code = obj.location_id
        if code == invalid_loc_code:
            raise Exception('Code not valid')
        return code

    exporter.indexes['Items'].prepare_location_code = prepare_location_code
    # Make only the first record carry the invalid code.
    setattr_model_instance(records[0], 'location_id', invalid_loc_code)

    # Act: run a full export + commit over the whole batch.
    exporter.export_records(records)
    exporter.commit_indexes()

    # Assert: the failing record was skipped, the rest were indexed, and the
    # failure was recorded as exactly one batch error on the index.
    assert_records_are_not_indexed(exporter.indexes['Items'], [records[0]])
    assert_records_are_indexed(exporter.indexes['Items'], records[1:])
    assert len(exporter.indexes['Items'].last_batch_errors) == 1
dda25db5a2e2c43e4aea9de15c77017dc8f75a13
108,704
def _build_summary(top_sentences, all_sents, max_len, sents_to_add=None): """ Auxillary function for summary building. Attempts to fit as many sentences into a summary as possible. Specifically, tries to add each sentence to the summary, starting from the best one and making sure to not go over a tweet's length Arguments: `top_sentences` A list of sentence indices sorted in decreasing order of importance `all_sents` All sentences from the original document `max_len` The maximum length of the summary in characters `sents_to_add` A list of sentence indices already added to the summary Returns a tuple containing: - The list of sentence indices to be contained in the summary - The length of the generated summary in characters """ # Try to add each sentence to the summary, starting from the best one # and making sure to not go over a tweet's length if sents_to_add is None: sents_to_add = set() summary_size = 0 for i in top_sentences: if i not in sents_to_add: full_sent = all_sents[i].text new_size = summary_size + len(full_sent) if summary_size + new_size <= max_len: sents_to_add.add(i) summary_size += len(full_sent) + 1 # +1 because of the space/newline between sentences return sents_to_add, summary_size
21094c402472106d4dd12ebe4be17f5725d7e2f4
108,705
def get_machine_from_parent(self):
    """Search in the parent to find the machine

    Parameters
    ----------
    self : OP
        An OP object

    Returns
    -------
    machine : Machine
        Machine from the parent (or None)
    """
    # First pass: walk up the parent chain looking for a "machine" attribute
    ancestor = self.parent
    while ancestor is not None and not hasattr(ancestor, "machine"):
        ancestor = ancestor.parent
    if ancestor is not None:
        return ancestor.machine

    # Fallback: walk up again looking for a Simu object holding the machine
    ancestor = self.parent
    while ancestor is not None and not hasattr(ancestor, "simu"):
        ancestor = ancestor.parent
    if ancestor is not None:
        return ancestor.simu.machine
    return None
17d37b4e0015b725e4a19eb817f04f7e4bb98a99
108,706
def type_check(what, of_type, msg=None, allow_none=False):
    """Verify that object 'what' is of type 'of_type' and if not the case,
    raise a TypeError.

    :param what: the object to check
    :param of_type: the type (or tuple of types) to compare to
    :param msg: if specified, allows to customize the message that is passed
                within the TypeError exception
    :param allow_none: boolean, if True will not raise if the passed `what`
                       is `None`
    :return: `what` or `None`
    """
    if what is None and allow_none:
        return None
    if isinstance(what, of_type):
        return what
    # Build the default message only when the caller didn't supply one
    error = msg if msg is not None else \
        "Got object of type '{}', expecting '{}'".format(type(what), of_type)
    raise TypeError(error)
752bfd2a686d55f0db218c5d95353770c894732a
108,714
def enable_explore_tab(bio_network, enrichment=None):
    """Disables Explore tab if there is no network to explore."""
    if bio_network and enrichment:
        return False
    return True
c8598c02a782d50cdf515960dbc69427b7d8e749
108,716
def finddefault(f):
    """Return the default value given a format string.

    'A' formats (text) default to "UNKNOWN", 'I' formats (integer) to -999,
    anything else (floats) to -999.99.
    """
    if 'A' in f:
        return "UNKNOWN"
    if 'I' in f:
        return -999
    return -999.99
793a8c8a003c46259b5e231b85e618cb84f86a19
108,725
def produce_can_h(can_entry):
    """Generate C header code for a can info entry (row of csv file)."""
    field_name = can_entry["Field Name"].lower()
    upper_name = field_name.upper()
    can_id = can_entry["CAN ID"]

    # Assemble the output line by line, then join once at the end.
    parts = ["\n",
             f"// {can_entry['Field Name']}\n",
             f"// Type: {can_entry['Format']}\n",
             f"// Sender: {can_entry['Sender']}\n",
             f"// Receiver: {can_entry['Receiver']}\n"]
    if can_entry["Bounds/Range"] != '':
        parts.append(f"// Bounds/Range: {can_entry['Bounds/Range']}\n")
    if can_entry["Units"] != '':
        parts.append(f"// Units: {can_entry['Units']}\n")
    if can_entry["J1939?"] != '':
        parts.append(f"// J1939 compatible: {can_entry['J1939?']}\n")
    else:
        parts.append(f"// J1939 compatible: NO\n")
    if can_entry["Field Description"] != '':
        # Multi-line descriptions become comment continuation lines
        parts.append("//\n")
        parts.append('// ' + can_entry['Field Description'].replace('\n', '\n//\t') + "\n")
    parts.append(f"#define CAN_ID_{upper_name} {can_id}\n")
    parts.append(f"#define BYTE_LENGTH_{upper_name} {can_entry['Byte Length']}\n")
    if can_entry['PGN'] != '':
        parts.append(f"#define PGN_{upper_name} {can_entry['PGN']}\n")
    return "".join(parts)
8621dd1ad5939efd0f213c33358c0d282419c846
108,727
import typing
import multiprocessing


def enumerate_cpu_counts() -> typing.List[int]:
    """This program prints the number of CPU counts to benchmark on this
    machine.

    We remove some percentage of CPU cores off the top for system / background
    processing. With the CPUs that remain, we generate a list of evenly spaced
    worker counts. The list is limited by the number of trials desired. This
    is meant to help us explore the number of CPUs that should be dedicated to
    MZ_WORKERS, not as a prescription for the correct values to choose.

    On a Macbook with 8 cores, this will return [6, 4, 3, 2].
    On a 56 core machine, this returns [24, 18, 12, 6].
    On a 96 core machine, this returns [41, 30, 20, 10].

    NOTE(review): the example outputs depend on what multiprocessing.cpu_count()
    reports (logical CPUs); they may be stale relative to the 0.425 factor —
    verify on the target machines.
    """
    # 0.425 = 0.85 * 0.5: drop 15% for system overhead, then halve the logical
    # CPU count to approximate physical cores (assumes 2 hyperthreads/core).
    max_cpus = round(multiprocessing.cpu_count() * 0.425)
    num_trials = 4
    # Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
    worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
    # set() removes duplicates produced by rounding; result sorted descending
    return list(reversed(sorted(set(worker_counts))))
592fec8e11f381fb05d8b6dea12d3fe0cf34c5d6
108,729
def is_visible(lat, lon, domain_boundaries, cross_dateline) -> bool:
    """Check if a point (city) is inside the domain.

    Args:
        lat                 float       latitude of city
        lon                 float       longitude of city
        domain_boundaries   list        lon/lat range of domain
        cross_dateline      bool        if cross_dateline --> western lon
                                        values need to be shifted

    Returns:
        bool                True if city is within domain boundaries,
                            else false.
    """
    # Shift western longitudes into the 0..360 range when the domain
    # crosses the dateline
    if cross_dateline and lon < 0:
        lon = 360 - abs(lon)

    lon_inside = domain_boundaries[0] <= float(lon) <= domain_boundaries[1]
    lat_inside = domain_boundaries[2] <= float(lat) <= domain_boundaries[3]
    return lon_inside and lat_inside
c6b5ac05b82fe1e6f2ecc1b36e5568e33ce68c55
108,731
def merge_dicts(idict):
    """ Merge an iterator over dictionaries into a single dict """
    merged = {}
    for d in idict:
        merged.update(d)
    return merged
3a718222099cac5e7725145466c61f20212eb6a2
108,733
def merge_dicts(dict1: dict, dict2: dict) -> dict:
    """
    Merges two dictionaries into one dictionary.
    :type dict1: ``dict``
    :param dict1: The first dictionary.
    :type dict2: ``dict``
    :param dict2: The second dictionary.
    :return: The merged dictionary.
    :rtype: ``dict``
    """
    # Copy dict1, then let dict2's entries override on key clashes
    merged = dict(dict1)
    merged.update(dict2)
    return merged
ccf6339510777011edfcdb9107361a7e788e4c58
108,735
def all_keys_in_dict(in_dict, keys):
    """
    Check that all keys are present in a dictionary.

    Args:
        in_dict (dict): Input dict.
        keys (list): Keys that must be present in ``in_dict``.

    Returns:
        True if all ``keys`` are in ``in_dict``, False otherwise.

    >>> all_keys_in_dict(
    ...     {'key1': '', 'key2': '', 'key3': ''}, ['key1', 'key2'])
    True
    >>> all_keys_in_dict(
    ...     {'key1': '', 'key2': '', 'key3': ''}, ['key1', 'key4'])
    False
    """
    missing = [key for key in keys if key not in in_dict]
    return not missing
6a19d4b06ca0063aaeaf81d2c98fc93a6276559f
108,740
import itertools
import six


def iterate_allocations(path, alloc):
    """Generate (path, alloc) tuples for the leaves of the allocation tree."""
    sub_allocs = alloc.sub_allocations
    if not sub_allocs:
        # Leaf node: yield the joined path and the allocation itself
        return iter([('/'.join(path), alloc)])
    # Chain the leaf iterators of every sub-allocation together
    leaves = iter([])
    for name, suballoc in six.iteritems(sub_allocs):
        leaves = itertools.chain(leaves, iterate_allocations(path + [name], suballoc))
    return leaves
4dcf6afbe5170a1cc0b6a67bd4695b9a0e11d95a
108,741
def _make_singular_filter(filter_name: str, filter_val): """Create a elasticsearch filter for a single filter_name, filter_val pair. Note filter_val can be a list and an OR will be applied Args: filter_name (str): Name of filter filter_val (str | str[]): Value of filter Returns: Dict: Valid elasticsearch filter params """ if isinstance(filter_val, list): filters = [_make_singular_filter(filter_name, val) for val in filter_val] return {"bool": {"should": filters}} return {"match": {filter_name: filter_val}}
804efe1943d11a62fe5ff6eb45ef0de9023aecb3
108,744
def map_transcript_id2gene_symbol(maf_df):
    """Create and return a mapping from transcript_id to gene_symbol.

    Assume that each transcript_id uniquely maps to a gene_symbol.

    :param maf_df: DataFrame with 'Gene_Symbol' and 'Transcript_ID' columns
    :return: dict mapping Ensembl transcript ID (e.g. 'ENST00000369354')
        to gene symbol (e.g. 'AFF2')
    :raises ValueError: if a transcript_id maps to more than one gene_symbol
    """
    transcript_id2gene_symbol = dict()
    # zip over the two columns: much faster than DataFrame.iterrows(), which
    # builds a Series per row
    for transcript_id, gene_symbol in zip(maf_df['Transcript_ID'],
                                          maf_df['Gene_Symbol']):
        if transcript_id in transcript_id2gene_symbol:
            # Explicit raise instead of `assert`: asserts are stripped
            # under `python -O`, silently disabling this consistency check
            if transcript_id2gene_symbol[transcript_id] != gene_symbol:
                raise ValueError(
                    f"Transcript {transcript_id} maps to multiple gene "
                    f"symbols: {transcript_id2gene_symbol[transcript_id]} "
                    f"and {gene_symbol}")
        else:
            transcript_id2gene_symbol[transcript_id] = gene_symbol
    return transcript_id2gene_symbol
be44d95fbfae013f6bf3897b367e0f256546aeb1
108,751
def real_project_name(project_name):
    """
    Used to let Mezzanine run from its project template directory, in
    which case "{{ project_name }}" won't have been replaced by a real
    project name.
    """
    # The unrendered template placeholder means we're inside the template dir
    placeholder = "{{ project_name }}"
    if project_name == placeholder:
        return "project_name"
    return project_name
fb46e32768009fad95a05c9e0ad4572c584291d1
108,756
import re


def clean_value(value):
    """
    Clean a table-cell string and convert it to a float when possible.

    Newlines are stripped and thousands-separator commas are removed
    (commas are deleted, not turned into periods). Values starting with
    "-." are rewritten as "-0." before conversion. If the cleaned string
    cannot be parsed as a float, the (cleaned) string itself is returned
    rather than raising.

    :param value: A string representation of a value from the body of the table
    :type value: str
    :return: A float representation of the value, or the cleaned string when
        it is not numeric
    :rtype: float or str

    NOTE(review): a value that starts with "-." but is otherwise non-numeric
    (e.g. "-.abc") hits the first branch, whose float() call is NOT wrapped
    in try/except, so it raises ValueError — unlike the general branch.
    """
    # Remove new line expressions
    value = re.sub("\n", "", value)
    # Remove any thousands-separator commas (deleted, not replaced)
    value = re.sub(",", "", value)
    # Negative zero starting floats without a zero will not convert
    if value[0:2] == "-.":
        return float(f"-0.{value[2:]}")
    else:
        try:
            return float(value)
        except ValueError:
            # Non-numeric cell: fall back to returning the cleaned string
            return str(value)
8ce1cb23828f6ca8941c98e0a516279f21c09d89
108,757
import math


def ipart(x):
    """Return integer part of given number."""
    # modf splits x into (fractional, integer) parts, both floats,
    # truncating toward zero
    fractional, integer = math.modf(x)
    return integer
7a8b93315f461d91184ee6955a49df2fd3758948
108,769
from typing import OrderedDict


def prep_json_entry(entry):
    """Properly format and return a json object

    Copies the entry's fields into an OrderedDict in canonical key order.
    """
    field_order = ("vocabulary", "variant_of", "pronunciation", "meaning",
                   "language", "about_ethnicity", "about_nationality",
                   "about_religion")
    return OrderedDict((field, entry[field]) for field in field_order)
87c1935e9844cc0d23b33a925b9d4ef4db545e50
108,770
def calculate_input_vector_length(user_count, computer_count, auth_type_count, logon_type_count):
    """ Return model input vector length with user, computer, auth_type, logon_type one-hot encoded. """
    # Users and computers each appear twice (source and destination),
    # plus 3 extra non-encoded features
    doubled = 2 * (user_count + computer_count)
    return 3 + doubled + auth_type_count + logon_type_count
12c4256d6c97c7d2406e36a07ee08254406a4331
108,773
def get_mean_intensities(conn, image, the_c, shape_id):
    """ Get the mean pixel intensities of an roi in a time series image
    :param conn: The BlitzGateway
    :param image: The image
    :param the_c: The channel index
    :param shape_id: The ROI shape id
    :return: List of mean intensity values (one for each timepoint)
    """
    roi_service = conn.getRoiService()
    the_z = 0
    # One mean per timepoint, always at z-plane 0 for the requested channel
    return [
        roi_service.getShapeStatsRestricted(
            [shape_id], the_z, the_t, [the_c])[0].mean[0]
        for the_t in range(image.getSizeT())
    ]
5834b96769ada2fbf72a8daeeed7843bfe1c0d10
108,777
import requests


def get_metadata_value(key):
    """ Fetch the key from the metadata server """
    # GCE metadata server requires the Metadata-Flavor header
    response = requests.get(
        'http://metadata/computeMetadata/v1/instance/' + key,
        headers={'content-type': 'application/json',
                 'Metadata-Flavor': 'Google'},
    )
    return response.text
2ba0ae04e6e427afe1b4fc3e7a091947946b2556
108,780
def add_to_list(existing_list, inp_list):
    """ Add input list to an existing list.

    The existing "list" and input "list" are checked to see if they are list
    instances, and if not configured to be a lists. The existing list is
    extended with the input list.

    :param existing_list: Existing list, which needs to be extended. Can be
        made of heterogenous objects.
    :paramtype existing_list: list
    :param inp_list: Input list. Can be made of heterogenous objects.
    :paramtype inp_list: list
    :return: Extended list.
    :rtype: list
    """
    # Nothing to add: return the existing value untouched (even if not a list)
    if inp_list is None:
        return existing_list
    additions = inp_list if isinstance(inp_list, list) else [inp_list]
    if existing_list is None:
        return additions
    # Wrap a scalar existing value, then extend in place
    combined = existing_list if isinstance(existing_list, list) else [existing_list]
    combined.extend(additions)
    return combined
b3dd8b08a7eb8a52d69bad0f4f9389416419995d
108,785
import math


# Usefulness? The floor concept is hard at high-school level for negative
# numbers. Maybe provide a truncation (drop-the-decimals) helper instead?
def partie_entiere(x):
    """Return the floor of ``x``, i.e. the largest integer less than or
    equal to the real number ``x``.

    Arguments:
        x (float): A decimal number.
    """
    return math.floor(x)
776f5a2ad77b2b97c84c9a9862aa23b05c088428
108,787
def PadLeft(s, Len, Padding = '0'):
    """Pads s on the left with Padding to length Len.
    s: a string.
    Len: an integer.
    Padding: a string."""
    # Prepend whole copies of Padding until the target length is reached;
    # a multi-character Padding may overshoot Len (preserved behavior).
    padded = s
    while len(padded) < Len:
        padded = Padding + padded
    return padded
2c5c05bba6deee82ca4ba6d9eb154566833d2ef0
108,790
def convertArrayInTupleList(array):
    """
    Convert an array (or a list) of element in a list of tuple where each
    element is a tuple with two sequential element of the original array/list.

    Parameters
    ----------
    array : numpy array/list

    Returns
    -------
    tuple_list. List of tuple
        Given the input array = [a, b, c, d ...] the tuple_list will be
        [(a, b), (b, c), (c, d) ...]
    """
    # zip the sequence with itself shifted by one to form consecutive pairs
    return list(zip(array, array[1:]))
c48a94b53814fd50264dd6975c0e8db64ed013a8
108,797
def get_color_from_color_code(color_code):
    """Converts a color code to a color name.

    Args:
        color_code (list): color code

    Returns:
        str: color name
    """
    red, green, blue = color_code[0], color_code[1], color_code[2]
    if red:
        return 'red'
    if green:
        return 'green'
    if blue:
        return 'blue'
    # No single channel set: any falsy element means black
    if not all(color_code):
        return 'black'
    return 'unknown'
a567ea58d583286178fa98b124147a0c990ae13b
108,802
def all_a_in_b(a, b):
    """Return true if all elements *s* of <a> also exist in <b>."""
    for element in a:
        if element not in b:
            return False
    return True
12db39a3c91ec9b16f2b919f66ca45016a9973e2
108,804
from typing import Iterable
from typing import Deque
import collections


def rotate_copy(iterable: Iterable, n: int) -> Deque:
    """Return a deque of iterable with it's content rotated n places to the
    right.

    iterable: An iterable to rotate
    n: The number of places to rotate the iterable (negative values rotate
       to the left)
    """
    # Copy into a deque first so the input iterable is never mutated
    rotated: Deque = collections.deque(iterable)
    rotated.rotate(n)
    return rotated
522202ce5e57add3251a0c8b3bd032c1aaac713e
108,808
def _beam_fit_fn_2(z, d0, Theta): """Fitting function for d0 and Theta.""" return d0**2 + (Theta*z)**2
d95198169c6def9df41c03c870233029d6ffd770
108,815
import difflib


def get_name_similarity(string1, string2):
    """
    Return a score between 0 and 100 of the strings' similarity, based on
    difflib's string similarity algorithm returning an integer between 0
    (no match) and 100 (perfect).

    70 or more seems to be a confident enough match
    """
    # Based on https://github.com/seatgeek/fuzzywuzzy/blob/master/fuzzywuzzy/fuzz.py
    ratio = difflib.SequenceMatcher(None, string1, string2).ratio()
    return int(ratio * 100)
2189507ed03d98226f8911d283e4fb4f10fc4b81
108,816