Columns: content (string, 39 to 14.9k characters), sha1 (string, 40 characters), id (int64, 0 to 710k)
import csv

def read_csv_file(filename):
    """ Read the CSV file with the image and mask filenames """
    imgFiles = []
    mskFiles = []
    with open(filename, "rt") as f:
        data = csv.reader(f)
        for row in data:
            if len(row) > 0:
                imgFiles.append(row[0])
                mskFiles.append(row[1])
    return imgFiles, mskFiles, len(imgFiles)
d62c31b7938a487ac743931ccd0fe6b457fd0673
67,724
def check_numeric(value_string):
    """
    This function tries to determine if a string would be better represented
    with a numeric type, either int or float. If neither works, for example
    ``10 Mb``, it will simply return the same string provided.

    :param value_string: input string to be parsed.
    :return: the input string, an int, or a float
    """
    # Already numeric: return unchanged. (The original compared type() against
    # the strings 'int'/'long'/'float', which never matched.)
    if isinstance(value_string, (int, float)):
        return value_string
    try:
        if '.' in value_string:
            return float(value_string)
    except (TypeError, ValueError):
        pass
    try:
        return int(value_string)
    except (TypeError, ValueError):
        pass
    return value_string
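A quick illustrative check of how the repaired helper behaves; the inputs are made up for demonstration only:

assert check_numeric("3.5") == 3.5
assert check_numeric("42") == 42
assert check_numeric("10 Mb") == "10 Mb"  # falls through unchanged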
6a85143c59cf90b6473788b0b45aa3c24ba348ae
67,728
import torch
import math

def shifted_softplus(x: torch.Tensor) -> torch.Tensor:
    """ Shifted softplus activation function. """
    return torch.nn.functional.softplus(x) - math.log(2)
80902a0f39c9b00c0b604943360989971e9b9f15
67,731
from typing import Any
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union

def _union_args(cls: Any) -> Optional[Tuple[Type]]:
    """Convert ``Union[T1, T2]`` to (T1, T2).

    Returns ``None`` if `cls` is not a specific :class:`typing.Union` type.
    """
    if not isinstance(cls, type(Union[int, float])):
        return None
    try:
        # Python 3.5.3 onwards
        args = cls.__args__  # type: ignore
    except AttributeError:
        args = cls.__union_params__  # type: ignore
    return args
aca85b55f37ce6c307d5cd284aa191e6d6a4e319
67,732
def get_all(client, query_params):
    """Requests the list of roles."""
    return client.get_roles(**query_params)
89c4a316fdcffbc935674716ab4cf0231bfc423a
67,735
def calculate_midpoint(l, r):
    """
    @param l left index, included in the range of elements to consider
    @param r right index, included in the range of elements to consider
    @return None if there are no elements to range over. Returns the midpoint
        from the "left side" or "left half" if there's an even number of elements.
    @details index = l, l+1, ... r are all included indices to consider.
    """
    # Get the total number of elements to consider.
    L = r - l + 1
    if L <= 0:
        return None
    return (L // 2 + l) if (L % 2 == 1) else (L // 2 - 1 + l)
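An illustrative check of the midpoint convention; the index values are hypothetical:

assert calculate_midpoint(0, 4) == 2     # 5 elements (odd) -> true middle
assert calculate_midpoint(0, 3) == 1     # 4 elements (even) -> left half
assert calculate_midpoint(3, 2) is None  # empty range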
56485e9090889cc5a01e2728eb5c58d02990ff47
67,744
import math

def yrlygrowth(total_growth, years):
    """
    Determine the annual growth from the growth over an arbitrary time span.
    """
    return math.exp(math.log(total_growth) / years)
5763cb27a9289395e3bc3885d68e76ef855fe887
67,751
def bpm2msec(bpm):
    """ Obtains the rr duration in msec corresponding to a specific frequency
    in beats per minute. """
    return 60000 / bpm
f5f660cd1cc813f77e2749b9ba25ad20db5bc157
67,755
def palindrome(value: str) -> bool:
    """ Returns true if value/string is a palindrome."""
    return str(value) == str(value)[::-1]
548f668ab40db01387d751e71687a71f64e63e4e
67,758
def wpa_validation_check(words=[]):
    """
    Function to optimise wordlist for wpa cracking

    > Removes duplicates.
    > Removes passwords < 8 or > 63 characters in length.
    """
    custom_list = list(set(words))
    custom_list = [x for x in custom_list if not (len(x) < 8 or len(x) > 63)]
    return custom_list
30c8a905c3b8c9d182d528db2c35fb89104b86c1
67,760
def _count_occurences(needle, haystack):
    """ Count occurrences of the string needle in haystack """
    return len(haystack.split(needle)) - 1
7ee78a5270da79611e8e5d67ecf038d49ad321a1
67,763
import re

def to_snake_case(input):
    """Convert a string into its snake case representation."""
    str1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", input).replace("-", "_")
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", str1).lower()
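A few example conversions; the input strings are illustrative only:

assert to_snake_case("CamelCaseString") == "camel_case_string"
assert to_snake_case("already-kebab-case") == "already_kebab_case"
assert to_snake_case("HTTPResponseCode") == "http_response_code"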
44a3656a0d0456a73bab78374bf87b9a35bb3acb
67,766
def is_truncate(q):
    """
    True if `q` is a TRUNCATE table statement

    :param q:
    :return:
    """
    return 'truncate' in q.lower()
c5def37dab064c36f0f28de6310d6ce265571e1d
67,773
def common_substr_2(seq1, seq2, sorting=None):
    """
    Find the longest common consecutive subsequence(s).
    This version works for two iterables.

    This is known as the `longest common substring` problem, or LCS for short.

    Args:
        seq1 (iterable): The first input sequence.
            Must be of the same type as seq2.
        seq2 (iterable): The second input sequence.
            Must be of the same type as seq1.
        sorting (callable): Sorting function passed to 'sorted' via `key` arg.

    Returns:
        commons (list[iterable]): The longest common subsequence(s).

    Examples:
        >>> common_substr_2('academy', 'abracadabra')
        ['acad']
        >>> common_substr_2('los angeles', 'lossless')
        ['los', 'les']
        >>> common_substr_2('los angeles', 'lossless', lambda x: x)
        ['les', 'los']
        >>> common_substr_2((1, 2, 3, 4, 5), (0, 1, 2))
        [(1, 2)]
    """
    # note: [[0] * (len(seq2) + 1)] * (len(seq1) + 1) will not work!
    counter = [[0 for j in range(len(seq2) + 1)] for i in range(len(seq1) + 1)]
    longest = 0
    commons = []
    for i, item in enumerate(seq1):
        for j, jtem in enumerate(seq2):
            if item == jtem:
                tmp = counter[i][j] + 1
                counter[i + 1][j + 1] = tmp
                if tmp > longest:
                    commons = []
                    longest = tmp
                    commons.append(seq1[i - tmp + 1:i + 1])
                elif tmp == longest:
                    commons.append(seq1[i - tmp + 1:i + 1])
    if sorting is None:
        return commons
    else:
        return sorted(commons, key=sorting)
467d96a1248531b560dc213383326accc5433bcd
67,775
import random
import math

def calculate_pi(attempts):
    """
    This function calculates an approximation of Pi using a raw MonteCarlo
    integration technique.

    :param attempts: the number of iterations for the MonteCarlo method
    :type attempts: int
    :return: the approximated value of Pi
    """
    assert isinstance(attempts, int), 'you must provide an integer'
    assert attempts > 0, 'you must provide a positive integer'
    falling_inside = 0
    for _ in range(attempts):
        # here we use functionalities provided by random and math modules
        x = random.uniform(0.0, 1.0)
        y = random.uniform(0.0, 1.0)
        if math.sqrt(x**2 + y**2) <= 1:
            falling_inside += 1
    pi = 4 * falling_inside / attempts
    return pi
86623bb4866ded1fd7760a408f7b4bac1be8f2c6
67,776
def strftime_with_precision(tick, format, sub_secs_precision=2):
    """
    Returns a formatted string for a datetime (tick).

    :param tick: The datetime for this tick
    :param format: The formatting string
    :param sub_secs_precision: Number of digits to use for sub-seconds.
        If None, will choose it "smartly/dynamically"
    :return: Formatted string
    """
    t = tick.strftime(format)
    is_us = '%f' in format
    if is_us:
        if sub_secs_precision is None:
            while t[-1] == '0':
                t = t[:-1]
            while not t[-1].isdigit():
                t = t[:-1]
            return t
        else:
            if sub_secs_precision < 0:
                sub_secs_precision = 0
            elif sub_secs_precision > 6:
                sub_secs_precision = 6
            DFLT_PRECISION = 6
            digits_to_skip = DFLT_PRECISION - sub_secs_precision
            if digits_to_skip == 0:
                return t
            else:
                t = t[:-digits_to_skip]
                while not t[-1].isdigit():
                    t = t[:-1]
                return t
    else:
        return t
52bdf2ceea3084b44dc9796a5a1eaf8f4648a72f
67,778
def read_file(dbx, from_file):
    """Read file from Dropbox

    Args:
        dbx (Dropbox object): Dropbox object generated from API token
        from_file (string): File location in Dropbox accounts

    Returns:
        int32: Percentage value
    """
    _, f = dbx.files_download(from_file)
    percentage = f.content
    percentage = percentage.decode('utf-8')
    return int(percentage)
f621ec3c36b1b934df03bfc849bce53c12559a9b
67,779
def zip_lsts(lsts):
    """ zip a list of lists """
    lengths = [len(lst) for lst in lsts]
    assert len(list(set(lengths))) == 1  # assert that the lsts have the same lengths
    zipped_lst = [list(tp) for tp in list(zip(*lsts))]
    return zipped_lst
e79e998ad863a93fdadd4ee3e127cf2375f567a6
67,781
from typing import Iterable

def flatten(iter, unlist=False):
    """
    Flatten an arbitrarily nested iterable whose innermost items are strings
    into a flat list of strings.

    Parameters
    ----------
    iter : iterable
    unlist : bool
        If True, convert single-item lists into a bare string
    """
    if isinstance(iter, dict):
        iter = iter.values()

    def gen():
        for item in iter:
            if isinstance(item, dict):
                item = item.values()
            if isinstance(item, Iterable) and not isinstance(item, str):
                yield from flatten(item)
            else:
                yield item

    results = list(gen())
    if unlist and len(results) == 1:
        return results[0]
    return results
d7ee0d4f20d2de2785f4820f92ee53d570185568
67,782
def without(dict_in: dict, keys: list):
    """
    Utility that copies a dictionary excluding a specific list of keys.
    """
    if not isinstance(keys, list):
        keys = [keys]
    new_d = dict_in.copy()
    for key in keys:
        new_d.pop(key)
    return new_d
537c8a679ac1e4e30d5c8a47f6d77dda97b6b1f7
67,783
def _MakeDispatchListIntoYaml(application, dispatch_list):
    """Converts list of DispatchEntry objects into a YAML string."""
    statements = []
    if application:
        statements.append('application: %s' % application)
    statements.append('dispatch:')
    for entry in dispatch_list:
        statements += entry.ToYaml()
    return '\n'.join(statements) + '\n'
d849444368ce68e51585cc97da11d46158f3cea1
67,785
def purity(rho):
    """
    Calculate the purity of a quantum state.

    Parameters
    ----------
    rho : :class:`qutip.Qobj`
        Density matrix of a quantum system.

    Returns
    -------
    purity_rho : float
        Purity of the quantum system, p (p=1 if pure, 1/N<=p<1 if mixed).
    """
    if rho.type == "ket":
        rho = rho * rho.dag()
    purity_rho = (rho * rho).tr()
    return purity_rho
21a11a761d0a5d9fa734f09e1b67e7c5b0283e8b
67,787
import pathlib

def to_path(*p, as_string=True):
    """Convert string to pathlib path.

    INFO: Path resolving removes stuff like "..";
    with 'strict=False' the path is resolved as far as possible -- any
    remainder is appended without checking whether it really exists.
    """
    pl_path = pathlib.Path(*p)
    ret = pl_path.resolve(strict=False)  # default return in case it is absolute path
    if not pl_path.is_absolute():
        # don't resolve relative paths (pathlib makes them absolute otherwise)
        ret = pl_path
    if as_string:
        return ret.as_posix()
    else:
        return ret
e821fea0d3798d743f5262082f60dd2c76812460
67,797
def predicatize(term):
    """Formats a term (string) to look like a Predicate."""
    # Replace spaces by underscores and lowercase first letter of a Predicate term
    term = term.replace(" ", "_")
    return term[0].lower() + term[1:]
f9fb3f7c8a9fa716b5aa1c4e0ebc6e5febe4a97f
67,798
def get_match_filter(barcode):
    """Return a function for filtering a pair of (seq, bc) namedtuple pairs;
    the function returns True if bc.seq == barcode
    """
    def filterfun(pair):
        seq, bc = pair
        return str(bc.seq) == barcode
    return filterfun
e0e4bc420673afb78e216e1dd1369ff4de537489
67,799
def _IsSpecified(args, name):
    """Returns true if an arg is defined and specified, false otherwise."""
    return hasattr(args, name) and args.IsSpecified(name)
3730179ad3913d95d9cc4cdbfd168ee92f54aff5
67,800
def _is_symmetric_padding(padding, data_dim):
    """
    Check whether padding is symmetrical.
    """
    assert len(padding) == data_dim * 2 or len(padding) == data_dim
    is_sys = True
    if len(padding) == data_dim * 2:
        for i in range(data_dim):
            if padding[i * 2] != padding[i * 2 + 1]:
                is_sys = False
    return is_sys
d6963ddc823d019d12dee6195a6640a95d65a904
67,811
def min_power_rule(mod, g, tmp):
    """
    **Constraint Name**: GenCommitCap_Min_Power_Constraint
    **Enforced Over**: GEN_COMMIT_CAP_OPR_TMPS

    Power minus downward services cannot be below a minimum stable level.
    """
    return mod.GenCommitCap_Provide_Power_MW[g, tmp] \
        - mod.GenCommitCap_Downwards_Reserves_MW[g, tmp] \
        >= mod.Commit_Capacity_MW[g, tmp] \
        * mod.gen_commit_cap_min_stable_level_fraction[g]
ff95763481ea356eb3c6108149de6b3e3550e3bb
67,813
import re

def swap_twitter_subject(subject, body):
    """If subject starts with 'Tweet from...' then we need to get the first
    meaningful line from the body."""
    if subject.startswith('Tweet from'):
        lines = body.split('\n')
        for idx, line in enumerate(lines):
            if re.match(r'.*, ?\d{2}:\d{2}]]', line) is not None:
                try:
                    subject = lines[idx + 1]
                except IndexError:
                    pass
                break
    return subject, body
7fbd1a236b0d65c2bec8d6519c0af65291c30688
67,821
import csv

def parse_csv(filename):
    """
    Convert CSV file to list of dictionaries.
    Source: https://stackoverflow.com/a/21572244/4562156

    :param filename: filename containing CSV data (relative to project root)
    :return: list of dictionaries representing each row with CSV schema as keys
    """
    with open(filename) as f:
        return [
            {k: v for k, v in row.items()}
            for row in csv.DictReader(f, skipinitialspace=True)
        ]
ae663fff60fa04936875ce11f7e35910951c551e
67,822
import re

def _valid_date(date):
    """
    Whether the date requested matches the desired format
    (starts with between 1 and 8 digits).

    :param date: The date to check
    :return: True if valid, false otherwise
    """
    return re.compile(r'^\d{1,8}').search(date)
c18d8c4f22f2f028b5c0d66aec9afad733d7c79b
67,828
def pairwise_analogy_func(wrap, a1, b1, a2, b2, weight_direct, weight_transpose):
    """ Rate the quality of the analogy a1 : b1 :: a2 : b2. """
    va1 = wrap.get_vector(a1)
    vb1 = wrap.get_vector(b1)
    va2 = wrap.get_vector(a2)
    vb2 = wrap.get_vector(b2)

    value = (
        weight_direct * (vb2 - va2).dot(vb1 - va1)
        + weight_transpose * (vb2 - vb1).dot(va2 - va1)
        + vb2.dot(vb1)
        + va2.dot(va1)
    )
    return value
6f96c26fb4c17d0006ab6888225dcc1f4bdc4391
67,833
def discrete_colorscale(bvals, colors):
    """
    bvals - list of values bounding intervals/ranges of interest
    colors - list of rgb or hex colorcodes for values in
             [bvals[k], bvals[k+1]], 0 <= k < len(bvals)-1
    returns the plotly discrete colorscale
    """
    if len(bvals) != len(colors) + 1:
        raise ValueError('len(boundary values) should be equal to len(colors)+1')
    bvals = sorted(bvals)
    nvals = [(v - bvals[0]) / (bvals[-1] - bvals[0]) for v in bvals]  # normalized values

    dcolorscale = []  # discrete colorscale
    for k in range(len(colors)):
        dcolorscale.extend([[nvals[k], colors[k]], [nvals[k + 1], colors[k]]])
    return dcolorscale
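A small sketch of the output shape, using made-up boundary values and colors:

bvals = [0, 1, 3]
colors = ["#ff0000", "#00ff00"]
discrete_colorscale(bvals, colors)
# -> [[0.0, '#ff0000'], [0.333..., '#ff0000'],
#     [0.333..., '#00ff00'], [1.0, '#00ff00']]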
c3b4268416db6164708e1175558edcc6463fcbc8
67,835
def convertListDomainToDict(list_domains: list):
    """
    Convert a list of domain objects to a dictionary with the designation as
    the key and the id as the value.

    :param list_domains: list of domains json objects
    :type list_domains: list[DomainJ]
    :return: dictionary
    :rtype: dict[designation] = id
    """
    dict_domains = {x.designation: x.id for x in list_domains}
    return dict_domains
ba2a528d96f3394f87d1c25e63e5f747b6aaed12
67,839
import requests
import json

def get(url, params=None):
    """
    Performs a GET request to the given url with supplied params.
    If an error happens it is printed to the console and None is returned.
    """
    response = requests.get(url, params)
    if response.status_code == 200:
        return json.loads(response.text)
    else:
        print(f"Unable to perform REST call {url} with params {params}. Status {response.status_code}. Message: {response.text}")
        return None
640c3550bc188b95f382e7dd7137c7487396657b
67,844
def target_subnet(model, subnet_idx):
    """
    model = Keras model
    subnet_idx = indexes of the subnet you want to target.
        ex: subnet_idx = [1, 0] -> targets model.layers[1].layers[0]
    """
    subnet = model
    for idx in subnet_idx:
        subnet = subnet.layers[idx]
    return subnet
df6092b0287e5dde53f5ad0363ae635d91582a58
67,845
def annual_metrics(globvars, mass, prodloss, wetdep, drydep):
    """
    Computes the following terms:
        1. "dyn" : Ox subsiding from the stratosphere
        2. "net" : Net Ox = (POx-LOx) + Dyn - Drydep - Wetdep
        3. "life": Ox lifetime (d) = Ox burden / (LOx + Drydep + Wetdep)

    Args:
        globvars : _GlobVars
            Global variables needed for budget computations.
        mass     : dict
        prodloss : dict
        wetdep   : dict
        drydep   : numpy float64
            Mass, prod/loss, and deposition terms

    Returns:
        result : dict
            Contains dyn, net, and life terms.
    """
    result = {}
    acc = mass["Ox_acc"]
    burden = mass["Ox_end"]
    chem = prodloss["POx-LOx"]
    lox = prodloss["LOx"]
    wetd = wetdep["Total"]
    result["dyn"] = acc - (chem - wetd - drydep)
    result["net"] = (chem + result["dyn"]) - (wetd + drydep)
    result["life"] = (burden / (lox + wetd + drydep)) * globvars.d_per_a
    return result
cd3b3562630139a00f6db8da465dea31527155bb
67,846
def find_distance(df):
    """
    Finds the number of words between the gender pronoun word and the entity word.
    """
    df = df.values
    g_idx = df[0]
    p_idx = df[1]
    return abs(g_idx - p_idx)
dc0158f8180cf8b1448b988300c2e940de5dfaf5
67,850
import string

def pre_process_sentence(sentence: str) -> str:
    """
    Given a sentence, return a new one all lower-cased and without punctuation.
    """
    # lower case
    lower_sentence = sentence.lower()
    # remove punctuation
    exclude = set(string.punctuation)
    return ''.join(ch for ch in lower_sentence if ch not in exclude)
3a1bc048d486a54eea0d11277bddee1e1187ac8d
67,860
import math

def heuristic(p1, p2):
    """
    Heuristic for a* algorithm.

    :param p1: tuple
    :param p2: tuple
    :return: int
    """
    x1, y1 = p1
    x2, y2 = p2
    return math.fabs(x1 - x2) + math.fabs(y1 - y2)
a774965b97a3a9b9ac6f0b6954f4e93d41406ff1
67,861
def _add_coordinates(domcfg):
    """
    If existing in the domcfg dataset, adds the lat/lon/depth variables as coordinates
    """
    coordinates = [
        "glamt", "glamu", "glamv", "glamf",
        "gphit", "gphiu", "gphiv", "gphif",
        "gdept_0", "gdepw_0", "gdept_1d", "gdepw_1d",
    ]
    for coord in coordinates:
        if coord in domcfg:
            domcfg.coords[coord] = domcfg[coord]
    return domcfg
adec2027dd6da6167c897d964010f8747170a0bf
67,864
def list_journals(config):
    """List the journals specified in the configuration file"""
    sep = "\n"
    journal_list = sep.join(config['journals'])
    return journal_list
3c4ec8e93b896022ccd934bc3052b76f72f251f8
67,876
def get_labels(cs):
    """Return list of every label."""
    records = []
    for c in cs:
        records.extend(c.get('labels', []))
    return records
b3017c7752089ec3c5e06306982dd0c0bb7c44d5
67,878
def le(x, y):
    """Implement `le`."""
    return x <= y
e3c0601970460c90824a18be8a9ea2474f16e5a1
67,879
def are_close(a, b, epsilon):
    """Check if a and b are close to within epsilon."""
    return (a - epsilon) <= b and (a + epsilon) >= b
846eb4cd6e67958cfd1ec17fa05e1d628edfe4f5
67,884
import tempfile
import pickle

def _dump(partition):
    """ Dump the given partition to an external source.

    The default implementation is to pickle the list of objects to a temporary file.

    :param partition: The partition of objects to dump.
    :return: Unique id which can be used to reload the serialized partition.
        In the case of the default implementation this is the path to the
        temporary file.
    """
    with tempfile.NamedTemporaryFile(delete=False) as fileobj:
        for item in partition:
            pickle.dump(item, fileobj)
        return fileobj.name
363b5a70848fe09b0c1ce9523a2f3e3abc23f9d2
67,885
def keep_last_labtest_instance(df):
    """ select last instance of every type of test for a patient """
    df = df.sort_values('charttime', axis=0)
    df = df.drop_duplicates(subset=['hadm_id', 'itemid'], keep='last', ignore_index=True)
    return df
82eee9ce875f61d504775a2ba225d3816d51fbf6
67,887
def expand_boxes(boxes, scale):
    """Expand an array of boxes by a given scale."""
    w_half = (boxes[:, 2] - boxes[:, 0]) * .5
    h_half = (boxes[:, 3] - boxes[:, 1]) * .5
    x_c = (boxes[:, 2] + boxes[:, 0]) * .5
    y_c = (boxes[:, 3] + boxes[:, 1]) * .5

    w_half *= scale
    h_half *= scale

    boxes_exp = boxes.copy()
    boxes_exp[:, 0] = x_c - w_half
    boxes_exp[:, 2] = x_c + w_half
    boxes_exp[:, 1] = y_c - h_half
    boxes_exp[:, 3] = y_c + h_half
    return boxes_exp
6c6d347d4fad8748f10af317497b3daf8e6794bf
67,889
from typing import Dict
from typing import cast

import requests

def default_headers() -> Dict:
    """ Return default headers with a custom user agent """
    headers = requests.utils.default_headers()  # type: ignore
    headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                                  'Chrome/74.0.3729.169 Safari/537.36'})  # Chrome v74 2020-06-25
    return cast(Dict, headers)
37554ce3d270751b501d3c29f1b8b50da845a0be
67,890
def fitness_func_oob(vector):
    """ out of bounds fitness func, returns a negative fitness"""
    return -1
720637750f94391bd178415885f2c33754342333
67,891
import operator

def get_field_set(items, getter=operator.attrgetter('id'), cast=int):
    """ Get ids via getter and collect them in a set """
    return {cast(getter(item)) for item in items}
a3aa9a4837743cf6746cef7fcbd11399325bb021
67,894
def commands_not_installed(commands, completer_content):
    """
    Check for installed commands in an existing completer file.

    :param commands: List of commands found in completions directory
    :param completer_content: Content of the currently installed completer file
    :return: List of not installed commands
    """
    return [
        command for command in commands if command not in completer_content
    ]
a318cd596f4b019341733490f9c31bb229fdbb4a
67,897
def get_model_cache_key(model):
    """
    Returns a part of the cache key used to identify the model.

    :param model: model, on which cache depends
    :return: Part of cache key used to identify model
    """
    return f'{model._meta.app_label}.{model.__name__}'
bd2e80025a4e559b3b307ea3cecf423e26fb4f76
67,903
def get_inputs(filename):
    """
    The input file has instructions in each line with a command and a value.
    This function returns the instructions as a list of lists.
    """
    with open(filename, 'r') as input_file:
        inputs = [[item[:1], int(item[1:])]
                  for item in input_file.read().splitlines()]
    return inputs
35c7a4e9f23519525f9785549bcc93b082acc647
67,911
def fetch_courses(job_config, data_bucket, data_dir="morf-data/"):
    """
    Fetch list of course names in data_bucket/data_dir.

    :param job_config: MorfJobConfig object.
    :param data_bucket: name of bucket containing data; s3 should have read/copy access to this bucket.
    :param data_dir: path to directory in data_bucket that contains course-level directories of raw data.
    :return: courses; list of course names as strings.
    """
    s3 = job_config.initialize_s3()
    if not data_dir.endswith("/"):
        data_dir = "{0}/".format(data_dir)
    bucket_objects = s3.list_objects(Bucket=data_bucket, Prefix=data_dir, Delimiter="/")
    courses = [item.get("Prefix").split("/")[1] for item in bucket_objects.get("CommonPrefixes")]
    return courses
3fe3ed0670d3d22950d8be393404fc9665c4334d
67,916
def read_keywords(args):
    """This function reads the keywords from the input file and creates:

    - a dictionary where the key is the old name and the value is the new name;
      these keywords will be further processed.
    - a list of keywords which will not be processed, typically keywords with
      argument(s) in their names.
    - a list of duplicates - duplicated keyword names or names which are parts
      of another keyword name; they will not be processed.

    :param args: Parsed arguments.
    :type args: ArgumentParser
    :returns: keyword names - dictionary where the key is the old name and the
        value is the new name; ignored keyword names - list of keywords which
        will not be processed; duplicates - duplicated keyword names or names
        which are parts of another keyword name, they will not be processed.
    :rtype: tuple(dict, list, list)
    """
    kw_names = dict()
    ignored_kw_names = list()
    duplicates = list()

    for line in args.input:
        old_name, new_name = line.split(args.separator)
        if '$' in old_name:
            ignored_kw_names.append((old_name, new_name[:-1]))
        elif old_name in kw_names.keys():
            duplicates.append((old_name, new_name[:-1]))
        else:
            kw_names[old_name] = new_name[:-1]

    # Remove duplicates:
    for old_name, _ in duplicates:
        new_name = kw_names.pop(old_name, None)
        if new_name:
            duplicates.append((old_name, new_name))

    # Find KW names which are parts of other KW names.
    # Iterate over a copy of the keys, because entries may be popped along the
    # way (popping while iterating the dict itself raises RuntimeError).
    for old_name in list(kw_names.keys()):
        count = 0
        for key in kw_names.keys():
            if old_name in key:
                count += 1
            if old_name in kw_names[key]:
                if old_name != key:
                    count += 1
        if count > 1:
            duplicates.append((old_name, kw_names[old_name]))
            kw_names.pop(old_name)

    return kw_names, ignored_kw_names, duplicates
fd261c6819424e171776130e74538b05baf6e830
67,920
import importlib

def get_suite_amount_of_workers(workspace, project, suite):
    """Get the amount of workers defined in a suite.
    Default is 1 if suite does not have workers defined"""
    amount = 1
    suite_module = importlib.import_module(
        'projects.{0}.suites.{1}'.format(project, suite), package=None)
    if hasattr(suite_module, 'workers'):
        amount = suite_module.workers
    return amount
7126ba3b1cfe3ab39f100fbdc8147009f50e7440
67,922
def mk_find_in_field_logical_query(field, query):
    """
    Allows one to create a "find in field" query of any logical complexity
    (since AND, OR, and NOT are supported).

    field is the field to consider
    query can be a string, a tuple, or a list, and can be nested:
        if query is a string, it is considered to be "must be equal to this";
            if the string starts with "-", it is considered to be
            "must NOT equal to this"
        if query is a tuple, take the conjunction (AND) of the tuple's elements
        if query is a list, take the disjunction (OR) of the list's elements
    """
    if isinstance(query, str):
        if query[0] == '-':
            return {field: {'$not': {'$eq': query[1:]}}}
        else:
            return {field: query}
    elif isinstance(query, tuple):
        return {'$and': [mk_find_in_field_logical_query(field, q) for q in query]}
    elif isinstance(query, list):
        return {'$or': [mk_find_in_field_logical_query(field, q) for q in query]}
    else:
        raise TypeError("query must be a string, tuple, or list")
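A hypothetical nested query, just to show how the string/tuple/list cases combine into a Mongo-style filter; the field and values are made up:

mk_find_in_field_logical_query('color', ['red', ('shiny', '-blue')])
# -> {'$or': [{'color': 'red'},
#             {'$and': [{'color': 'shiny'},
#                       {'color': {'$not': {'$eq': 'blue'}}}]}]}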
e011ebe3a9b134e20532c04024c84ba40d0a6a1c
67,926
def get_decimal(f, aprox=2):
    """Gets the decimal part of a float

    Args:
        f (float): Real number to get the decimal part from
        aprox (int)(optional): Number of decimal digits, default is 2.

    Returns:
        float: Decimal part of f
    """
    f = round((f - int(f)), aprox)
    return f
6b38939981d5f87d8d571855216bd6f452f9ef63
67,927
def fitness_fn(turns, energy, isDead):
    """Fitness function that scores based on turns and energy.

    Provides a score to a creature, with 3 times the amount of turns plus the
    energy, with a bonus for surviving.

    Args:
        turns: number of turns a creature survived.
        energy: amount of energy left after simulation.
        isDead: boolean of death state.

    Returns:
        int: score based on function calculation
    """
    if isDead:
        return (3 * turns) + energy
    else:
        return (3 * turns) + energy + 120
823d1295bf64c46cf0e4bb2a7b24b55c2024282d
67,935
def create_epoch(f, name, start_time, stop_time):
    """ add an <epoch_X>.  f is the h5gate file object.
    name is name of epoch
    start_time and stop_time should both be float64
    Returns the group for the created epoch.
    """
    epoch = f.make_group("<epoch_X>", name)
    epoch.set_dataset("start_time", start_time)
    epoch.set_dataset("stop_time", stop_time)
    return epoch
d3b88d208e7576bd7195b89b2394bcd77b0970ff
67,950
def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None):
    """Checks that there are no tab characters in any of the text files to be
    submitted.
    """
    # In addition to the filter, make sure that makefiles are blacklisted.
    if not source_file_filter:
        # It's the default filter.
        source_file_filter = input_api.FilterSourceFile

    def filter_more(affected_file):
        return (not input_api.os_path.basename(affected_file.LocalPath()) in
                ('Makefile', 'makefile') and
                source_file_filter(affected_file))

    tabs = []
    for f, line_num, line in input_api.RightHandSideLines(filter_more):
        if '\t' in line:
            tabs.append('%s, line %s' % (f.LocalPath(), line_num))
    if tabs:
        return [output_api.PresubmitPromptWarning('Found a tab character in:',
                                                  long_text='\n'.join(tabs))]
    return []
454f7dd0dd16a9f9fd23911a0d3b00c6dc4b4de3
67,953
def parse_filename(filename, full_output=False):
    """ Return the basic stellar parameters from the filename. """
    basename = filename.split("/")[-1]
    teff = basename[1:5]
    logg = basename.split("_")[1][1:]
    feh = basename.split("_")[5][1:]
    parameters = map(float, [teff, logg, feh, int(basename[0].lower() == "s")])
    if full_output:
        names = ("effective_temperature", "surface_gravity", "metallicity",
                 "is_spherical?")
        return (parameters, names)
    return parameters
112fe1e70ed9244579755b791628f362d3e76e24
67,955
def retrieve_termination_obj(nb, termination_type, dev_name, termination_name):
    """Searches for a NetBox termination object of a given name from a given device

    Args:
        nb: PyNetbox connection to a Netbox instance
        termination_type: NetBox termination object type (ie. dcim.interface)
        dev_name: Name of NetBox device being searched
        termination_name: Name of NetBox termination object being searched

    Returns:
        If found: Returns NetBox termination object
        If not found: Returns None
    """
    termination_obj = None
    if termination_type == "dcim.interface":
        termination_obj = nb.dcim.interfaces.get(device=dev_name, name=termination_name)
    elif termination_type == "dcim.frontport":
        termination_obj = nb.dcim.front_ports.get(device=dev_name, name=termination_name)
    elif termination_type == "dcim.rearport":
        termination_obj = nb.dcim.rear_ports.get(device=dev_name, name=termination_name)
    return termination_obj
b6b8542196ef3197b1f99190f9ced2ca7bab7ef2
67,958
def setup_method_arguments(method, bounds):
    """ prepare method specific arguments """
    kwargs = {}
    # pass bounds to methods that support it
    if method in ["L-BFGS-B", "TNC", "SLSQP"]:
        kwargs['bounds'] = bounds
    return kwargs
88b1a9a21756228f23e6216a1287de6e205a8999
67,962
import re

def _valid_name(tensor_name):
    """Return a valid tensor name (strips '/', ':', etc)."""
    return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
078db600b49dd622f21e8882a6234d838980496f
67,967
def parse_name(text):
    """Split an arbitrary (maybe empty) word from the beginning of a string."""
    lth = 0
    while text[lth] not in ' ,*%();':
        lth += 1
    return text[:lth], text[lth:]
4538a216993868b18c902bb26f40f07f5d0999cb
67,968
import time

def wait_for_computation(predicate, timeout, retry_after):
    """Return wrapped function retrying computation until result satisfies
    given predicate or timeout is reached."""
    def wrapper(func):
        def compute(*args, time_left=timeout, **kwargs):
            if time_left <= 0:
                raise TimeoutError('Timeout reached!')
            try:
                result = func(*args, **kwargs)
                if predicate(result):
                    return result
                else:
                    time.sleep(retry_after)
                    return compute(*args, time_left=time_left - retry_after, **kwargs)
            except Exception:
                time.sleep(retry_after)
                return compute(*args, time_left=time_left - retry_after, **kwargs)
        return compute
    return wrapper
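A minimal usage sketch, with a made-up flaky function standing in for a remote call; it retries every 2 seconds until the result is "done" or the 30-second budget is exhausted:

import random

@wait_for_computation(predicate=lambda s: s == "done", timeout=30, retry_after=2)
def job_status():
    # stand-in for a flaky remote status check
    return random.choice(["pending", "done"])

status = job_status()  # returns "done", or raises TimeoutError after 30 s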
243f0f6fcc5a9eb15d0e8aa036ec484e85fd80f8
67,969
def trim_text(text, nbytes):
    # type: (str, int) -> str
    """Trim text such that its utf-8 encoded size does not exceed `nbytes`."""
    return text.encode("utf-8")[:nbytes].decode("utf-8", "ignore").strip()
c633bb41970aa0004f4238315df476c72ab86dff
67,973
import torch

def get_test_tensor(max_value=10, float=False):
    """Create simple test tensor."""
    tensor = torch.LongTensor(list(range(max_value)))
    if float:
        tensor = tensor.float()
    return tensor
323b85375032c24a18de2d1cd2019e39ef64c683
67,976
def task_eta(eta):
    """Converts a datetime.datetime into a taskqueue ETA.

    Args:
        eta: Naive datetime.datetime of the task's ETA.

    Returns:
        The ETA formatted as a string.
    """
    return eta.strftime('%Y/%m/%d %H:%M:%S')
5935f00912adb90f46657e397c835ab45d45af03
67,977
def bad_request(message: str, code: int = 400):
    """Format bad request response from a message."""
    return {
        'result': 'error',
        'message': message
    }, code
b9d0d673491c4391e7ca7633a78bccc4acc3b177
67,978
def _arg(bytes, signed, dvi, _):
    """Read *bytes* bytes, returning the bytes interpreted as a signed integer
    if *signed* is true, unsigned otherwise."""
    return dvi._arg(bytes, signed)
ad5ba774d27a671b9d84428a2e5b13600d5fe516
67,979
def solar_geometric_mean_anomaly(julian_century):
    """Returns the Solar Geometric Mean Anomaly for the given Julian Century,
    julian_century."""
    solar_geometric_mean_anomaly = 357.52911 + julian_century * (
        35999.05029 - 0.0001537 * julian_century
    )
    return solar_geometric_mean_anomaly
2d859a748020d3bad144fff0dd299d22a70e692c
67,982
def splitPackageName(packageName):
    """e.g. given com.example.appname.library.widgetname
    returns com
            com.example
            com.example.appname
    etc.
    """
    result = []
    end = packageName.find('.')
    while end > 0:
        result.append(packageName[0:end])
        end = packageName.find('.', end + 1)
    result.append(packageName)
    return result
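A short example with an illustrative package name:

splitPackageName("com.example.appname")
# -> ['com', 'com.example', 'com.example.appname']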
180606df01b0b342e050ed0467c875ba3e3e1121
67,983
def valid_csv_header(headers, expected_headers):
    """return True if all of expected headers are present in headers"""
    return all(h in headers for h in expected_headers)
3becea0c27aea3081af5da2b2a1d732bec752530
67,991
def calc_set(pos, n):
    """
    Computes the subset of {1, 2, ..., `n`} with given rank `pos` in the
    ordering induced by gray ordering of indicator vectors.
    """
    binary = [1 & (pos >> (n - k - 1)) for k in range(n)]
    assoc_seq = list(map(lambda x: x[0] ^ x[1], zip([0] + binary[:-1], binary)))
    return [x + 1 for x in range(n) if assoc_seq[x]]
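For illustration, the first few subsets of {1, 2, 3} in this Gray-code ordering:

[calc_set(pos, 3) for pos in range(4)]
# -> [[], [3], [2, 3], [2]]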
6cbf5e11ae51137e339bdafa2aefc86cde3a5386
67,997
def instr_to_intcode(instructions):
    """ Takes a string of instructions written in ASCII and converts them to
    integers for using as input for the intcode program. """
    output = []
    for char in instructions:
        output.append(ord(char))
    output.append(ord('\n'))
    return output
2ffd6523a2396dfa229d55760a98d419fbe144db
67,998
def names_in_graph(list_of_names, graph):
    """ Return true if all names are in the graph """
    return all([name in graph.keys() for name in list_of_names])
df1d767cfadcf07fbbb11fc7eedd4266dbe53380
68,003
import re

def prettify_keys(string):
    """Utility function used to clean keys with numerical values, so that
    they can be ordered alphabetically the way they should be."""
    r = re.compile("bandwidth_([0-9]{1,3})")

    def replacer(match):
        m = match.groups()[0]
        return "bandwidth " + m

    return r.sub(replacer, string)
6a22ea364eb3ca6c2469f6fdfec2c8c16b2da6c1
68,006
import inspect

def hasarg(func, arg):
    """
    simple check whether a function takes a parameter as input

    Parameters
    ----------
    func: function
        the function to be checked
    arg: str
        the argument name to be found

    Returns
    -------
    bool
        does the function take this as argument?
    """
    return arg in inspect.getfullargspec(func).args
ec726466fafff29645de4e620944f2e7c3da7989
68,007
def _extract_first_from_str(content: str) -> str:
    """Extract first line from str without splitting by line."""
    if not content:
        return content

    newline_pos = content.find("\n")
    if newline_pos == -1:
        return content

    return content[:newline_pos]
bdea67f83d111425e9682a7974a20bf6f0b031b3
68,010
import inspect

def get_init_params(cls):
    """Return the parameters expected when calling the class."""
    initializer = cls.__init__
    if initializer is object.__init__ and cls.__new__ is not object.__new__:
        initializer = cls.__new__
    try:
        return list(inspect.signature(initializer).parameters)[1:]
    except TypeError:
        return None
134460accc96248178eb52a2b2be744af1198187
68,011
def _match_linear_pattern(root, pattern):
    """
    Use Depth First Search to match the pattern

    :param root: operation
    :param pattern: List[Node]
    :return: Return List[operation] if pattern matches entirely else []
    """
    op = root
    if not pattern or len(op.outputs) != 1:
        return []

    node = pattern[0]
    if op.op_type != node.op_type:
        return []
    if not node.match_criterion(op):
        return []

    for child in op.outputs[0].child_ops:
        op_list = [op] + _match_linear_pattern(child, pattern[1:])
        if len(op_list) == len(pattern):
            return op_list
    return []
3f6e95d78a46f2bd2da5c7490fc5f9d4cf27e2c1
68,016
def to_iterable(var):
    """
    convert things to list
    treat string as not iterable!
    """
    try:
        if type(var) is str:
            raise Exception
        iter(var)
    except Exception:
        return [var]
    else:
        return var
e7c90f209600c0e81adaf437f53cdb971f7918eb
68,021
def generate_tld_cols(hostname_split_df, hostnames, col_len):
    """
    This function generates tld columns.

    :param hostname_split_df: Hostname splits.
    :type hostname_split_df: cudf.DataFrame
    :param hostnames: Hostnames.
    :type hostnames: cudf.DataFrame
    :param col_len: Hostname splits dataframe columns length.
    :return: Tld columns with all combination.
    :rtype: cudf.DataFrame

    Examples
    --------
    >>> import cudf
    >>> from clx.dns import dns_extractor as dns
    >>> hostnames = cudf.Series(["www.google.com", "pandas.pydata.org"])
    >>> hostname_splits = dns.get_hostname_split_df(hostnames)
    >>> print(hostname_splits)
         2       1       0
    0  com  google     www
    1  org  pydata  pandas
    >>> col_len = len(hostname_splits.columns) - 1
    >>> dns.generate_tld_cols(hostname_splits, hostnames, col_len)
         2       1       0 tld2        tld1               tld0
    0  com  google     www  com  google.com     www.google.com
    1  org  pydata  pandas  org  pydata.org  pandas.pydata.org
    """
    hostname_split_df = hostname_split_df.fillna("")
    hostname_split_df["tld" + str(col_len)] = hostname_split_df[col_len]
    # Add all other elements of hostname_split_df
    for j in range(col_len - 1, 0, -1):
        hostname_split_df["tld" + str(j)] = (
            hostname_split_df[j]
            .str.cat(hostname_split_df["tld" + str(j + 1)], sep=".")
            .str.rstrip(".")
        )
    # Assign hostname to tld0, to handle received input is just domain name.
    hostname_split_df["tld0"] = hostnames
    return hostname_split_df
78b9cd967bd57e95622ee09f7fb3b075544be154
68,030
from typing import Dict
from typing import List

def counts_to_list(counts: Dict[str, int]) -> List[int]:
    """Converts counts to a list representation

    :param counts: a Qiskit-style counts dictionary
    :return: a list of integers
    """
    num_bits = len(list(counts.keys())[0].replace(' ', ''))
    counts_list = [0] * 2 ** num_bits
    for state in counts:
        f_state = state.replace(' ', '')
        counts_list[int(f_state, 2)] = counts[state]
    return counts_list
d29d6c86b24d47ef77764eeeff547a85f4acb4a6
68,033
def get_shape_from_label(label):
    """
    Extract the output shape of a flattened pyNN layer from the layer name
    generated during parsing.

    Parameters
    ----------
    label: str
        Layer name containing shape information after a '_' separator.

    Returns
    -------
    : list
        The layer shape.

    Example
    -------
    >>> get_shape_from_label('02Conv2D_16x32x32')
    [16, 32, 32]
    """
    return [int(i) for i in label.split('_')[1].split('x')]
ae41a785148373ee579b44e91758ac4297397fab
68,035
import re

def _ClientIDFromSessionID(session_id):
    """Extracts the client id from a session id."""
    parts = session_id.Split(4)
    client_id = parts[0]
    if re.match(r"C\.[0-9a-f]{16}", client_id):
        return client_id
    raise ValueError("Unable to parse client id from session_id: %s" % session_id)
3343950a41d98a715255ccdf35fc6950ca17d73d
68,036
def generate_ena_api_endpoint(result: str, data_portal: str, fields: str, optional: str = ''):
    """
    Generate the url for ENA API endpoint

    :param result: either read_run (for experiment, file, dataset import)
        or analysis (for analysis import)
    :param data_portal: either ena (legacy data) or faang (faang data)
    :param fields: all (only faang data supports all) or list of fields
        separated by ',' (for legacy data)
    :param optional: optional constraint, e.g. species
    :return: the generated url
    """
    if optional == "":
        return f"https://www.ebi.ac.uk/ena/portal/api/search/?" \
               f"result={result}&format=JSON&limit=0&fields={fields}&dataPortal={data_portal}"
    else:
        return f"https://www.ebi.ac.uk/ena/portal/api/search/?" \
               f"result={result}&format=JSON&limit=0&{optional}&fields={fields}&dataPortal={data_portal}"
100e7cfba49cb97ebf5e66c84523fa758118f7ec
68,038
def gf_int(a, p):
    """Coerce `a mod p` to an integer in `[-p/2, p/2]` range. """
    if a <= p // 2:
        return a
    else:
        return a - p
56af5f832b896f4fa9f810c3fb95619f90cf2a23
68,040
import re

def is_abspath(pathname):
    """
    Return True if pathname is an absolute pathname, else False.
    """
    return bool(re.match(r"^([/\\])|([a-zA-Z]:)", pathname))
5ae179d0bd88d72531a59b2c522c69d0983d89c4
68,043
def format_to_string(obj):
    """ Formatter to print strings and bytes without leading/trailing quotes """
    if isinstance(obj, bytes):
        return repr(obj.decode()).strip('"\'')
    if isinstance(obj, str):
        return repr(obj).strip('"\'')
    return obj
bf3f765f7837d39d1f29dff93fece3903b231b1b
68,044
def calculate_prediction_coverage(y_actual, y_predicted):
    """
    Calculates the percentage of known user-item pairs which were predicted by the algorithm.
    It is different from item_coverage in that only the user's actual ratings are analyzed,
    versus all potential ratings. In this manner it is likely that very low occurring items or
    users wouldn't hurt the final metric as much as they would for calculate_item_coverage.
    It is very important to NOT pass in the sorted and cut prediction RDD.

    Args:
        y_actual: actual ratings in the format of an array of [ (userId, itemId, actualRating) ]
        y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
            It is important that this is not the sorted and cut prediction RDD

    Returns:
        prediction_coverage: value representing the percentage of user-item pairs
            that were able to be predicted
    """
    predictionsAndRatings = y_predicted.map(lambda x: ((x[0], x[1]), x[2])) \
        .join(y_actual.map(lambda x: ((x[0], x[1]), x[2])))

    num_found_predictions = predictionsAndRatings.count()
    num_test_set = y_actual.count()

    prediction_coverage = num_found_predictions / float(num_test_set) * 100
    return prediction_coverage
c3f3130ac537be441985446bd7b7e76e1aa7cbc7
68,046
import re

def find_at(text):
    """
    @ - Used to mention someone in tweets

    Parameters
    ----------
    text: str
        Text selected to apply transformation

    Examples
    --------
    >>> sentence = "@David,can you help me out"
    >>> find_at(sentence)
    'David'
    """
    line = re.findall(r'(?<=@)\w+', text)
    return " ".join(line)
a7c4cbedbcd5d65d6ea60682624409b297f73092
68,047
def parse_float(str_value):
    """
    This function converts a string to float just like the built-in float()
    function. In addition to "normal" numbers it also handles numbers such as
    1.2D3 (equivalent to 1.2E3).
    """
    try:
        return float(str_value)
    except ValueError:
        return float(str_value.lower().replace("d", "e"))
52c04a69810a546bb7f8dfd06a0ab9b52069e2b3
68,050
def fetch_page(base_url, page, session):
    """
    Fetch a particular page number for a GitHub API resource.

    :param unicode base_url: the URL of the GitHub API resource.
    :param int page: the page number to request (0 indexed).
    :param requests.Session session: the requests Session to use for the request.
    :return dict: The deserialized response content from GitHub.
    """
    return session.get(base_url + "?page={}".format(page)).json()
a44da385b619add78cbe8f924231b05708c49b65
68,053
import collections
import csv

def read_value_file(filename, delim):
    """Read value file.

    The value file is a simple csv structure with a key and value.

    Arguments:
        filename (str): file to read
        delim (str): csv delimiter

    Returns:
        OrderedDict with results.
    """
    result = collections.OrderedDict()
    if filename:
        with open(filename) as f:
            reader = csv.reader(f, delimiter=delim)
            for row in reader:
                # skip blank or comment lines
                if not row or row[0].startswith("#"):
                    continue
                if len(row) != 2:
                    continue
                result[row[0]] = row[1]
    return result
21494a82fe7c120cffdb399c848976027f362ad3
68,054
def clean_ns(tag):
    """Return a tag and its namespace separately."""
    if '}' in tag:
        split = tag.split('}')
        return split[0].strip('{'), split[-1]
    return '', tag
b7e5c09c2ee1e8e881cfbbbd37a44510c8ba090d
68,058
def get_e_continued_fraction(n):
    """ Return the first n digits of the continued fraction of e."""
    if not n:
        return []
    result = [2]  # First digit
    n -= 1
    k = 1
    # Pattern is 1, 2k, 1, ...
    for i in range(n):
        if i % 3 == 1:
            result.append(k * 2)
            k += 1
        else:
            result.append(1)
    return result
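For reference, the first nine partial quotients of e are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], which the function reproduces:

get_e_continued_fraction(9)
# -> [2, 1, 2, 1, 1, 4, 1, 1, 6]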
4c9ea0386e4eb646061343a78e50c9f15af877d6
68,062