content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def ensure_dtype(data):
    """Cast all known export columns of *data* to their canonical dtypes.

    Keeping input and output files on identical dtypes makes them
    directly comparable. The dtype assignments are taken from the
    Hestia website.

    Args:
        data: pandas DataFrame containing the Hestia export columns.

    Returns:
        A new DataFrame with corrected dtypes.
    """
    dtypes = {
        'cycle.@id': str, 'cycle.name': str, 'cycle.description': str,
        'cycle.endDate': str, 'cycle.functionalUnit': str,
        'impactAssessment.@id': str, 'impactAssessment.name': str,
        'impactAssessment.endDate': str,
        'impactAssessment.functionalUnitQuantity': int,
        'impactAssessment.allocationMethod': str,
        'impactAssessment.systemBoundary': bool,
        'site.@id': str, 'site.name': str, 'site.siteType': str,
        'source.@id': str, 'source.name': str,
        'cycle.site.@id': str, 'cycle.defaultSource.@id': str,
        'cycle.products.0.term.@id': str, 'cycle.products.0.term.name': str,
        'cycle.products.0.value': float, 'cycle.products.0.primary': bool,
        'impactAssessment.cycle.@id': str,
        'impactAssessment.country.@id': str,
        'impactAssessment.country.name': str,
        'impactAssessment.product.@id': str,
        'impactAssessment.product.name': str,
        'impactAssessment.source.@id': str,
        'site.defaultSource.@id': str,
        'site.country.@id': str, 'site.country.name': str,
        'source.bibliography.name': str,
        'source.bibliography.documentDOI': str,
        'source.bibliography.title': str,
    }
    # The indexed columns follow a regular pattern; generate them instead
    # of spelling out every entry by hand.
    for i in range(4):
        dtypes[f'cycle.inputs.{i}.term.@id'] = str
        dtypes[f'cycle.inputs.{i}.term.name'] = str
        dtypes[f'cycle.inputs.{i}.value'] = float
        dtypes[f'site.measurements.{i}.term.@id'] = str
        dtypes[f'site.measurements.{i}.term.name'] = str
        dtypes[f'site.measurements.{i}.value'] = float
    for i in range(3):
        dtypes[f'cycle.emissions.{i}.term.@id'] = str
        dtypes[f'cycle.emissions.{i}.term.name'] = str
        dtypes[f'cycle.emissions.{i}.value'] = float
        dtypes[f'cycle.emissions.{i}.methodModel.@id'] = str
        dtypes[f'cycle.emissions.{i}.methodModel.name'] = str
        dtypes[f'cycle.emissions.{i}.methodTier'] = str
    return data.astype(dtype=dtypes)
bb99607a93cd5ef383c754e5d2dba62ed5478c06
657,851
import re


def encode(name, system='NTFS'):
    """Encode *name* into a valid filename for the given filesystem.

    Every character that is illegal on the target filesystem (reserved
    punctuation plus ASCII control characters) is replaced by ``_``.

    >>> encode('Test :1')
    'Test _1'
    """
    assert system == 'NTFS', 'unsupported filesystem'
    illegal = r'<>:"/\|?*' + ''.join(map(chr, range(32)))
    matcher = re.compile('|'.join(re.escape(ch) for ch in illegal))
    return matcher.sub('_', name)
1c8df18bde7535b8d7729bc708fd041236bd10fd
682,697
def collect_reducer_count(values):
    """Count reducer: report how many values were collected.

    >>> collect_reducer_count(['Sheep', 'Elephant', 'Wolf', 'Dog'])
    4
    """
    total = len(values)
    return total
f398326ffb9932a8a033d158140f065cff6d6453
657,975
def observer(*fields):
    """Observer decorator.

    Takes django field names that should be watched for mutations and
    records them on the decorated method as the private
    ``_observed_fields`` attribute; the ``ObserverMixin`` reads that
    attribute to know which fields to monitor.

    Args:
        *fields: names of the fields to observe.

    Returns:
        A decorator that attaches ``_observed_fields`` to its target.
    """
    def observer_wrapper(func):
        """Add the hidden property with the fields to observe."""
        # Bug fix: the message used to claim the required prefix was
        # "_observer_" while the check actually enforces "_observe_".
        assert func.__name__.startswith('_observe_'), \
            'Observed method names must begin with "_observe_" not %s' % func.__name__
        # pylint: disable=protected-access
        func._observed_fields = fields
        return func
    return observer_wrapper
7e49023470e677a3c56ae293daf47c628216c023
33,048
def libc_version(AVR_LIBC_VERSION):
    """Format an avr-libc version integer as a dotted version string.

    Example: 10604 -> 1.6.4
    """
    text = str(AVR_LIBC_VERSION)
    # major / minor / patch live in fixed digit positions
    fields = (text[0], text[1:3], text[3:5])
    return '.'.join(str(int(f)) for f in fields)
a600f5f1dc8a2959ac210fd955dc30f3201bf745
306,607
from typing import Dict
from typing import Tuple


def apply(dfg: Dict[Tuple[str, str], int]) -> Dict[Tuple[str, str], float]:
    """Compute the heuristics-miner causal graph from a directly-follows graph.

    Parameters
    ----------
    dfg: :class:`dict`
        directly follows relation: (activity, activity) -> num of occ.

    Returns
    -------
    :return: dictionary with all causal relations as keys; the value,
        between -1 and 1, indicates how strongly the relation holds.
    """
    causal: Dict[Tuple[str, str], float] = {}
    for src, dst in dfg:
        if (src, dst) in causal:
            continue  # already filled when the reversed pair was visited
        forward = dfg[(src, dst)]
        backward = dfg.get((dst, src), 0)
        strength = float((forward - backward) / (forward + backward + 1))
        causal[(src, dst)] = strength
        causal[(dst, src)] = -1 * strength
    return causal
c022aa8da1d5436f62b000619959a14db75672b2
45,235
def to_ascii(text):
    """Force a string or other value to ASCII text, ignoring errors.

    Parameters
    -----------
    text : any
        Input to be converted to an ASCII string.

    Returns
    -----------
    ascii : str
        Input as an ASCII string.
    """
    if hasattr(text, 'encode'):
        # str-like: round-trip through ASCII, dropping what can't encode
        return text.encode('ascii', errors='ignore').decode('ascii')
    if hasattr(text, 'decode'):
        # bytes-like: decode directly, ignoring invalid bytes
        return text.decode('ascii', errors='ignore')
    # anything else: fall back to its string representation
    return str(text)
44183b27e56912e99fb1d865ffd94497c50a6620
181,838
def _replace_keys(kwargs, replacements):
    """Rename illegal keys in *kwargs* per *replacements*, in place."""
    for old_key, new_key in replacements.items():
        if old_key in kwargs:
            # pop() combines the lookup and the deletion
            kwargs[new_key] = kwargs.pop(old_key)
    return kwargs
85e73ba58206c28a9bfa88599c65505febc91394
198,356
def commuting_sets_by_indices(pauli_sums, commutation_check):
    """Group terms from several pauli sums into commuting sets.

    :param pauli_sums: A list of PauliSum.
    :param commutation_check: a function that checks if all elements of a
        list and a single pauli term commute.
    :return: A list of commuting sets. Each set is a list of tuples (i, j):
        i is the index of the pauli sum the term came from, j the index of
        the term within that sum.
    """
    assert isinstance(pauli_sums, list)
    group_inds = []
    group_terms = []
    for sum_idx, pauli_sum in enumerate(pauli_sums):
        for term_idx, term in enumerate(pauli_sum):
            placed = False
            # try to place the term into the first group it commutes with
            for inds, terms in zip(group_inds, group_terms):
                if commutation_check(terms, term):
                    inds.append((sum_idx, term_idx))
                    terms.append(term)
                    placed = True
                    break
            if not placed:
                # no commuting group exists (or none exist yet): start one
                group_inds.append([(sum_idx, term_idx)])
                group_terms.append([term])
    return group_inds
4e5a7413f363a3c315df665c21cf5c00a3020df9
269,300
def initCalc(hOverR, R0, boxwidth, nParticles, nSmooth=32, ndim=3):
    """Derive the initial simulation quantities from the settings.

    Parameters
    ----------
    hOverR : float
        Ratio of scale height to distance from star.
    R0 : float
        Distance to star.
    boxwidth : float
        Width of the simulation box; the box height is set to 2H.
    nParticles : int
        Number of particles.
    nSmooth : int
        Number of neighbors to smooth over for the simulation.
    ndim : int
        Number of dimensions running in.

    Returns
    -------
    tuple
        (H, cs, smooth, boxshape, dDelta)
    """
    # Scale height; assuming G = 1 and Mstar = 1 in code units
    H = hOverR * R0
    omega0 = R0 ** (-1.5)
    cs = H * omega0
    # Approximate smoothing length from the mean particle spacing
    volume = H * boxwidth ** (ndim - 1)
    smooth = (float(nSmooth) * volume / nParticles) ** (1. / ndim)
    boxshape = [boxwidth, boxwidth, 2 * H]
    # Courant-limited time stepping for gas settling
    tCourant = smooth / cs
    dDelta = tCourant / 10.
    return H, cs, smooth, boxshape, dDelta
08225910f9aeb82d7982419d92142413473a8338
139,536
def make_list(items):
    """Render participants as a multicolumn itemize list.

    `items` is a list of (name, eliminated) tuples where the string is
    the participant name; eliminated participants are rendered in the
    ``elim`` color, everyone else in bold.
    """
    def render(name, eliminated):
        if eliminated:
            return f'\\textcolor{{elim}}{{{name}}}'
        return f'\\textbf{{{name}}}'

    body = '\n'.join(f'\\item {render(name, elim)}' for name, elim in items)
    return '\n'.join([r'\begin{AutoMultiColItemize}',
                      body,
                      r'\end{AutoMultiColItemize}'])
24e2b6727626405e6fb77593c681a325cd41fcfb
166,553
def scale_net_input_data(data):
    """Rescale 8-bit grayscale values into [0, 1] for the network."""
    full_scale = 255.
    return data / full_scale
cd088dcf53333efc9e698ab9b7c37dab8e9d4767
334,059
from typing import Tuple


def get_channel_values(digitalOut, func) -> Tuple:
    """Apply *func* to every DigitalOut channel index and collect the results."""
    channel_count = digitalOut.count()
    return tuple(map(func, range(channel_count)))
0f8fffa2bae965e72fe386ae356c8f11b14db9fc
653,910
from typing import Tuple


def default_pruning_sparsities_loss(extended: bool) -> Tuple[float, ...]:
    """Default sparsities to use for checking pruning effects on the loss.

    :param extended: extend the sparsities to a full 0.00-0.99 range
        instead of the default subset of target sparsities
    :return: the sparsities to check for effects on the loss
    """
    if extended:
        return tuple(float(pct) / 100.0 for pct in range(100))
    return 0.0, 0.2, 0.4, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 0.99
3af38c848b984860830572e7f1126acb0d907d4c
325,695
def get_issue_info(obj):
    """Retrieve the IssueTrackerIssue linked to *obj*, as a dict.

    Audit-scoped objects keep the issue on their audit; everything else
    keeps it directly on the object. Returns {} when no issue is linked.

    Args:
        obj: Instance of IssueTracked object.
    """
    holder = obj.audit if hasattr(obj, "audit") else obj
    issue_obj = holder.issuetracker_issue
    return issue_obj.to_dict() if issue_obj else {}
e9d1a493a3e47cf871827a70354fb40f0b948908
567,748
def set_op1_str(nvols):
    """Build the operand string used by the workflow nodes.

    Parameters
    ----------
    nvols : int
        Number of volumes.

    Returns
    -------
    strs : string
        Operand string.
    """
    return f'-Tmean -mul {int(nvols)} -div 2'
dd06e4fc1ed2872b341cac88f940d2bc39294a0d
95,913
def split_text(text: str, line_length: int) -> str:
    """Wrap *text* to at most *line_length* characters per line.

    Original line breaks are kept. A single word longer than
    *line_length* is left intact, and over-long whitespace-only lines
    are collapsed to empty lines.

    :param text: the text to split
    :param line_length: the max line length; must be higher than 1
    :return: the message split over multiple lines where possible
    """
    wrapped = []
    for original in text.split('\n'):
        # short lines are kept verbatim
        if len(original) <= line_length:
            wrapped.append(original)
            continue
        # long all-whitespace lines become empty lines
        if original.isspace():
            wrapped.append('')
            continue
        words = original.split(' ')
        current = words[0]
        for word in words[1:]:
            if len(current) + len(word) + 1 > line_length:
                # next word doesn't fit: emit the line, start a new one
                wrapped.append(current)
                current = word
            else:
                current += ' ' + word
        wrapped.append(current)
    return '\n'.join(wrapped)
70430bf584d6b332cb5e6721bd79ae0af59ca8ed
179,108
def zero_indices(values):
    """Return the indices of entries that evaluate to false ("zeros")."""
    indices = []
    for idx, value in enumerate(values):
        if not value:
            indices.append(idx)
    return indices
b71440f0eb895c1b432568656e38fc6557e18fd9
303,660
from typing import List
from typing import Iterable


def argmax_over_index_set(lst: List[float], index_set: Iterable[int]) -> List[int]:
    """Compute the argmax of a list restricted to a subset of indices.

    In contrast to the related `np.argmax`, this returns a list of ALL
    indices that attain the maximal value.

    Args:
        lst (List): Find the argmax of the values in this list
        index_set (Iterable): Restricts the list to the indices in this set

    Returns:
        List: elements of index_set at which lst has the maximal value
    """
    best_indices = [0]
    best_value = -float("inf")
    for idx in index_set:
        value = lst[idx]
        if value > best_value:
            best_value, best_indices = value, [idx]
        elif value == best_value:
            best_indices.append(idx)
    return best_indices
c9e0e76ecd61eb81bf87cc69a4b6e7c90cc08caa
341,554
def filter_by_book_style(bibtexs, book_style):
    """Return the bibtex objects of the selected book type.

    Args:
        bibtexs (list of core.models.Bibtex): queryset of Bibtex.
        book_style (str): book style key (e.g. JOURNAL)

    Returns:
        list of Bibtex objects
    """
    return list(filter(lambda bib: bib.bib_type_key == book_style, bibtexs))
ca3b46772930a6f6e28b6fc0ee4d175ee8d69c3c
703,474
from typing import List


def assert_optional_type_hints(contents: List[str]) -> List[str]:
    """
    Find parameters with default value None and add Optional[type] to them.

    NOTE(review): the rewritten lines reference ``Optional`` but this helper
    does not itself ensure ``from typing import Optional`` exists in the
    edited file -- confirm callers take care of that.

    :param contents: list of lines in file
    :return: list of lines in file
    """
    i = 0
    # NOTE(review): the final line is never inspected (i < len - 1), so a
    # def on the very last line of the file is skipped.
    while i < len(contents) - 1:
        line = contents[i].strip()
        if line.startswith("def"):
            # Advance end_index forward until the line closing the
            # signature (ending in ":") is reached.
            end_index = i
            while not line.endswith(":"):
                end_index += 1
                line = contents[end_index].strip()
            # Flatten the (possibly multi-line) signature and keep only the
            # text between the outermost parentheses.
            parameters_text = "".join(contents[i:end_index+1]).replace('\n', ' ')
            parameters_text = parameters_text[parameters_text.find("(") + 1 : parameters_text.rfind(")")]
            # Split on commas; fragments ending with "]" are the tail of a
            # subscripted type annotation and are discarded.
            parameters = [param.strip() for param in parameters_text.split(",") if param.strip() and not param.strip().endswith("]")]
            # find the parameters with default value None
            parameters_with_default_value_none = [
                parameter
                for parameter in parameters
                if "= None" in parameter
            ]
            # add Optional[type] to parameters with default value None
            for parameter in parameters_with_default_value_none:
                parameter_name = parameter.split(":")[0].strip()
                parameter_type = parameter.split(":")[1].strip()
                parameter_type = parameter_type.replace("= None", "").rstrip()
                # Rewrite every signature line that contains this parameter.
                for j in range(i, end_index + 1):
                    contents[j] = contents[j].replace(
                        parameter,
                        f"{parameter_name}: Optional[{parameter_type}] = None"
                    )
        i += 1
    return contents
d95a3eec4c3e2ef0874d5ee974c614eecdddc8a7
668,142
import torch


def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """One-dimensional linear interpolation for monotonically increasing
    sample points.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.

    This is confirmed to be a correct implementation. See
    https://github.com/pytorch/pytorch/issues/1552#issuecomment-979998307

    Args:
        x: the :math:`x`-coordinates at which to evaluate the interpolated values.
        xp: the :math:`x`-coordinates of the data points, must be increasing.
        fp: the :math:`y`-coordinates of the data points, same length as `xp`.

    Returns:
        the interpolated values, same size as `x`.
    """
    # Right endpoint of each bracketing interval, clamped so out-of-range
    # points extrapolate from the nearest segment.
    right = torch.clip(torch.searchsorted(xp, x, right=True), 1, len(xp) - 1)
    left = right - 1
    # Linear blend of the two bracketing sample values.
    return (fp[left] * (xp[right] - x) + fp[right] * (x - xp[left])) / (xp[right] - xp[left])
0ff3efc776a9ed1b5261f89067fd14cbbd2ba485
301,631
def minmax_seq(a):
    """Find min & max of a list with a sequential pairwise scan.

    - Time complexity: O(n) (~3n/2 comparisons).
    - Space complexity: O(1).

    Args:
        a: non-empty sequence of mutually comparable items.

    Returns:
        [minimum, maximum]
    """
    # Robustness fix: a single-element list used to raise IndexError on a[1].
    if len(a) == 1:
        return [a[0], a[0]]
    if a[0] < a[1]:
        cur_min, cur_max = a[0], a[1]
    else:
        cur_min, cur_max = a[1], a[0]
    # Process the rest in pairs: one comparison orders the pair, two more
    # update the running extrema.
    for i in range(2, len(a), 2):
        if i + 1 < len(a):
            if a[i] < a[i + 1]:
                _min, _max = a[i], a[i + 1]
            else:
                _min, _max = a[i + 1], a[i]
        else:
            # odd-length tail: the last element is both candidates
            _min, _max = a[i], a[i]
        if _min < cur_min:
            cur_min = _min
        if _max > cur_max:
            cur_max = _max
    return [cur_min, cur_max]
bfa80767f1b110a9fe44d3b8162e7bd2b85ff8b4
356,237
def compute_ema(ema0, y_value, alpha=0.1):
    """Exponential moving average step: ema_t+1 = (1-α)ema + αy"""
    assert isinstance(y_value, (int, float)), "y_value has to be a number"
    old_weight = 1 - alpha
    return old_weight * ema0 + alpha * y_value
3751b27e56a7849589eaa7282fb67d9f03a5062c
424,574
def get_tagged_atoms_from_mol(mol):
    """Collect the atom-mapped atoms of an RDKit molecule and their numbers.

    Parameters
    ----------
    mol: rdkit.Chem.Mol
        RDKit molecule.

    Returns
    -------
    atoms: List[rdkit.Chem.Atom]
        List of tagged atoms
    atom_tags: List[str]
        List of atom-mapping numbers
    """
    tagged = [atom for atom in mol.GetAtoms() if atom.HasProp("molAtomMapNumber")]
    tags = [str(atom.GetProp("molAtomMapNumber")) for atom in tagged]
    return tagged, tags
b32d57a2feb7196907cdf70bf62371f58fe66c6a
71,117
def count_file_lines(file_name: str) -> int:
    """Count the number of lines in a text file.

    Streams the file line by line instead of materializing every line in
    memory at once (the previous ``readlines`` call loaded the whole file).

    :param file_name: path of the file to inspect
    :return: number of lines
    """
    with open(file_name, encoding='utf-8') as f:
        return sum(1 for _ in f)
96d49242214bf9048747ba15633a600fc4978ee9
212,968
def topic_record_subject_name_strategy(ctx, record_name):
    """Construct a subject name in the form of {topic}-{record_name}.

    Args:
        ctx (SerializationContext): Metadata pertaining to the
            serialization operation.
        record_name (str): Record name.
    """
    subject = ctx.topic + "-" + record_name
    return subject
a4a29c93f2b13319889ce242a4fbe0e8254598a0
225,703
def arcs2d(arcs):
    """Convert arcseconds into degrees (1 degree = 3600 arcseconds)."""
    arcsec_per_degree = 3600.0
    return arcs / arcsec_per_degree
0570463fa2c0a2723959f43767319b9a8d6c75e4
686,024
def _segregate_NREM_runs(runs, min_separation):
    """Differentiate successive NREM runs by the gaps separating them.

    Stage 2 of detecting NREM cycles - minimum separation of successive runs.

    Parameters
    ----------
    runs : pd.DataFrame
        Successive NREM runs with onsets and offsets, thresholded based on
        durations. Output of _detect_NREM_runs().
    min_separation : int or float, positive non-zero
        Minimum duration in minutes for thresholding gaps between
        consecutive NREM runs.

    Returns
    -------
    runs : pd.DataFrame
        Updated with NREM cycle numbers and gap from next run.
    """
    # Gap between each run's offset and the following run's onset
    runs["Next_Run"] = runs["Onset"].shift(-1) - runs["Offset"]
    # A new cycle starts after every sufficiently long gap; the cumulative
    # count of such gaps numbers the cycles from 1, shifted by one row so
    # the gap flag applies to the *following* run.
    gap_is_long = runs["Next_Run"] > min_separation
    cycle_numbers = (1 + gap_is_long.cumsum()).shift(fill_value=1)
    # String identifier column
    runs["CYC"] = "CYC_" + cycle_numbers.astype("str")
    return runs
b443654d7991e38872cdb7d6dd01d39e76f40529
415,953
import math


def round_to_n(x: float, n_digits: int) -> float:
    """Round a floating point number to n significant digits.

    Args:
        x (float): Number to round
        n_digits (int): Number of digits to keep

    Returns:
        float: Rounded version of x with n_digits digits
    """
    # Zero and non-finite values (inf, nan) have no defined magnitude.
    if x == 0 or not math.isfinite(x):
        return x
    magnitude = math.floor(math.log10(abs(x)))
    return round(x, n_digits - magnitude - 1)
e7eab22122f741ed6b583fb6e4ce3abb0fa91271
669,393
def is_subj(tok):
    """Whether this token's dependency label marks it as a subject of its parent."""
    dep_label = tok.dep_
    return "subj" in dep_label
9a6d1b4ac759bac39d3ca14e929e9a0e96fb8c9a
282,612
import csv


def count_samples_from_tsv(phenofile, keep_hpos, invert=False):
    """Count samples in *phenofile* with at least one HPO term in *keep_hpos*.

    With ``invert=True``, count the samples matching none of them instead.
    """
    n = 0
    with open(phenofile) as fin:
        for sid, phenos in csv.reader(fin, delimiter='\t'):
            has_hit = any(h in phenos.split(';') for h in keep_hpos)
            # count matches normally, non-matches when inverted (XOR)
            if has_hit != invert:
                n += 1
    return n
ad6e9c87c31236598aabeb66e6fac613a5c3db21
528,119
def get_max_id(corpus):
    """Get the highest feature id that appears in the corpus.

    Parameters
    ----------
    corpus : iterable of iterable of (int, numeric)
        Collection of texts in BoW format.

    Returns
    ------
    int
        Highest feature id.

    Notes
    -----
    For empty `corpus` return -1.
    """
    highest = -1
    for document in corpus:
        if not document:
            continue  # empty documents contribute nothing
        doc_max = max(fieldid for fieldid, _ in document)
        if doc_max > highest:
            highest = doc_max
    return highest
ef0fe6612f01b1434fc8c375c899f3a83c26dcea
254,662
def kwargs_to_flags(**kwargs):
    """Convert `kwargs` to flags to pass on to CLI.

    True booleans become bare ``--key`` flags, False booleans are dropped,
    and any other value renders as ``--key=value``.
    """
    def render(key, val):
        if isinstance(val, bool):
            return f"--{key}" if val else None
        return f"--{key}={val}"

    rendered = (render(k, v) for k, v in kwargs.items())
    return " ".join(flag for flag in rendered if flag is not None)
aa672fe26c81e7aaf8a6e7c38354d1649495b8df
707,025
def case_default_panels(case_obj):
    """Get a list of case default panels from a case dictionary.

    Args:
        case_obj(dict): a case object

    Returns:
        case_panels(list): panel_name of every panel with is_default True
    """
    default_names = []
    for panel in case_obj.get("panels", []):
        if panel.get("is_default", None) is True:
            default_names.append(panel["panel_name"])
    return default_names
b59cf15cce29b2ce7c88d1ad91acce0b8c0e122b
291,409
def list_remove_repeat(x):
    """Remove the repeated items in a sequence, keeping first occurrences.

    Uses an O(n) ``dict.fromkeys`` pass (insertion-ordered) for hashable
    items, and falls back to the original O(n^2) membership scan when the
    items are unhashable (e.g. lists).
    """
    try:
        return list(dict.fromkeys(x))
    except TypeError:
        # unhashable items: linear-membership fallback
        unique = []
        for item in x:
            if item not in unique:
                unique.append(item)
        return unique
fe22b084887e00e4f25d85d388e18f4439a6a7a1
176,297
def generate_output(advisory_id: str, details: dict) -> str:
    """
    Generates the output for the given advisory_id and list of CPEs

    :param advisory_id: Advisory ID from the OVAL XML file
    :param details: Dictionary containing a list of CPEs and repositories for the advisory_id
    :return: Output to display on the CLI for the advisory
    """
    cpe_count = details.get('num_of_cpes', 0)
    repo_count = details.get('num_of_repos', 0)
    # One indented bullet line per CPE / repository.
    cpe_string = '\n'.join([f" - {cpe}" for cpe in details.get('list_of_cpes', [])])
    # NOTE(review): the loop variable is named `cpe` although it iterates
    # repositories -- rename candidate.
    repo_string = '\n'.join([f" - {cpe}" for cpe in details.get('list_of_repos', [])])
    output = f'Advisory {advisory_id}:\n'
    # NOTE(review): a count of 0 renders the singular form ("0 CPE").
    output += f' Contains {cpe_count} {"CPEs" if cpe_count > 1 else "CPE"}...\n{cpe_string}\n'
    output += (f' Applies to {repo_count} '
               f'{"repositories" if repo_count > 1 else "repository"}...\n{repo_string}\n')
    return output
06f19ea4988d95cba7c1afb23ad228b8f838fbb0
606,142
def to_int(s, default=0):
    """
    Return input converted into an integer. If failed, then return ``default``.

    Examples::

        >>> to_int('1')
        1
        >>> to_int(1)
        1
        >>> to_int('')
        0
        >>> to_int(None)
        0
        >>> to_int(0, default='Empty')
        0
        >>> to_int(None, default='Empty')
        'Empty'
    """
    try:
        result = int(s)
    except (TypeError, ValueError):
        return default
    return result
9e102ee1bbd116584575930477c78f6c139e5da2
330,289
def strip_stac_item(item: dict) -> dict:
    """Strip a STAC item, removing fields that are not stored.

    Mutates and returns *item*: drops the non-stored fields and records
    the self link (bucket prefix removed) as ``s3_key``.

    :param item dict: input stac item
    :rtype: dict
    :return: stripped stac item
    """
    stripped = item  # intentional alias: the input dict is modified in place
    s3_key = None
    for link in item["links"]:
        if link["rel"] == "self":
            s3_key = link["href"]
    assert s3_key is not None, "Can't find self key"
    # Remove fields that will not be stored
    for field in ("stac_version", "stac_extensions", "type",
                  "links", "bbox", "assets"):
        stripped.pop(field)
    # Keep only the path below the bucket host, e.g.
    # https://cbers-stac-0-6.s3.amazonaws.com/CBERS4/PAN5M/156/107/CBERS_4_PAN5M_20150610_156_107_L4.json
    stripped["s3_key"] = "/".join(s3_key.split("/")[3:])
    return stripped
1909b44f316875f0cd0d65fff8bd62329cd229e5
17,881
def convertAtoInt(s):
    """Convert a little-endian hex string to an integer.

    B402 -> 02B4 -> 692
    """
    byte_pairs = []
    while s:
        byte_pairs.append(s[-2:])  # take hex-digit pairs from the end
        s = s[:-2]
    return int(''.join(byte_pairs), 16)
fb866f865ffb0e5c32f7b1757e6693d72adddb55
563,207
def get_free_area(space_region, obstacle_map):
    """Return the total free area in space region, accounting for obstacles.

    Args:
        space_region: tuple of form (origin_x, origin_y), (range_x, range_y)
        obstacle_map: dict of form {id: (x, y), (range_x, range_y)}

    Returns:
        float area
    """
    _, (width, height) = space_region
    occupied = sum(rx * ry for _, (rx, ry) in obstacle_map.values())
    return width * height - occupied
3ed8603cd43cb65695c6a03bbcc164518ff9665d
188,385
import re


def sanitize_k8s_name(name,
                      allow_capital_underscore=False,
                      allow_dot=False,
                      allow_slash=False,
                      max_length=63,
                      suffix_space=0):
    """Sanitize a string into a valid Kubernetes name.

    From _make_kubernetes_name; cleans and converts the names in the workflow.

    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
    https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
    https://github.com/kubernetes/kubernetes/blob/c369cf18/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L89

    Args:
        name: original name
        allow_capital_underscore: whether to allow capital letter and
            underscore in this name (i.e. for parameters)
        allow_dot: whether to allow dots in this name (i.e. for labels)
        allow_slash: whether to allow slash in this name (i.e. for label
            and annotation keys)
        max_length: maximum length of K8s name, default: 63
        suffix_space: number of characters reserved for a suffix to be appended

    Returns:
        sanitized name.
    """
    sanitized = re.sub('[^-_./0-9A-Za-z]+', '-', name)
    if not allow_capital_underscore:
        sanitized = re.sub('_', '-', sanitized.lower())
    if not allow_dot:
        sanitized = re.sub('[.]', '-', sanitized)
    if not allow_slash:
        sanitized = re.sub('[/]', '-', sanitized)
    # collapse duplicate dashes and strip enclosing ones
    sanitized = re.sub('-+', '-', sanitized).strip('-')
    # truncate, reserving room for a suffix, never ending on a dash
    limit = max_length - suffix_space
    if len(sanitized) > limit:
        sanitized = sanitized[:limit].rstrip('-')
    return sanitized
c67ed0ac6efa9659d19f4d0e59401f8e94c3b9f4
162,418
def list_games(interface):
    """Fetch the list of games to join / currently played.

    Sends a GET to ``/games/`` and annotates each returned game with a
    1-based ``ref_number``.

    Returns:
        (json, response): the annotated payload and the raw response.
    """
    response = interface.get_request('/games/')
    games = response.json()
    # idiom fix: enumerate instead of indexing with range(len(...))
    for ref, game in enumerate(games, start=1):
        game['ref_number'] = ref
    return games, response
a4e82ee7dde05e1c2faaa98197f2183a8950fa49
455,099
def remove_substrings(text, to_replace, replace_with=""):
    """
    Remove (or replace) substrings from a text.

    Args:
        text (str): raw text to preprocess
        to_replace (iterable or str): substrings to remove/replace
        replace_with (str): defaults to an empty string but you can
            replace substrings with a token.
    """
    targets = [to_replace] if isinstance(to_replace, str) else to_replace
    result = text
    for target in targets:
        result = result.replace(target, replace_with)
    return result
765247ffd7ee4caa33a3afffbca7e47799bc0b16
582,832
def _nb_models_and_deficit(nb_targets, potential_targets):
    """Number of models to learn, and how many potential targets go unused.

    Args:
        nb_targets: number of targets per model
        potential_targets: attributes that can be used as targets

    Returns:
        nb_models: number of models
        nb_leftover_targets: number of potential targets which will not be used
    """
    # one model per full group of nb_targets; the remainder is the deficit
    return divmod(len(potential_targets), nb_targets)
9e664401ae9502be3675b3645bb9382666d9a017
240,539
def ask_yes_no(question):
    """
    Ask the user a yes or no question via commandline

    :param question: The question to be displayed
    :return: True if the user answered with yes, False otherwise
    """
    yes_answers = {'true', '1', 't', 'y', 'yes'}
    no_answers = {'false', '0', 'f', 'n', 'no'}
    while True:
        print(question + " [y/n] ")
        choice = input().lower()
        if choice in yes_answers:
            return True
        if choice in no_answers:
            return False
        print("Please write yes or no.")
f049c08408d6f8bd4b429c6f8bc287e5f1c7c9d7
417,700
def right_triangle_calc(n):
    """Return the n-th triangular number, i.e. (n+1) choose 2.

    Uses integer floor division: the previous ``int((n*(n+1))/2)`` went
    through float division and lost precision for n above ~2**26.
    """
    return n * (n + 1) // 2
828a87f2c2690599f2b7c0dc8b8762b8552c5d54
396,026
def clean_url(url):
    """
    Strip a single trailing / from a URL if it exists.

    Parameters
    ----------
    url : string
        HTTP URL

    Returns
    -------
    url : string
        URL that is stripped of an ending / if it existed
    """
    # endswith is safe on an empty string, unlike url[-1] (IndexError)
    if url.endswith('/'):
        return url[:-1]
    return url
e5d5015adf4e06064a0d4d5638284bf1341f1a28
672,553
import requests
import json


def get_results_list(hda_dict):
    """Generate the list of filenames available for download.

    Parameters:
        hda_dict: dictionary initialised with the function init, storing
            all required information to interact with the HDA API.

    Returns:
        The same dictionary, updated with the list of filenames to be
        downloaded under ``results``.
    """
    url = (hda_dict['broker_endpoint'] + '/datarequest/jobs/'
           + hda_dict['job_id'] + '/result')
    response = requests.get(url,
                            headers=hda_dict['headers'],
                            params={'page': '0', 'size': '1'})
    results = json.loads(response.text)
    hda_dict['results'] = results
    print("************** Results *******************************")
    print(json.dumps(results, indent=4, sort_keys=True))
    print("*******************************************")
    return hda_dict
b27f004e1b3a88ff94708ed65b3d450ea653c9fb
425,566
def valid_paths(view):
    """Return list of view paths except for Nones"""
    all_paths = (v.file_name() for v in view.window().views())
    return [p for p in all_paths if p is not None]
b5612da62b2cf7411a0452614397253f20b632c5
93,106
def get_pixel_dist(pixel, red, green, blue):
    """Return the color distance between a pixel and the mean RGB values.

    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images

    Returns:
        dist (float): Euclidean distance between the mean and pixel RGB values
    """
    d_red = red - pixel.red
    d_green = green - pixel.green
    d_blue = blue - pixel.blue
    return (d_red ** 2 + d_green ** 2 + d_blue ** 2) ** 0.5
1a53a06b54ceef4ebaa450e9df7ff75aa2858371
377,632
def getObjectLabel(objectData, objectName):
    """Return the object label specified in object_data.yaml.

    :param objectData: mapping of object names to their attributes
    :param objectName: object to look up
    :raises ValueError: if there is no entry for *objectName*
    :return: the object's label
    """
    if objectName in objectData:
        return objectData[objectName]['label']
    raise ValueError('there is no data for ' + objectName)
d733a1d2107b38a8d5b49b6288319959fe9dafcf
231,600
def determine_classes_based_on_gain_in_r2_score(dataset):
    """Label each row by the sign of its 'gain_in_r2_score' value.

    Rows with a strictly positive gain are 'good_gain', the rest 'loss';
    labels are written to a new 'classes' column and the dataset returned.
    """
    dataset['classes'] = [
        'good_gain' if gain > 0 else 'loss'
        for gain in dataset['gain_in_r2_score']
    ]
    return dataset
5b1beb6a6043aae976c737d73284851d2ff56188
488,362
def make_title(passage_name, process=True, line_end=''):
    """Make a valid page header from a name"""
    if process:
        passage_name = passage_name.lower()
    return f':: {passage_name}{line_end}'
c148285fc49a32bf612f32eeb98671aaf4c1ab8a
659,601
def list_add(lists_of_lists):
    """Element-wise sum of list of lists.

    Parameters
    ----------
    lists_of_lists: list
        Non-empty list of lists of numbers.

    Returns
    -------
    list
        Per-position totals, sized like the first sublist.
    """
    assert len(lists_of_lists) > 0
    totals = [0] * len(lists_of_lists[0])
    for sublist in lists_of_lists:
        for pos, value in enumerate(sublist):
            totals[pos] += value
    return totals
67290590ba443250b637fea85491fc2e3aae9b7f
257,735
def sum_2_level_dict(two_level_dict):
    """Sum all entries in a two level dict.

    Parameters
    ----------
    two_level_dict : dict
        Nested dict

    Returns
    -------
    tot_sum : float
        Sum of all entries in the nested dict
    """
    # dead commented-out loop implementation removed; a single generator
    # expression sums every inner dict's values
    return sum(sum(inner.values()) for inner in two_level_dict.values())
6b5be015fb84fa20006c11e9a3e0f094a6761e74
708,615
def user_identity_lookup(user):
    """Specify which field will be used to identify the jwt subject."""
    identity = user.id
    return identity
2c23079edaaaa4bbe45ce78b93ac17fabc765730
127,561
import random
import string


def random_user(n):
    """Generate a random lowercase user id of size n."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
21d8ec2ef8b275ffca481e4553ec396ff4010653
703,714
import re


def cleanPropertyName(s):
    """
    Generate a valid python property name: every non-alphanumeric
    character becomes an underscore, a leading digit gains an initial
    underscore, and the result is lowercased.
    """
    invalid = re.compile(r'\W|^(?=\d)')
    return invalid.sub('_', s).lower()
21b0b8241306b439de4d83e850cd4d6b8a9f8eec
495,261
import inspect


def _has_default(parameter: inspect.Parameter) -> bool:
    """Check if parameter has a default value.

    Compares against the ``Parameter.empty`` sentinel by identity
    (``is not``) instead of ``!=``, so a default value with a permissive
    ``__eq__`` cannot be mistaken for "no default".
    """
    return parameter.default is not inspect.Parameter.empty
77eb90e5f6ed5d62527a5531f974a5779a1e21ff
488,885
def _GenerateUpdateMask(args, target_fields): """Constructs updateMask for patch requests. Args: args: The parsed args namespace from CLI target_fields: A Dictionary of field mappings specific to the target. Returns: String containing update mask for patch request. """ arg_name_to_field = { # Common flags '--description': 'description', '--schedule': 'schedule', '--time-zone': 'timeZone', '--clear-time-zone': 'timeZone', '--attempt-deadline': 'attemptDeadline', # Retry flags '--max-retry-attempts': 'retryConfig.retryCount', '--clear-max-retry-attempts': 'retryConfig.retryCount', '--max-retry-duration': 'retryConfig.maxRetryDuration', '--clear-max-retry-duration': 'retryConfig.maxRetryDuration', '--min-backoff': 'retryConfig.minBackoffDuration', '--clear-min-backoff': 'retryConfig.minBackoffDuration', '--max-backoff': 'retryConfig.maxBackoffDuration', '--clear-max-backoff': 'retryConfig.maxBackoffDuration', '--max-doublings': 'retryConfig.maxDoublings', '--clear-max-doublings': 'retryConfig.maxDoublings', } if target_fields: arg_name_to_field.update(target_fields) update_mask = [] for arg_name in args.GetSpecifiedArgNames(): if arg_name in arg_name_to_field: update_mask.append(arg_name_to_field[arg_name]) return ','.join(sorted(list(set(update_mask))))
ddc6474b04a0bdc4095265dc5951dc3c47d18766
516,199
def is_subset(subsampling, reference):
    """Return whether indices specified by ``subsampling`` are a subset of the reference.

    Args:
        subsampling ([int] or None): Sample indices; ``None`` means the full set.
        reference ([int] or None): Reference set; ``None`` means the full set.

    Returns:
        bool: Whether all indices are contained in the reference set.
    """
    if reference is None:
        # Everything is a subset of the full set.
        return True
    if subsampling is None:
        # The full set cannot be a subset of an explicit (proper) reference.
        # (The original also re-checked ``reference is not None`` here, which
        # is redundant after the guard above.)
        return False
    return set(subsampling).issubset(reference)
66e924370b0698bb0f1c2b73d058cf99d4543259
87,352
def login(client, user, password):
    """Return the User instance after logging the user in."""
    logged_in = client.login(username=user.username, password=password)
    assert logged_in
    return user
32c1d621b515086cf1811b146ca10a26ebc1d6fa
635,533
def _to_hhmmss(seconds): """ Converts seconds to hh:mm:ss,ms time format """ total_secs = float(seconds) hours = int(total_secs / 3600) mins = int((total_secs - (hours * 3600)) / 60) secs = total_secs - (hours * 3600) - (mins * 60) hours = '{:02d}'.format(hours) mins = '{:02d}'.format(mins) secs = '{:.3f}'.format(secs) if float(secs) < 10: secs = '0{}'.format(secs) return '{}:{}:{}'.format(hours, mins, secs.replace('.', ','))
5538f5844a7be5ad0fe1e78c3288b8184f885147
432,743
def has_magnet(self):
    """Return if any of the Holes have magnets.

    Parameters
    ----------
    self : LamHole
        A LamHole object

    Returns
    -------
    has_magnet : bool
        True if any of the Holes have magnets
    """
    return any(hole.has_magnet() for hole in self.hole)
16b52b586b4057d33bb5e636fb41f86403d21852
96,182
def make_biplot_scores_output(taxa):
    """Create convenient output format of taxon biplot coordinates.

    taxa is a dict containing 'lineages' and a coord matrix 'coord'

    output is a list of lines, each containing coords for one taxon
    """
    output = []
    # Use the first coordinate row to determine dimensionality; the original
    # indexed row 1, which raises IndexError when only one taxon is present.
    ndims = len(taxa['coord'][0])
    header = '#Taxon\t' + '\t'.join(['pc%d' % (i + 1) for i in range(ndims)])
    output.append(header)
    for i, taxon in enumerate(taxa['lineages']):
        coords = '\t'.join(map(str, taxa['coord'][i]))
        output.append(taxon + '\t' + coords)
    return output
4c461fb160544333109a126806cea48047103db1
666,409
from typing import Tuple


def split_iban(iban: str) -> Tuple[str, str, str]:
    """Split `iban` into country_code, check_digits and bban."""
    return iban[:2], iban[2:4], iban[4:]
3826bbf346d0290d4b9a1577fc907d08eb44f3a7
498,625
def reliability_calc(RACC, ACC):
    """
    Calculate Reliability.

    :param RACC: random accuracy
    :type RACC: float
    :param ACC: accuracy
    :type ACC: float
    :return: reliability as float (the string "None" when undefined)
    """
    numerator = ACC - RACC
    denominator = 1 - RACC
    try:
        return numerator / denominator
    except Exception:
        # Undefined when RACC == 1 (division by zero); kept as the original's
        # sentinel string.
        return "None"
f33d1d81dffb21c8379b6e135c967809061dcf10
690,439
from typing import Dict
from typing import Any


def check_first_element_and_populate_current(attribute_details: Dict[str, Any], key: str) -> str:
    """
    Retrieve the value of requested key from the first element of the list.

    :param attribute_details: list of properties from which the value is to be retrieved
    :param key: the key whose value is to be fetched
    :return: attribute_value: value of the requested key
    """
    entries = attribute_details.get(key, [])
    # Only the first entry is inspected, and only when it is flagged current.
    if entries and entries[0].get('current', ''):
        return entries[0].get('value', '')
    return ''
af516cf4e4f5519fbaa971513eafe7a5bb48fcdb
236,874
def emote_list_to_string(emote_list):
    """Convert an EmoteList to a single space-separated string."""
    return " ".join(emote_list)
50171852ba183fe366ad20e6281c3c867f700ffb
537,797
from pathlib import Path


def splitext(fname):
    """Splits filename and extension (.gz safe)

    >>> splitext('some/file.nii.gz')
    ('file', '.nii.gz')
    >>> splitext('some/other/file.nii')
    ('file', '.nii')
    >>> splitext('otherext.tar.gz')
    ('otherext', '.tar.gz')
    >>> splitext('text.txt')
    ('text', '.txt')
    """
    basename = str(Path(fname).name)
    # The original used ``basename.rstrip('.gz')``, but rstrip treats its
    # argument as a *character set*, corrupting names whose stem ends in
    # '.', 'g' or 'z' (e.g. 'fizz.gz' -> ('fi', 'zz.gz')).  Remove the
    # literal '.gz' suffix instead.
    if basename.endswith('.gz'):
        stem = Path(basename[:-len('.gz')]).stem
    else:
        stem = Path(basename).stem
    return stem, basename[len(stem):]
ab0327b781f08ca11eefd5295c546cd94e186779
272,572
def unit(x):
    """
    Optimizes the rendering of time.

    .. runpython::
        :showcode:

        from jupytalk.benchmark.mlprediction import unit
        print(unit(34))
        print(unit(3.4))
        print(unit(0.34))
        print(unit(0.034))
        print(unit(0.0034))
        print(unit(0.00034))
        print(unit(0.000034))
        print(unit(0.0000034))
        print(unit(0.00000034))
    """
    # Pick the largest unit that keeps the mantissa >= 1.
    if x >= 1:
        return "%1.2f s" % x
    if x >= 1e-3:
        return "%1.2f ms" % (x * 1e3)
    if x >= 1e-6:
        return "%1.2f µs" % (x * 1e6)
    if x >= 1e-9:
        return "%1.2f ns" % (x * 1e9)
    return "%1.2g s" % x
19231cf1f1afdcff69211e627bd97f0de23d4efa
575,300
def percentage_birts_by_month(df, years_greater_than=1980): """Take the prepared df and calculage the average births (avg_births) and percent above average (percent_above_average) for each month. Args: df (pd.DataFrame): The prepared birth data (created using df_birth_no_geo_prep). Requires columns: "conc_yy", "conc_month", "dob_yy", "birth_month", "conc_mm", "dob_mm", "births", years_greater_than (int): Only include data from above this year. """ # group by month df = df.groupby([ 'conc_yy', 'conc_month','dob_yy', 'birth_month','conc_mm','dob_mm',], as_index=False).sum() df = df.sort_values(by=['dob_yy','dob_mm']).reset_index(drop=True) # add 12 month rolling average df['avg_births'] = df['births'].rolling(window=12).mean() df['percent_above_avg'] = (df['births'] - df['avg_births'])/df['avg_births']*100 # only select dates > years_greater_than df = df[df['dob_yy'] > years_greater_than] return df
9948505aa9c51a5259c98b47a6668149f584946f
302,242
def locToLatLong(location):
    """
    :param location: location in string format
    :return: latitude and longtitude in float format
    """
    long_part, lat_part = str(location).split(',')
    longitude = float(long_part.split(':')[-1])
    # The latitude field carries one trailing delimiter character; drop it.
    latitude = float(lat_part.split(':')[-1][:-1])
    return latitude, longitude
7776b4b3a4d5d7491b632a82c7b59312ffb8ea65
691,859
import functools
import operator


def prod(xs):
    """Product (as in multiplication) of an iterable.

    Returns 1 for an empty iterable (the multiplicative identity).
    """
    multiply = operator.mul
    return functools.reduce(multiply, xs, 1)
87381e7da37b9164767720f2886d5ce47b8df778
105,431
def isatty(stream):
    """Check if a stream is a tty.

    Not all file-like objects implement the `isatty` method; those that
    don't are treated as not being a tty.
    """
    checker = getattr(stream, 'isatty', None)
    return False if checker is None else checker()
3498d6694a602c95efb2e2268c424e5333f38c9d
580,188
def dict2args(data):
    """Convert a dictionary of options to command like arguments.

    Note: This implementation supports arguments with multiple values.
    """
    result = []
    for key, value in data.items():
        # False means "omit the flag entirely".
        if value is False:
            continue
        prefix = "-" if len(key) == 1 else "--"
        flag = f"{prefix}{key}".replace("_", "-")
        # True means "bare flag, no value".
        if value is True:
            result.append(flag)
            continue
        values = value if isinstance(value, (tuple, list)) else [value]
        for item in values:
            result.extend([flag, str(item)])
    return result
1659ad70c033ca4d6401ccf50fb99294b1d0c30d
177,048
def EncodePrivate(sk):
    """
    Encode a private key into bytes (exactly 32 bytes for both
    do255e and do255s).
    """
    encoded = bytes(sk)
    return encoded
4763eb9bedf0b5d802b3ed3cd9441941c4567710
639,641
def build_msg(request, msg_type, name, drink):
    """Personalize SMS by replacing tags with customer info."""
    template = request.form.get(msg_type)
    return template.replace('<firstName>', name).replace('<productType>', drink)
2a22f1b48717d9a874e2397791f734fce483e703
21,410
from typing import OrderedDict


def parse_manifest(manifest):
    """Parses a ballot manifest.

    Identifiers are not necessarily unique *across* batches.

    Input
    -----
    manifest : iterable of str
        Each entry is "<batch label>, <spec>" where <spec> is either an
        integer count, a "lo:hi" range, or a "(id id ...)" explicit list.

    Returns
    -------
    OrderedDict
        batch ID (key) mapped to the list of ballot identifiers within the
        batch, either from sequential enumeration or from the given labels.

    Raises
    ------
    ValueError
        If a batch label appears more than once.
    """
    ballot_manifest_dict = OrderedDict()
    for row in manifest:
        batch, val = row.split(",")
        batch = batch.strip()
        val = val.strip()
        if batch in ballot_manifest_dict:
            raise ValueError('batch is listed more than once')
        ballot_manifest_dict[batch] = []
        if '(' in val:
            # explicit list of identifiers, e.g. "(1 3 5)"
            # TO DO: use regex to remove )(
            ballot_manifest_dict[batch] += [int(num) for num in val[1:-1].split()]
        elif ':' in val:
            # range of identifiers, e.g. "2:4" -> [2, 3, 4]
            lo, hi = val.split(':')
            ballot_manifest_dict[batch] += list(range(int(lo), int(hi) + 1))
        else:
            # an integer number of ballots; only a malformed integer is a
            # recoverable error (the original used a bare ``except``, which
            # also swallowed KeyboardInterrupt and genuine bugs).
            try:
                ballot_manifest_dict[batch] += list(range(1, int(val) + 1))
            except ValueError:
                print('malformed row in ballot manifest:\n\t', row)
    return ballot_manifest_dict
e93fe98e5d5d11dc17a492d33a8fa05b04e8a800
507,944
def gc_content_percent(sequence):
    """
    Calculates the GC-content percentage of the input sequence.

    Counts uppercase 'G' and 'C' characters only (as the original did).
    Returns the percentage (0-100) as a float; 0.0 for an empty sequence
    (the original raised ZeroDivisionError on empty input).
    """
    if not sequence:
        return 0.0
    gc = sequence.count('G') + sequence.count('C')
    return (gc * 100) / len(sequence)
3e2344096df626556f1f0947d7a13dcd8e9ff600
641,250
def filter_dict(source, d):
    """Filter `source` dict to only contain same keys as `d` dict.

    :param source: dictionary to filter.
    :param d: dictionary whose keys determine the filtering.
    :raises KeyError: if a key of `d` is missing from `source`.
    """
    filtered = {}
    for key in d:
        filtered[key] = source[key]
    return filtered
612096842e0b73f524ddc57ad1f9bd8e0d8f49b9
74,294
def register_with(registry):
    """Register a passed in object.

    Intended to be used as a decorator on model building functions with a
    ``dict`` as a registry.

    Examples
    --------
    .. code-block:: python

        REGISTRY = dict()

        @register_with(REGISTRY)
        def build_empty(base):
            return base
    """
    def decorator(obj):
        # Store the object under its own name and hand it back unchanged.
        registry[obj.__name__] = obj
        return obj
    return decorator
42fcc5b5084be32173a9a14bb7e73c7ce3068a62
264,001
def greet_by_name(name):
    """Returns a greeting to the given person."""
    greeting = "".join(["Hello, ", name, "!"])
    return greeting
2c2e8ebda1a2556a719728b5ce0d21ee588b9860
515,521
def transformUnits(size):
    """Transform the number of size param (received in MB) into an
    appropriate units string (MB/GB)."""
    if size <= 1024:
        return str(size) + " Mb"
    return "%.02f" % (float(size) / 1024.0) + " Gb"
f04e42f0ae2ed29bc5a329fb90661f26f2f485fc
410,574
def get_test_methods(testcase): """ Retrieve a list of test method names from a TestCase instance. """ return [s for s in dir(testcase) if s.startswith('test_')]
5f6faa48273f5268b1f3cfe5c307c970a5fbd2fa
609,693
def get_group(name, match_obj):
    """Return the named group's text, or a blank string if unavailable.

    Args:
        name: group name (or index) to look up.
        match_obj: an ``re.Match`` object, or ``None`` for a failed match.

    Returns:
        str: the matched text; '' when the group does not exist in the
        pattern, did not participate in the match, or ``match_obj`` is None.
    """
    try:
        captured = match_obj.group(name)
    except (AttributeError, IndexError):
        # AttributeError: match_obj is None (failed match);
        # IndexError: the group name/index is not in the pattern.
        # The original used a bare ``except``, which also hid real bugs.
        return ''
    return captured if captured is not None else ''
8ea0e942e6c9fbf7eadbfeb5bbac20b01d2a5750
631,556
def FlagIsRequired(flag_dict):
    """Returns whether a flag is required or not.

    Args:
        flag_dict: a specific flag's dictionary as found in the gcloud_tree

    Returns:
        True if the flag's required, False otherwise. If the passed dictionary
        does not correspond to a flag (does not contain the 'required' key),
        False is also returned
    """
    if 'required' in flag_dict:
        return flag_dict['required']
    return False
c5520a55e1d8c2b6e0bd1c975b92c7099ca8b1b9
335,839
def viz_b(body):
    """Create HTML b for graphviz."""
    return f'<B>{body}</B>'
3dfdf329977826b5ed791f1896538dacc18a041f
652,131
from typing import Any


def str_lmd(expr: Any, indent: int = 0):
    """Convert the string form of a lambda expression into a proper lambda
    function definition string.

    Args:
        expr: object whose ``str()`` looks like ``"<lambda>(a, b)"``.
        indent: number of leading spaces to prepend.

    Returns:
        str: e.g. ``"lambda a, b"`` (with the requested indent).
    """
    s = str(expr)
    # str.lstrip/rstrip treat their argument as a *character set*, so the
    # original ``lstrip("<lambda>(")`` also consumed leading argument
    # characters such as 'a', 'b', 'd', 'l', 'm' (e.g. "<lambda>(a, b)"
    # lost its leading 'a').  Strip the exact prefix/suffix instead.
    prefix, suffix = "<lambda>(", ")"
    call = s
    if call.startswith(prefix):
        call = call[len(prefix):]
    if call.endswith(suffix):
        call = call[:-len(suffix)]
    return " " * indent + f"lambda {call}"
734ef45b3d4af5c61cf41a92d47bb3bdd00c8e5e
393,117
import random


def successful_starts(success_prob, num_trials):
    """Assumes success_prob is a float representing probability of a single
    attempt being successful. num_trials a positive int.

    Returns a list of the number of attempts needed before a success for
    each trial."""
    tries_before_success = []
    for _ in range(num_trials):
        failures = 0
        # Keep drawing until a draw falls at or below the success probability.
        while random.random() > success_prob:
            failures += 1
        tries_before_success.append(failures)
    return tries_before_success
c05cf8e95e7a311bfeeecaf84a990c308fa718ca
490,073
def aggregate_permuted_network(observed_networks):
    """Aggregate one permutation of each observed network into a single network.

    This method handles the case where multiple observed networks are
    generated (e.g. from models produced by different random seed
    initializations of an unsupervised feature construction algorithm).
    Analysis of multiple networks is handled by aggregating them; likewise,
    the permutation test must generate N aggregate permuted networks.

    Parameters
    -----------
    observed_networks : list(CoNetwork)
        the list of observed networks, generated from models produced by
        different random seed initializations of the same unsupervised
        feature construction algorithm

    Returns
    -----------
    CoNetwork, the aggregate permuted network created by generating a
    permutation for each individual observed network and then aggregating
    them into a single network
    """
    aggregate = None
    for network in observed_networks:
        permuted = network.permute_pathways_across_features()
        if aggregate:
            aggregate.aggregate(permuted)
        else:
            aggregate = permuted
    return aggregate
d56ebab767a32942a9d292277dbea591f00eee27
86,907
import math


def is_prime(num):
    """
    Checks if the given number is a prime number or not.

    Args:
        num: integer to test.

    Returns:
        bool: True when ``num`` is prime; numbers below 2 are not prime.
    """
    if num <= 1:
        return False
    # Trial division only needs to reach sqrt(num): any composite has a
    # factor no larger than its square root.  The original scanned every
    # candidate up to num-1 (O(n)); this is O(sqrt(n)).
    for candidate in range(2, math.isqrt(num) + 1):
        if num % candidate == 0:
            return False
    return True
ff96a841fa24a180de0921859925a0b5061555e5
182,800
def TimeDeltaToSeconds(delta):
    """Converts a datetime.timedelta to integer seconds.

    Args:
        delta: a datetime.timedelta object.

    Returns:
        the integer seconds for the timedelta (microseconds are ignored).
    """
    # 86400 seconds per day; ``delta.seconds`` never exceeds a day.
    return delta.days * 86400 + delta.seconds
20cf1a8d4d3ac0387012dae08213422c42b596cd
423,767
import math def _equal(source, target): """Treats slightly differing floats as equal.""" # type-dependent treatment: # the lookup server yields floats with lower accuracy then the direct storage broker, # i.e. comparison will fail at '1646312878.401044' == '1646312878.401' if isinstance(source, float) and isinstance(target, float): return math.isclose(source, target, rel_tol=1e-9, abs_tol=0.0) else: return source == target
ce15a07fdaf251f5d19b38e5007fa1036811cb61
228,084
def t_INT(t):
    """Return the parsed int value."""
    # NOTE(review): if this is a PLY lexer rule, the docstring doubles as the
    # token's regular expression -- left byte-identical for that reason;
    # confirm the intent with the lexer definition.
    # The token object is returned unchanged; no int() conversion actually
    # happens here, despite what the docstring says.
    return t
e5b664ccd22e5499d0c0ea4f4397f741011a4fe1
211,887
def vector_check(vector):
    """
    Check input vector items type.

    :param vector: input vector
    :type vector: list
    :return: bool -- True when every item is a non-negative int
    """
    # NOTE: bool is a subclass of int, so True/False pass the isinstance
    # check -- behavior preserved from the original.  The original's
    # ``isinstance(...) is False`` anti-idiom and explicit loop collapse to
    # a single all() over the two conditions.
    return all(isinstance(item, int) and item >= 0 for item in vector)
c59fc20bb92dca0f119b01e96eeba4a19f85c58a
473,227
def get_headers(environ):
    """
    Returns headers from the environ (WSGI ``HTTP_``-prefixed keys,
    with the prefix removed).
    """
    return {
        name[5:]: value
        for name, value in environ.items()
        if name.startswith("HTTP_")
    }
6b3a5a1edf7bbb633b67cb7bae1a2c9590cfb9d2
312,920
import importlib


def import_class(cls_path):
    """
    Imports a class from a dotted path to the class.

    Args:
        cls_path: dotted module path ending in the class name
            (``"package.module.ClassName"``), or an already-imported object,
            which is returned unchanged.

    Returns:
        The class object.

    Raises:
        ImportError: when the path has no dots, the module cannot be
            imported, or the class is missing from the module.
    """
    # Non-strings are assumed to already be the class itself.
    # (Replaced six.string_types with str: Python 2 support is gone and six
    # was an unnecessary third-party dependency here.)
    if not isinstance(cls_path, str):
        return cls_path
    if '.' not in cls_path:
        # We've got a bare class name here, which won't work (no AppCache
        # to rely on).  Throw a useful error.  ("Rquires" typo fixed.)
        raise ImportError("Requires a Python-style path (<module.module.Class>) "
                          "to load given cls. Only given '%s'." % cls_path)
    module_path, class_name = cls_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    cls = getattr(module, class_name, None)
    if cls is None:
        raise ImportError(
            "Module '{}' does not appear to have a class called '{}'.".format(
                module_path, class_name))
    return cls
1d237a81a28778cdcab6f549d692178d091cb006
120,481
import base64


def encode(file):
    """
    A base64 encoder for Images.

    :param file: Path of the Image which should be encoded.
                 !IMPORTANT! file ending e.g.: eecevit.jpg
    :return: the encoded Image as base64 String
    """
    with open(file, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw)
5e76d861ca255f3be91d1337754477269e4ca1e2
507,975