| content (string, lengths 39–14.9k) | sha1 (string, length 40) | id (int64, 0–710k) |
|---|---|---|
def enum_name(name):
    """Translate an HID key name into its enumeration identifier."""
    # Keypad keys get their own prefix; spaces never appear in identifiers.
    if name.startswith("KP "):
        return "KP_" + name[3:].replace(" ", "")
    return "KEY_" + name.replace(" ", "")
def get_name(node):
    """Return the lower-cased 'abbrev' attribute of a field node."""
    abbrev = node.get('abbrev')
    return abbrev.lower()
def splittext(text, line_len):
    """split the given text on space according to the given max line size
    return a 2-uple:
    * a line <= line_len if possible
    * the rest of the text which has to be reported on another line
    """
    # Short enough already: the whole text fits on one line.
    if len(text) <= line_len:
        return text, ''
    # Walk left from the size limit looking for a space to break on.
    pos = min(len(text)-1, line_len)
    while pos > 0 and text[pos] != ' ':
        pos -= 1
    if pos == 0:
        # No space within the limit: walk right to the next space, so the
        # first returned line may exceed line_len in that case.
        pos = min(len(text), line_len)
        while len(text) > pos and text[pos] != ' ':
            pos += 1
    # The character at `pos` (the space found, when any) is dropped.
    return text[:pos], text[pos+1:].strip()
def git_set_branch(branch):
    """
    Build the command line for switching a local git checkout to a branch.

    Args:
        branch: string naming the branch to change to.

    Returns:
        list of command-line arguments for ``git checkout``.
    """
    command = ['git', 'checkout']
    command.append(branch)
    return command
import re
def _parse_mut_string(s):
"""
A string representation of a protein mutation of the form
(old residue)(position)(new residue). Example: T34U.
Parameters
----------
s : str
The string representation of the protein mutation
Returns
-------
old_residue : str
The old residue, or None of the mutation string cannot be parsed
position : str
The position at which the mutation occurs, or None if the mutation
string cannot be parsed
new_residue : str
The new residue, or None if the mutation string cannot be parsed
"""
m = re.match('([A-Za-z]+)([0-9]+)([A-Za-z]+)', s)
if m is None:
# Mutation string does not fit this pattern, other patterns not
# currently supported
return None, None, None
else:
return (m.group(1), m.group(2), m.group(3)) | 3c20ce5ee38e2acf180c6fcd6a98186a69f6b7e6 | 122,721 |
from typing import List
def check_capability(capabilities: List[str], capability: str) -> bool:
    """Return True if *capability* occurs (as a substring) in any entry.

    Args:
        capabilities: capability strings to search through.
        capability: the capability to look for.

    Returns:
        True when any entry of ``capabilities`` contains ``capability``.
    """
    # `any(True for ... if cond)` is a roundabout spelling of `any(cond ...)`.
    return any(capability in cap for cap in capabilities)
def build_rule_map(rules):
    """Turn a list of rules into a mapping keyed by rule id.

    When two rules share the same gid:sid, the one with the higher
    revision number ("rev") wins.
    """
    rulemap = {}
    for candidate in rules:
        existing = rulemap.get(candidate.id)
        if existing is None or candidate["rev"] > existing["rev"]:
            rulemap[candidate.id] = candidate
    return rulemap
def parse_snetsta(fname, verbose=False):
    """
    Parse an antelope db.snetsta text file.

    Builds a dict keyed by the sta attribute (which is not necessarily the
    seed sta); each value is a dict with keys "fsta" (the field BRTT defines
    for the seed sta in snetsta) and "net".

    :param fname: path of the snetsta file to parse.
    :param verbose: when True, print every station whose fsta differs from sta.
    """
    staindex = {}
    with open(fname, 'r') as fp:
        for line in fp:
            columns = line.split()  # rely on default whitespace splitting
            net, fsta, sta = columns[0], columns[1], columns[2]
            staindex[sta] = {"fsta": fsta, "net": net}
            if verbose and fsta != sta:
                print('Warning: station in net=', net,
                      ' uses altered sta code=', sta, ' for sta=', fsta)
    return staindex
import random
import secrets
import string
def generate_password(length=8):
    """
    Generate a random password of the given length.

    Uses the ``secrets`` module so the result is suitable for
    security-sensitive use (``random`` is not cryptographically secure).
    Characters are drawn uniformly from ASCII letters and digits.

    :param length: number of characters to generate (default 8)
    :return: the generated password string
    """
    alphabet = string.ascii_letters + string.digits
    # secrets.choice replaces the previous shuffle + random.choices scheme.
    return ''.join(secrets.choice(alphabet) for _ in range(length))
def torch2np(tensor):
    """
    Convert a torch tensor to a numpy array in channel-last convention.

    4D [b, c, h, w] -> [b, h, w, c]; 3D [c, h, w] -> [h, w, c];
    anything else keeps its first two axes in order.

    :param tensor: Torch tensor
    :return: Numpy array
    """
    ndims = tensor.dim()
    if ndims == 4:
        order = [0, 2, 3, 1]
    elif ndims == 3:
        order = [1, 2, 0]
    else:
        order = [0, 1]
    return tensor.permute(order).detach().cpu().numpy()
def _query_doctop_dict(docopt_dict, key):
"""
Check the :module:`doctopt` dictionary for a key. Returns None is not
found, otherwise the key's value.
"""
if key in docopt_dict:
return docopt_dict[key]
else:
return None | 7595cc47c6e560b670e133902c788eb252c7db9f | 122,728 |
def normal_shock_stag_pressure_ratio(M, *args):
    """Gives the normal shock stagnation pressure ratio as a function of upstream Mach number.

    Closed-form p02/p01 relation for a calorically perfect gas; evaluates
    to 1 at M = 1 and decreases for stronger shocks.

    :param M: upstream Mach number
    :param args: args[0] is gamma, the ratio of specific heats (e.g. 1.4 for air)
    :return: stagnation pressure ratio across the shock
    """
    gamma = args[0]
    # Intermediate factors of the closed-form relation. Note `a` squares the
    # whole product 0.5*(gamma+1)*M, not just M.
    a = (0.5*(gamma+1.0)*M)**2.0
    b = a/(1.0+0.5*(gamma-1.0)*M**2.0)
    c = b**(gamma/(gamma-1.0))
    d = 2.0/((gamma+1.0)*(gamma*M**2-0.5*(gamma-1.0))**(1/(gamma-1.0)))*c
    return d
def _split_steps_loggers(steps):
"""Splits the loggers and returns the estimators in the format
scikit-learn expects them.
Parameters
----------
steps: list of sklearn.Estimator or tuples of (sklearn.Estimator,
rubicon_ml.sklearn.EstimatorLogger).
The steps and estimator loggers to split.
Returns
-------
list of sklearn.Estimator and list of rubicon_ml.sklearn.EstimatorLogger
The ordered lists of estimators and rubicon-ml loggers.
"""
ret_loggers = []
ret_steps = []
for step in steps:
if isinstance(step, tuple):
ret_loggers.append(step[1])
ret_steps.append(step[0])
else:
ret_loggers.append(None)
ret_steps.append(step)
return ret_steps, ret_loggers | b8bcb9075b128644c85a05eb1808fb6bf3b30bb3 | 122,735 |
def get_first(items_list: list):
    """Return the first element of *items_list*, or None when it is empty."""
    if not items_list:
        return None
    return items_list[0]
def get_usa_acc_id(year, case_index):
    """
    Return the global accident id for USA given year and FARS case index.

    The id is composed as 1 (country marker for USA), followed by the
    year scaled into its digit positions, plus the case index.
    """
    # Plain integer arithmetic cannot raise KeyError, so the previous
    # try/except KeyError ("Country code incorrect") was dead code.
    return 1000000000000 + year * 100000000 + case_index
def pretty_size(size):
    """
    Return human readable size as a string, eg '512GiB', for an integer size.

    Exact multiples of 1024 use binary suffixes (KiB, MiB, ...), exact
    multiples of 1000 use decimal suffixes (KB, MB, ...), and anything
    else is reported in bytes.
    """
    if size == 0:
        # 0 is divisible by everything; report it plainly instead of '0PiB'.
        return '0B'
    if size % 1024 == 0:
        for suffix in ['', 'KiB', 'MiB', 'GiB', 'TiB']:
            if size % 1024:
                return '%d%s' % (size, suffix)
            size //= 1024  # floor division keeps exact integers (/= made floats)
        return '%d%s' % (size, 'PiB')
    if size % 1000 == 0:
        for suffix in ['', 'KB', 'MB', 'GB', 'TB']:
            if size % 1000:
                return '%d%s' % (size, suffix)
            size //= 1000
        return '%d%s' % (size, 'PB')
    return '%d%s' % (size, 'B')
def get_delta_frame_id(span0, span1):
    """Compute the minimum distance between two non-overlapping spans.

    Args:
        span0 (Span): First span.
        span1 (Span): Second span.
    """
    if span0.overlaps(span1):
        # Overlapping spans are a caller error; keep the hard assertion
        # for debuggability (the return is only reached under -O).
        assert False, (span0, span1)
        return 0
    if span0.end < span1.start:
        gap = span1.start - span0.end
    else:
        gap = span0.start - span1.end
    return gap
def count_list_freq(l):
    """
    Return a dict mapping each element of *l* to its number of occurrences.
    """
    # Single pass instead of calling list.count once per item, which made
    # the original quadratic in the length of the list.
    freq = {}
    for item in l:
        freq[item] = freq.get(item, 0) + 1
    return freq
def get_section_path(section):
    """Return a list with keys to access the section from root

    :param section: A Section
    :type section: Section
    :returns: list of section names ordered from the root down to *section*
    :raises: None
    """
    names = []
    node = section
    # Walk up `depth` ancestors, then flip into root-first order.
    for _ in range(section.depth):
        names.append(node.name)
        node = node.parent
    names.reverse()
    return names
import mpmath
def pi(accuracy: int) -> mpmath.mpf:  # pylint: disable=invalid-name
    """Return actual Pi value.

    Note: this sets the global mpmath working precision (mp.dps) as a side
    effect, which affects subsequent mpmath computations.

    Args:
        accuracy (int): number of digits
    Returns:
        mpmath.mpf: Pi value
    """
    mpmath.mp.dps = accuracy
    # Unary + forces the lazy pi constant to be evaluated/rounded at the
    # current precision.
    return +mpmath.pi
def map_dict_to_lower(input_dict):
    """Return an equivalent to the input dictionary with lower-case keys.

    Note: if two keys differ only by case, the key iterated last wins.
    """
    # Dict comprehension replaces the manual build loop.
    return {key.lower(): value for key, value in input_dict.items()}
from typing import Any
def repr_or_str(o: Any) -> str:
    """
    Return a string representation of the input.

    - bytes -> hex representation
    - str   -> the string itself
    - None  -> empty string
    - anything else -> repr()

    :type o: ``Any``
    :param o: Input data (str or bytes)
    :return: String representation of the input
    :rtype: ``str``
    """
    if o is None:
        return ''
    if isinstance(o, bytes):
        return o.hex()
    if isinstance(o, str):
        return o
    return repr(o)
def group_by(iterable, key_selector):
    """
    Group the items of *iterable* by a key computed for each item.
    The source iterable does not need to be sorted.

    :param iterable: The items over which to iterate.
    :param key_selector: A function which selects the key to group on.
    :return: A dict mapping each key to the list of items with that key.
        (The previous docstring claimed an iterator of tuples; the function
        has always returned a dict.)
    """
    groups = {}
    for item in iterable:
        # setdefault replaces the explicit membership test + two branches.
        groups.setdefault(key_selector(item), []).append(item)
    return groups
from typing import Tuple
def get_first_string(expression: str) -> Tuple[str, str]:
    """
    Split off the leading run of letters from the expression string.

    :param expression: str of the expression
    :return: the alphabetic prefix and the string of the remaining expression
    """
    end = 0
    while end < len(expression) and expression[end].isalpha():
        end += 1
    return expression[:end], expression[end:]
def mask_table_l() -> dict[int, str]:
    """Table of mask codes and correction level for correction level L.

    Returns:
        dict[int, str]: {mask number: 15-bit mask-and-correction-level code}
    """
    return {
        0: "111011111000100",
        1: "111001011110011",
        2: "111110110101010",
        3: "111100010011101",
        4: "110011000101111",
        5: "110001100011000",
        6: "110110001000001",
        7: "110100101110110",
    }
from typing import Dict
def is_codecommit_with_event_source(source: Dict) -> bool:
    """Check if a source is a CodeCommit repo with CloudWatch events for change detection"""
    is_codecommit = source["from"] == "CodeCommit"
    # Short-circuits, so "event_for_source_changes" is only read for CodeCommit.
    return is_codecommit and source["event_for_source_changes"]
def get_closest_targets(targets_with_distances, min_distance):
    """Collect the (row, col) of every target whose distance equals min_distance."""
    closest = []
    for distance, (target_row, target_col) in targets_with_distances:
        if distance == min_distance:
            closest.append((target_row, target_col))
    return closest
def convert_bbox_info(f_frame_len, bbox_info):
    """Convert bbox entries from <bb_left>, <bb_top>, <bb_width>, <bb_height>
    to <bb_left>, <bb_top>, <bb_right>, <bb_bottom> so they fit
    cv2.rectangle() inputs.

    Every consecutive group of 4 values is one bbox; the 3rd and 4th
    entries (width/height) become absolute right/bottom coordinates by
    adding the matching left/top value two positions earlier.
    """
    total_length = sum(f_frame_len.values())
    bbox = list(bbox_info)
    for idx in range(total_length):
        if idx % 4 in (2, 3):
            bbox[idx] += bbox[idx - 2]
    return bbox
from dateutil.parser import parse
def is_date(string: str) -> bool:
    """Return whether the string can be interpreted as a date.

    dateutil's parser is permissive, so many bare numbers and words also
    count as dates here.

    Parameters
    ----------
    string: str
        string to check for date
    Returns
    ----------
    bool
    """
    try:
        parse(string)
        return True
    except ValueError:
        # NOTE(review): dateutil's parse() can also raise OverflowError or
        # TypeError for some inputs, which would propagate — confirm intended.
        return False
def QueryElasticsearch(es, index, query_body):
    """
    Run a search against an Elasticsearch index.

    Input: ES connector, index to search, and the query body
    Output: raw results returned by the client for that query
    """
    return es.search(index=index, body=query_body)
from typing import Optional
import re
def get_server_type(data: str) -> Optional[str]:
    """
    Extract the type of the server from the input text.

    :param data: Input data.
    :return: The first match of the ``cx...`` pattern (runs to end of
        line), or None when no match is found.
    """
    # re.search returns the first match directly; findall built a full
    # list only to take its first element.
    match = re.search(r"cx.*", data)
    return match.group(0) if match else None
import base64
def base64encode(data):
    """
    Base64-encode a string.

    :param str data: the string to encode
    :return str: the base64-encoded string
    """
    raw = data.encode('utf-8')
    encoded = base64.b64encode(raw)
    return encoded.decode('utf-8')
from typing import List
def find_averages_of_subarray_brute_force(k: int, arr: List[int]) -> List[float]:
    """
    Compute the average of every contiguous k-element subarray of *arr*.

    Deliberately brute force: the window sum is recomputed from scratch for
    each start position, giving O(k * len(arr)) time.

    Parameters
    ----------
    k : int
        window size
    arr : List[int]
        input array

    Returns
    -------
    List[float]
        average of each k-element contiguous subarray
    """
    averages = []
    for start in range(len(arr) - k + 1):
        window_sum = 0.0
        for value in arr[start:start + k]:
            window_sum += value
        averages.append(window_sum / k)
    return averages
def tokenize_query(tokenizer, prompt):
    """Wrap *prompt* in the english→bash template and tokenize it as torch tensors."""
    templated = f"<|endoftext|> english: {prompt}\nbash:"
    return tokenizer(templated, return_tensors="pt")
def epoch_span_contains(span, epoch):
    """Determine if a given epoch falls within a given timespan.

    Args:
        span (tuple of Time): Pair of Time objects in increasing order.
        epoch (Time): Epoch to compare with span.

    Returns:
        contains (bool): True if input epoch is in the input span, inclusive of
        the endpoint epochs.
    """
    start, end = span[0], span[1]
    return epoch >= start and epoch <= end
def generateFWGList(minI=7, maxI=13, minJ=3, maxJ=23):
    """ Generate a list of fwgrid host names that can be used """
    hosts = []
    for i in range(minI, maxI):
        for j in range(minJ, maxJ):
            hosts.append("fwg-c%s-%s" % (i, j))
    return hosts
def license_header(year_from, year_to, holder, license_type):
    """Return SPDX license header lines using the specified data."""
    copyright_line = f"// SPDX-FileCopyrightText: {year_from} - {year_to} {holder}\n"
    license_line = f"// SPDX-License-Identifier: {license_type}\n"
    return [copyright_line, license_line]
def flatten_top_level_keys(data, top_level_keys):
    """ Helper method to flatten a nested dict of dicts (one level)

    Example:
        {'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'}
    The separator '_-_' gets formatted later for the column headers

    Args:
        data: the dict to flatten
        top_level_keys: a list of the top level keys to flatten ('a' in the example above)
    """
    flattened = {}
    for outer_key in top_level_keys:
        inner = data[outer_key]
        if inner is None:
            # Keep None markers under the original key.
            flattened[outer_key] = None
            continue
        for inner_key in inner:
            flattened[f"{outer_key}_-_{inner_key}"] = inner[inner_key]
    return flattened
def read_file(filename,
              delim = "\t"):
    """
    Read a delimited text file into a list of rows.

    Arguments:
        filename -> A string containing the name of a file, and the path to
            the file if required.
        delim -> A string containing the character that splits elements of
            the file.

    Returns: A list of lists — one inner list per line, each line stripped
    and split on the delimiter.
    """
    # The `with` statement closes the file automatically; the explicit
    # f.close() inside the block was redundant.
    with open(filename, "r") as f:
        return [line.strip().split(delim) for line in f]
def calc_controllers_for_workload(num_streams, heavy_operations_per_second, light_operations_per_second, performance_profile):
    """
    Derive the number of Controllers needed to handle the target metadata
    workload, given the Controller throughput for the different operation
    types provided in the performance profile.

    :param num_streams: Expected number of Streams in the system.
    :param heavy_operations_per_second: Heavy (Stream, Transaction) operations per second.
    :param light_operations_per_second: Light (Scope, Ping, Endpoints, Segments) operations per second.
    :param performance_profile: Class containing the performance of the Controller in the target environment.
    :return: Number of Controller instances to absorb the target metadata workload.
    """
    by_streams = num_streams / performance_profile.max_streams_per_controller + 1
    by_heavy = heavy_operations_per_second / performance_profile.controller_max_heavy_operations_per_second + 1
    by_light = light_operations_per_second / performance_profile.controller_max_light_operations_per_second + 1
    # The binding dimension (whichever needs the most instances) wins.
    return int(max(by_streams, by_heavy, by_light))
from typing import List
def compare_paths(paths_lhs: List[List[str]], paths_rhs: List[List[str]]) -> bool:
    """
    Compare two lists of paths for equality, ignoring order.

    Args:
        paths_lhs: list of paths to compare against
        paths_rhs: other list of paths to compare against

    Returns:
        True if both lists contain exactly the same paths, False otherwise
    """
    # The previous zip()-based comparison silently truncated to the shorter
    # list (so a strict prefix compared equal) and sorted both inputs in
    # place; sorted() + list equality fixes both issues.
    return sorted(paths_lhs) == sorted(paths_rhs)
def enforce_not_None(e):
    """Enforce non-nullness of input. Used for typechecking and runtime safety.

    :param e: value to check.
    :return: *e* unchanged when it is not None.
    :raises ValueError: when *e* is None.
    """
    if e is None:
        # ValueError is a subclass of Exception, so existing callers
        # catching Exception still work, while new callers can be precise.
        raise ValueError("Input is None.")
    return e
def gini_gain_quotient(
    left_total,
    right_total,
    left_amount_classified_zero,
    left_amount_classified_one,
    right_amount_classified_zero,
    right_amount_classified_one):
    """
    Return (numerator, denominator) describing the Gini gain for a split
    in the dataset.

    The Gini gain equals (1 / total) * (numerator / denominator), where
    total is the total number of samples (left and right). The denominator
    may be 0; use avoid_zero() to avoid division by zero.

    Keyword arguments:
    left_total -- the number of samples on the left side of the split
    right_total -- the number of samples on the right side of the split
    left_amount_classified_zero -- left-side samples classified as '0'
    left_amount_classified_one -- left-side samples classified as '1'
    right_amount_classified_zero -- right-side samples classified as '0'
    right_amount_classified_one -- right-side samples classified as '1'

    See also:
    Explanation of Gini gain -- https://victorzhou.com/blog/gini-impurity/
    Secure Training of Decision Trees with Continuous Attributes -- paper to be
    published by Mark Abspoel, Daniel Escudero and Nikolaj Volgushev
    """
    left_square_sum = left_amount_classified_zero ** 2 + left_amount_classified_one ** 2
    right_square_sum = right_amount_classified_zero ** 2 + right_amount_classified_one ** 2
    numerator = right_total * left_square_sum + left_total * right_square_sum
    denominator = left_total * right_total
    return (numerator, denominator)
def _hue2RGB(v1, v2, vH):
    """ convert hue to color component (0;1)
    Helper for HSL->RGB style conversion: evaluates one channel from the
    two intermediate values and the channel-shifted hue.
    :param v1: v1 value (0;1)
    :param v2: v2 value (0;1)
    :param vH: vH value (0;1)
    :return: component value for R, G or B """
    # Wrap the hue back into [0, 1].
    if vH < 0.0:
        vH += 1.0
    if vH > 1.0:
        vH -= 1.0
    # Piecewise curve: ramp up, plateau at v2, ramp down, then floor at v1.
    if (6.0 * vH) < 1.0:
        return v1 + (v2 - v1) * 6.0 * vH
    if (2.0 * vH) < 1.0:
        return v2
    if (3.0 * vH) < 2.0:
        return v1 + (v2 - v1) * ((2.0 / 3.0) - vH) * 6.0
    return v1
from typing import Mapping
import json
def load_categories(filename: str) -> Mapping[int, str]:
    """
    Load categories from the specified JSON file.

    :param filename: path to category filename
    :return: a dictionary of flower ids and names
    """
    with open(filename, 'r') as handle:
        return json.load(handle)
def Quartiles( key, numbers ):
    """
    Identify the 0, 25, 50, 75 and 100 percentile elements in the list of
    numbers. The elements are returned in a dictionary whose keys are given
    by the key (three letter acronym) with the percentile substituted.

    If numbers contains less than 5 elements, a dictionary with all values
    equal to int(0) is returned.

    Note: *numbers* is sorted in place.
    """
    numbers.sort()
    if len(numbers) < 5:
        # The previous guard compared the fractional index against 0, which
        # only triggered for empty lists; the documented <5 contract now holds.
        return { key % (q * 25,): 0 for q in range(0, 5) }
    l = (len(numbers) - 1) / 4.0
    return { key % (q * 25,): numbers[int(q * l)] for q in range(0, 5) }
from typing import Dict
def _sort_dictionary_alphabetically(
input_inv_dict: Dict[str, float]
) -> Dict[str, float]:
"""
Sorts a dictionary alphabetically by its keys.
Parameters
----------
input_inv_dict : dict
Dictionary containing radionuclide strings or Radionuclide objects as keys and activities
as values.
Returns
-------
dict
Inventory dictionary which has been sorted by the radionuclides alphabetically.
Examples
--------
>>> rd.inventory._sort_dictionary_alphabetically({'U-235': 1.2, 'Tc-99m': 2.3, 'Tc-99': 5.8})
{'Tc-99': 5.8, 'Tc-99m': 2.3, 'U-235': 1.2}
"""
return dict(sorted(input_inv_dict.items(), key=lambda x: x[0])) | 0a052054be508ced713d7cae8de9970e348168ad | 122,823 |
from bs4 import BeautifulSoup
import requests
def get_soup(url):
    """Retrieve the HTML from a URL and convert it to tag soup.

    Performs a blocking GET with no timeout or status-code check; any
    response body (including error pages) is parsed as HTML.
    """
    return BeautifulSoup(requests.get(url).text, "html.parser")
def merge_sort(arr):
    """
    In-place merge sort.

    Recursively sorts copies of both halves, then merges them back into
    `arr`, which is both mutated and returned.

    Time: O(n log(n)); Space: O(n)
    """
    if len(arr) <= 1:
        return arr
    half = len(arr) // 2
    lo = merge_sort(arr[:half])
    hi = merge_sort(arr[half:])
    li = ri = pos = 0
    # Merge the two sorted halves back into arr.
    while li < len(lo) and ri < len(hi):
        if lo[li] < hi[ri]:
            arr[pos] = lo[li]
            li += 1
        else:
            arr[pos] = hi[ri]
            ri += 1
        pos += 1
    # At most one half still has a sorted tail left over.
    arr[pos:] = lo[li:] + hi[ri:]
    return arr
def IntersectionofBodyPartsandOnesGivenbyUser(cfg, comparisonbodyparts):
    """FUNCTION TAKEN FROM DEEPLABCUT. Returns all body parts when comparisonbodyparts=='all', otherwise all bpts that are in the intersection of comparisonbodyparts and the actual bodyparts"""
    allbpts = cfg["bodyparts"]
    if "MULTI" in allbpts:
        allbpts = cfg["multianimalbodyparts"] + cfg["uniquebodyparts"]
    if comparisonbodyparts == "all":
        return allbpts
    # Filter to requested bodyparts while preserving config.yaml ordering.
    return [bp for bp in allbpts if bp in comparisonbodyparts]
def psd_plot(
    read_variable, default_analysis=None, plot_kwargs=None, extra_lines=None,
    text="As an example, we now plot the PSDs stored in the file"
):
    """Return a string containing the function to generate a plot showing the
    stored PSDs.

    Parameters
    ----------
    read_variable: str
        name of the read object
    default_analysis: str, optional
        The analysis PSD that you wish to plot
    plot_kwargs: dict, optional
        kwargs for the `.plot()` method.
    extra_lines: list, optional
        additional lines to add to the end of the string
    text: str, optional
        Markdown text explaining the plot

    Raises
    ------
    ValueError
        If no default analysis is provided.
    """
    if default_analysis is None:
        raise ValueError("Please provide a default analysis to use")
    # Mutable defaults ({} / []) are shared across calls; use None sentinels.
    plot_kwargs = plot_kwargs if plot_kwargs is not None else {}
    extra_lines = extra_lines if extra_lines is not None else []
    kwargs = ", ".join([f"{key}={item}" for key, item in plot_kwargs.items()])
    string = "psd = {}.psd['{}']\n".format(read_variable, default_analysis)
    string += "fig = psd.plot({})\n".format(kwargs)
    string += "\n".join(extra_lines)
    if text is not None:
        return [text, string]
    return [string]
def Multiply(xs, **unused_kwargs):
    """Multiplies two tensors (the first two elements of *xs*)."""
    first, second = xs[0], xs[1]
    return first * second
def filter_pubs(pubs):
    """Merge OpenAIRE datasets and publications, dropping entries without
    pids and deduplicating publications based on pids.

    Args:
        pubs (dict): Publication data from OpenAIRE, with 'datasets' and
            'publications' lists.

    Returns:
        _pubs (list): Flattened list of the input data.
    """
    kept, seen_pids = [], set()
    for pub in pubs['datasets'] + pubs['publications']:
        if 'pid' not in pub:
            continue
        duplicate = any(pid in seen_pids for pid in pub['pid'])
        seen_pids.update(pub['pid'])
        if duplicate or not pub['pid']:
            continue
        kept.append(dict(id=pub['pid'][0], **pub))
    return kept
def _no_gaps(sequence):
"""Returns True if a sequence has all values between 0..N with no gaps."""
return set(sequence) == set(range(len(sequence))) | 20de069ec2c4e1c41f081c0c12f6248e36c79fe5 | 122,845 |
def xStr(value, default=''):
    """
    Extended str() adding a default result, if the input is None
    """
    if value is None:
        return default
    return str(value)
def rectangleSelect(x1, y1, x2, y2, ts):
    """
    Snap a tile-coordinate rectangle to the divisions between tiles and
    return it in pixel units as (x, y, w, h).

    @type x1: int
    @param x1: left x-coordinate in tiles
    @type y1: int
    @param y1: top y-coordinate in tiles
    @type x2: int
    @param x2: right x-coordinate in tiles
    @type y2: int
    @param y2: bottom y-coordinate in tiles
    @type ts: int
    @param ts: size of tile in pixels
    """
    left = min(x1, x2)
    top = min(y1, y2)
    width = abs(x2 - x1) + 1
    height = abs(y2 - y1) + 1
    return int(left * ts), int(top * ts), int(width * ts), int(height * ts)
def convert_pandas_dtypes_to_builtin_types(col_type):
    """Convert pandas data types to python builtin ones, like pandas object to python str."""
    name = str(col_type)
    # First matching keyword wins; unmatched dtypes fall back to str.
    for keyword, builtin in (("str", str), ("int", int),
                             ("float", float), ("bool", bool)):
        if keyword in name:
            return builtin
    return str
def line_separated(value):
    """
    Return a list of values from a `value` string using line as list delimiters.
    """
    if not value:
        return []
    # str.splitlines() already returns a list (keepends defaults to False);
    # wrapping it in list() was redundant.
    return value.splitlines()
def is_stdstars(header):
    """ Tests if the 'OBJECT' of the given header is associated with a Standard star exposure. (True / False)
    None is returned if the header do not contain an 'OBJECT' entry
    Returns
    -------
    bool or None
    """
    obj = header.get("OBJECT",None)
    if obj is None:
        return None
    # Substring match against known standard-star name prefixes.
    # NOTE(review): "Hitlner" looks like a typo for the Hiltner standards —
    # confirm against the actual data before changing, since fixing the
    # spelling alters which headers match.
    stdnames = ["STD","Feige", "Hitlner", "LTT"]
    return any([s_ in obj for s_ in stdnames])
def remove_amp(tweet):
    """Takes a string and removes 'amp ' """
    return tweet.replace('amp ', '')
import torch
def quantization(x):
    """Quantize continuous image tensors into 255 levels (8-bit encoding)."""
    scaled = x * 255
    return torch.round(scaled) / 255
def normalize_rgb_values(color: tuple) -> tuple:
    """
    Clean-up any slight color differences in PIL sampling.

    Values <= 3 are clamped to 0 and values >= 253 to 255; everything
    else passes through unchanged.

    :param color: a tuple of RGB color values eg. (255, 255, 255)
    :returns: a tuple of RGB color values
    """
    normalized = []
    for channel in color:
        if channel <= 3:
            normalized.append(0)
        elif channel >= 253:
            normalized.append(255)
        else:
            normalized.append(channel)
    return tuple(normalized)
def make_cookie_values(cj, class_name):
    """
    Build a 'name=value; ...' string from the cookies in *cj* that belong
    to class.coursera.org under the class path. Can be used to set a
    Cookie header.
    """
    path = "/" + class_name
    values = []
    for cookie in cj:
        if cookie.domain == "class.coursera.org" and cookie.path == path:
            values.append(cookie.name + '=' + cookie.value)
    return '; '.join(values)
def native_type(value):
    """
    Converts a numpy type to a native python type via .tolist(); values
    without a .tolist attribute are returned unchanged.

    See:
    https://stackoverflow.com/questions/9452775/converting-numpy-dtypes-to-native-python-types/11389998
    """
    converter = getattr(value, 'tolist', None)
    if converter is None:
        return value
    return converter()
def find_number_3_multiples(x):
    """Calculate the number of times that 3 goes into x."""
    return x // 3
import torch
def sigmoid_threshold(tensor, threshold=0.5, high=1, low=0):
    """Apply the sigmoid function to the tensor and threshold the values.

    out_tensor(x) = low  if sigmoid(tensor(x)) <= threshold
                  = high if sigmoid(tensor(x)) > threshold

    Arguments:
        tensor (torch.Tensor): the tensor to threshold.

    Returns:
        torch.Tensor: same shape as the input with values {low, high}.
    """
    high_t = torch.Tensor([high]).to(tensor.device)
    low_t = torch.Tensor([low]).to(tensor.device)
    activated = torch.sigmoid(tensor)
    return torch.where(activated > threshold, high_t, low_t)
def index_label(frequency):
    """Returns appropriate mapping from frequency to index column name"""
    labels = {'D': 'date', 'M': 'month'}
    return labels[frequency]
def remove_nix_hash(string):
    """Given a nix store name of the form <hash>-<packagename>, remove
    the hash
    """
    parts = string.split("-")
    return "-".join(parts[1:])
import json
def load_json(fn: str):
    """Standard built in for loading json from file to dictionary"""
    with open(fn, "r") as handle:
        return json.load(handle)
from typing import Counter
def get_note_histogram(chorale, key):
    """
    Arguments
        chorale: a music21 Stream object
        key: music21.key.Key
    Returns a note histogram as a collections.Counter object for input chorale
    Counter key: (scale degree, accidental) or 'Rest'
    Counter value: count
    """
    nh = Counter()
    # Iterate the flattened stream so elements inside nested containers are seen.
    for note_or_rest in chorale.flat.notesAndRests:
        if note_or_rest.isNote:
            # Bin each pitch by (scale degree, accidental) relative to `key`.
            sd = key.getScaleDegreeAndAccidentalFromPitch(note_or_rest.pitch)
            nh[sd] += 1
        else:
            # Anything that is not a single note is counted as 'Rest'.
            # NOTE(review): notesAndRests can also yield Chord objects, which
            # fall into this branch — confirm that is intended.
            nh['Rest'] += 1
    return nh
import re
def split_into_words(s):
    """Split a sentence into a list of words, dropping punctuation,
    digits and underscores."""
    cleaned = re.sub(r"\W+", " ", s)
    cleaned = re.sub(r"[_0-9]+", " ", cleaned)
    return cleaned.split()
import urllib.request
def request_url(url, data, headers):
    """Build a urllib POST Request for *url* with the given body and headers."""
    request = urllib.request.Request(url, data=data, headers=headers, method='POST')
    return request
def null_replacer(value):
    """ Replaces None (and any other falsy value) with 0. for user input
    float values to be used in equations """
    return value if value else 0.
from pathlib import Path
def path_contains_file(path: Path, filename: str) -> bool:
    """Determine whether a file exists in the given path.

    Args:
        path: the path in which to search for the file
        filename: the name of the file

    Returns:
        True when `path/filename` exists and is a regular file
    """
    candidate = path / filename
    return candidate.is_file()
def value_str(value):
    """ used to format value lists to a colon-delimited (unicode) string """
    # float() strips zero-padding before each number is rendered.
    return ':'.join(str(float(v)) for v in value)
import math
def mach(M1, gamma):
    """Mach # after a normal shock (eq. 3.51)

    :param <float> M1: Mach # before the shock
    :param <float> gamma: Specific heat ratio
    :return <float> Mach # after the shock
    """
    half_gm1 = (gamma - 1.0) * 0.5
    numerator = 1.0 + half_gm1 * M1 ** 2
    denominator = gamma * M1 ** 2 - half_gm1
    return math.sqrt(numerator / denominator)
def getFileContents(file_path):
    """Reads the contents of a file as a single string, then closes the file.

    Args:
        file_path: path to the file to read its contents into a string

    Returns:
        a single string containing the entire contents of the file
    """
    # A context manager guarantees the file is closed even if read() raises;
    # the original leaked the handle on any exception between open and close.
    with open(file_path) as file_to_read:
        return file_to_read.read()
def unpack_launcher(**kwargs):
    """ Unpacks the launcher kwargs for easy use in launcher method definition within script modules.

    Copy paste the following implementation at the top of script.launch() method:
    logger, loghost logport, clients, guis, params = unpack_launcher(**kwargs)

    :param kwargs: (dict) contains all keyword arguments required for launching a script from launcher module
        e.g.: dict(logger=log, loghost='localhost', clients=[client1, client2], guis=[gui_client1, gui_client2]),
        logport=1234, params=experimental_parameters_container)
        Note that experimental parameters should go in "params" and can be customized to contain all other
        script specific stuff

    :return: (tuple) logger, loghost, logport, clients, guis, params
    """
    return (
        kwargs['logger'],
        kwargs['loghost'],
        kwargs['logport'],
        kwargs['clients'],
        kwargs['guis'],
        kwargs['params'],
    )
def normal_sort(method1, method2):
    """Normal, everyday, alphanumeric sort.

    cmp-style comparator: returns -1, 0, or 1.
    """
    # Standard cmp idiom: the boolean subtraction yields -1, 0, or 1.
    return (method2 < method1) - (method1 < method2)
from typing import Union
from pathlib import Path
import hashlib
from typing import Callable
def checksum(path: Union[str, Path], hash_fn: str = "sha256", chunk_num_blocks=8192, **kwargs) -> str:
    """Return checksum of file or directory.

    Args:
        path (Union[str, Path]): A path to a file or directory. For a
            directory, every file found recursively (in sorted path order)
            is folded into a single digest.
        hash_fn (str): Name of a ``hashlib`` constructor. Defaults to 'sha256'.
        chunk_num_blocks (int): Number of hash blocks to read per chunk.
            Defaults to 8192.
        **kwargs: Additional arguments to pass to the hash constructor.

    Returns:
        str: checksum of file or directory

    Raises:
        ValueError: If ``hash_fn`` does not name a hashlib constructor.
    """
    # Look up the constructor once; the original also left a stray no-op
    # `hashlib.blake2b` expression here, now removed.
    hash_func: Callable = getattr(hashlib, hash_fn, None)
    if hash_func is None:
        raise ValueError("Unknown hash function")
    h = hash_func(**kwargs)
    path = Path(path)
    # Sorting the recursive glob makes the directory digest reproducible
    # regardless of filesystem enumeration order.
    path_list = [path] if path.is_file() else sorted(path.glob("**/*"))
    for file_path in path_list:
        if not file_path.is_file():
            continue
        with file_path.open("rb") as f:
            for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
                h.update(chunk)
    return h.hexdigest()
import functools
def getitem(d: dict, k: list):
    """Get an item by keys in a nested dictionary.

    Example
    -------
    For the nested dictionary `{'a':{'b':{'c':0}}}`, query with the key
    `['a','b','c']` returns 0; and query with the key `['a','b']`
    returns `{'c':0}`.
    """
    # Walk down one level per key; an empty key list returns d itself.
    node = d
    for key in k:
        node = node[key]
    return node
def repoNameToPath(repoName):
    """ Extracts the repo name from a full path """
    # Only the segment after the last '/' is needed.
    return repoName.rsplit("/", 1)[-1]
from typing import Dict
def wrap_single_element_in_list(data: Dict, many_element: str):
    """Make a specified field a list if it isn't already a list.

    Mutates ``data`` in place and returns it.
    """
    value = data[many_element]
    if not isinstance(value, list):
        data[many_element] = [value]
    return data
def getColumn(df, name):
    """Return the values of ``name`` from a DataFrame, whether ``name`` is a
    data column or an index level."""
    # Data columns take precedence over index levels of the same name.
    if name in df.columns:
        return df[name].values
    if name in df.index.names:
        return df.index.get_level_values(name).values
    raise ValueError('Name is not a data or index column')
def map_wikt_pos2fn_pos(wikt_pos):
    """
    map Wiktionary part of speech to FrameNet part of speech
    IMPLEMENTED: noun, verbs, and adjectives

    :param str wikt_pos: a part of speech coming from Wiktionary
    """
    pos_table = {'noun': 'N', 'verb': 'V', 'adj': 'A'}
    # Unmapped parts of speech yield None via dict.get's default.
    return pos_table.get(wikt_pos)
def find_frontmatter_ending(mdx: str, stop_looking_after: int = 10) -> int:
    """Find the line number where the mdx frontmatter ends.

    Args:
        mdx (str): String representation of the mdx file.
        stop_looking_after (int): Optional, default is 10. Number of lines
            without a ``---`` marker after which the search is abandoned.

    Returns:
        int: The next line where the frontmatter ending is found.

    Raises:
        IndexError: No markdown frontmatter was found.
    """
    indices = []
    lines_since_marker = 0
    for line_number, line in enumerate(mdx.splitlines()):
        lines_since_marker += 1
        if lines_since_marker >= stop_looking_after:
            break
        if line == "---":
            indices.append(line_number)
            lines_since_marker = 0
        # Bug fix: the original also broke out when the loop index equalled
        # len(line) - 1 — the *character* count of the current line — which
        # aborted the scan early whenever a line happened to be
        # index + 1 characters long. The loop now simply runs to the end.
    if not indices:
        msg = "No markdown frontmatter found in the tutorial."
        raise IndexError(msg)
    return max(indices) + 1
def _get_fuzzer_module(fuzzer):
"""Get the module for |fuzzer|'s fuzzer.py."""
return 'fuzzers.{}.fuzzer'.format(fuzzer) | 74a019757782f1b02c720ebdce2b9486e8e48799 | 122,934 |
def count_bits(n):
    """
    Write a function that takes an integer as input,
    and returns the number of bits that are equal to one
    in the binary representation of that number.
    You can guarantee that input is non-negative.
    """
    # bin() yields e.g. '0b1011'; '1' never occurs in the '0b' prefix, so
    # counting it directly replaces the original string-building loop.
    return bin(n).count("1")
def two_pair(ranks):
    """If there are two pair, return the two ranks as a
    tuple: (highest, lowest); otherwise return None."""
    # A rank forms a pair only when it occurs exactly twice.
    paired = {rank for rank in ranks if ranks.count(rank) == 2}
    if len(paired) != 2:
        return None
    return tuple(sorted(paired, reverse=True))
def getBoardCopy(board):
    """Make a duplicate of the board list and return it."""
    # Shallow copy: same elements, new list object.
    return list(board)
import re
def is_valid(policy: str, password: str) -> bool:
    """
    Given a policy (e.g. `1-3 a`) and a password (e.g. `abcde`),
    determine if the password complies with the policy
    """
    char = policy[-1:]
    bounds = re.findall("[0-9]+", policy)
    lo, hi = int(bounds[0]), int(bounds[1])
    assert lo < hi
    return lo <= password.count(char) <= hi
def block_comments_begin_with_a_space(physical_line, line_number):
    """There should be a space after the # of block comments.

    There is already a check in pep8 that enforces this rule for
    inline comments.

    Okay: # this is a comment
    Okay: #!/usr/bin/python
    Okay: # this is a comment
    K002: #this is a comment
    """
    MESSAGE = "K002 block comments should start with '# '"
    # Shebang lines on the first line are exempt.
    if line_number == 1 and physical_line.startswith('#!'):
        return
    stripped = physical_line.strip()
    if not stripped.startswith('#'):
        return
    # A lone '#' is fine; otherwise the next character must be whitespace.
    if len(stripped) > 1 and not stripped[1].isspace():
        return physical_line.index('#'), MESSAGE
def IOU(box1, box2):
    """
    Params
    box1 - [x1,y1,x2,y2] which are the coordinates of the top left and bottom right corners of a box
    box2 - [x1,y1,x2,y2] which are the coordinates of the top left and bottom right corners of a box
    Returns - Intersection over union of the two bounding boxes
    """
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])
    # Boxes that do not overlap have zero intersection.
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    # Bug fix: box1's height previously mixed box1[3] with box2[1],
    # corrupting the union area.
    box1area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    iou = intersection_area / float(box1area + box2area - intersection_area)
    return iou
def fromAsn1IntBytes(b, size):
    """Return a bytearray of "size" bytes representing a big-endian integer
    by converting the input bytearray's ASN.1 integer.

    An ASN.1 integer is a big-endian sequence of bytes with excess leading
    zero bytes removed; however, when the high bit of the first content byte
    would otherwise be set, a zero byte is prepended.  Note that the ASN.1
    type/len fields are NOT removed by this function.

    Raises SyntaxError.
    """
    if len(b) > size + 1:
        raise SyntaxError("ASN.1 integer is too big")
    if len(b) == size + 1:
        # Exactly one extra byte: it must be the zero that was prepended
        # because the integer's high bit was set.
        if b[0]:
            raise SyntaxError("ASN.1 integer too big")
        if not (b[1] & 0x80):
            raise SyntaxError("ASN.1 integer has excess zero padding")
        return b[1:]
    # Shorter than "size": left-pad with zero bytes to reach "size".
    return bytearray([0] * (size - len(b))) + b
import numbers
import yaml
def make_metadata(channels, n_channels, dtype, output_path):
    """Make and save metadata for a binary file.

    Parameters
    ----------
    channels: str or int
        The value of the channels parameter ('all', a single channel index,
        or a collection of channel indices)
    n_channels: int
        Number of channels in the whole dataset (not necessarily match the
        number of channels in the subset)
    dtype: str
        dtype
    output_path: str
        Where to save the file (a .bin path; the yaml metadata is written
        alongside it)
    """
    # Work out how many channels the subset actually contains.
    if channels == 'all':
        subset_channels = n_channels
    elif isinstance(channels, numbers.Integral):
        subset_channels = 1
    else:
        subset_channels = len(channels)
    params = dict(dtype=dtype, n_channels=subset_channels, data_order='samples')
    # Save the params as a yaml file next to the binary file.
    path_to_yaml = str(output_path).replace('.bin', '.yaml')
    with open(path_to_yaml, 'w') as f:
        yaml.dump(params, f)
    return params
def matrix_from_string(mtx_str):
    """Turn a string representation of a matrix into an actual matrix.

    Rows are delimited by newlines; values within a row are separated by
    spaces, and extra spaces (and blank lines) are ignored.

    Args:
        mtx_str: A string representation of a matrix

    Returns:
        A list of lists of floats for the string
    """
    rows = []
    for line in mtx_str.split("\n"):
        # Skip lines that contain nothing but spaces.
        if line.replace(" ", "") == "":
            continue
        rows.append([float(tok) for tok in line.strip().split(" ") if tok != ""])
    return rows
def nonwhitespace(argument):
    """Return argument with all whitespace removed.

    This includes removing any single spaces within the string.
    """
    pieces = argument.split()
    return "".join(pieces)
import base64
def filter_b64decode(param):
    """
    Decode base64 encoded string
    F.e. ::

        - echo: '{{ token |b64decode }}'

    :param param: data to convert
    """
    raw = base64.b64decode(param.encode('utf-8'))
    return raw.decode('utf-8')
def check_index(index: str, values: list, messages: list) -> bool:
    """Checks that min-max values are a two-items list.

    Parameters
    ----------
    index : str
        Numeric indicator.
    values : list
        Metadata variables in criteria.
    messages : list
        Messages accumulator, appended to in place on error.

    Returns
    -------
    boolean : bool
        Whether to keep the key/value or not.
    """
    # Only the min-max indicator ('2') with a malformed list is an error.
    if index != '2' or len(values) == 2:
        return False
    messages.append('For min-max subsetting, two-items list need: no min (or no max) should be "None"')
    return True
from pathlib import Path
from typing import List
def list_subdirs(base_path: Path, min_depth: int = 1) -> List[Path]:
    """
    Crawl through the base path and return all folders with the given minimum depth

    :param base_path: path where crawling should start
    :param min_depth: folders of depth to the base path which should be considered for the evaluation
    :return: list of subfolder paths
    """
    # Depth exhausted: nothing below this level is collected.
    if min_depth < 1:
        return []
    direct = [child for child in base_path.iterdir() if child.is_dir()]
    # Direct subdirectories first, then each one's own subtree in turn.
    result = list(direct)
    for child in direct:
        result.extend(list_subdirs(base_path=child, min_depth=min_depth - 1))
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.