content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def strbool(x):
    """
    Return the XML-document string representation of a boolean value.

    >>> strbool(False)
    '0'
    >>> strbool(True)
    '1'
    """
    if x:
        return '1'
    return '0'
from pathlib import Path
def get_file_path(filepath):
    """Return the path of the config file if it exists.

    :param filepath: The path of the file.
    :type filepath: str|Path
    :return: Path
    :raises FileNotFoundError: if *filepath* is not a str/Path or does not
        point to an existing file.
    """
    # isinstance (not ``type(x) is str``) so str subclasses are accepted too.
    if isinstance(filepath, str):
        real_filepath = Path(filepath)
    elif isinstance(filepath, Path):
        real_filepath = filepath
    else:
        # Unsupported types are reported the same way as missing files so
        # callers only have one exception type to handle.
        real_filepath = None
    if real_filepath is None or not real_filepath.exists():
        raise FileNotFoundError(f"{filepath} does not exist")
    return real_filepath
import sys
def format_results_df(df):
    """Format colnames and units of df result file.

    Renames the French survey columns to canonical names, keeps only the
    columns of interest, and normalizes vegetation-cover values into the
    [0, 1] range.  Assumes *df* is a pandas DataFrame -- TODO confirm.
    """
    # French source column -> canonical column name.
    col_mapper = {
        "nom": "pl_id",
        "COUV BASSE": "vt_veg_b",
        "COUV INTER": "vt_veg_moy",
        "COUV HAUTE": "vt_veg_h",
        "couverture basse calibree": "pred_veg_b",
        "couverture inter calibree": "pred_veg_moy",
        "Taux de couverture haute lidar": "pred_veg_h",
    }
    df = df.rename(col_mapper, axis=1)
    cols_of_interest = [
        "pl_id",
        "vt_veg_b",
        "vt_veg_moy",
        "vt_veg_h",
        "pred_veg_b",
        "pred_veg_moy",
        "pred_veg_h",
    ]
    # Check columns and select them
    assert all(coln in df for coln in cols_of_interest)
    df = df[cols_of_interest]
    # Convert if necessary
    # Ground-truth values above 1 are treated as percentages.
    if df["vt_veg_b"].max() > 1:
        df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]] = (
            df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]] / 100
        )
    # Object-dtype predictions are expected to be "NN%" strings; anything
    # else is an unknown input format and aborts the program.
    if df["pred_veg_b"].dtype == object:
        if any(df["pred_veg_b"].str.contains("%")):
            df[["pred_veg_b", "pred_veg_moy", "pred_veg_h"]] = df[
                ["pred_veg_b", "pred_veg_moy", "pred_veg_h"]
            ].applymap(lambda x: float(x.replace("%", "")) / 100)
        else:
            sys.exit("ERROR: UNKNOWN CASE")
    return df | c9f1feaba81aa606a0ddd9293a5803601ea0f3dc | 37,032 |
def get_world_euler(best_sound_loc):
    """ Get Yaw and pitch in the global world representation:
    Arguments:
    best_sound_loc -- Sound localization event (nested sequence; indices
        [1] and [2] hold the angle components combined here)
    Return:
    new calculated vector [yaw, pitch]
    """
    base = best_sound_loc[2]
    offset = best_sound_loc[1]
    yaw = base[5] + offset[0]
    pitch = base[4] + offset[1]
    return [yaw, pitch]
import sys
def _tool():
"""run when script is from a tool
"""
in_fc = sys.argv[1]
cent = str(sys.argv[2])
from_north = str(sys.argv[3])
out_fc0 = sys.argv[4]
out_fc1 = sys.argv[5]
return in_fc, from_north, cent, out_fc0, out_fc1 | e029955e308d945636e0afeb60c5f9e73864b253 | 37,034 |
def _get_testm_tree(ind):
"""Generate a fake package with submodules
We need to increment index for different tests since otherwise e.g.
import_modules fails to import submodule if first import_module_from_file
imports that one
"""
return {
'dltestm%d' % ind: {
'__init__.py': '',
'dlsub1': {'__init__.py': 'var = 1'},
'dlsub2.py': 'var = 2'}
} | d8cf76706355a513e50c645cdba05329e335cd4c | 37,035 |
def _emitterIsWorking():
    """Ask whether the ir emitter flashed and return the user's answer.

    Re-prompts until a yes/no style answer is given.

    Returns:
        bool: True if the user answered yes, otherwise False
    """
    answer = input("Did you see the ir emitter flashing (not just turn on) ? Yes/No ? ").lower()
    while answer not in ("yes", "y", "no", "n"):
        answer = input("Yes/No ? ").lower()
    return answer in ("yes", "y")
def get_filenames_of_set(train_set):
    """
    Read the names of the files that make up the training set.

    :param train_set: path of the file listing the training-set file names,
        one per line
    :return: list of file names, surrounding whitespace removed
    """
    with open(train_set) as handle:
        return [line.strip() for line in handle]
def myfibonacci():
    """
    Build a closure that yields successive Fibonacci numbers.

    The returned function keeps the two most recent values in the enclosing
    scope and advances them on every call, producing 1, 2, 3, 5, 8, ...
    """
    prev, curr = 0, 1

    def generate_my_next_number() -> int:
        """Advance the sequence one step and return the new value."""
        nonlocal prev, curr
        prev, curr = curr, prev + curr
        return curr
    return generate_my_next_number
import io
def fileredir(call, *args, **kwargs):
    """Invoke *call* with its ``file`` argument redirected to a StringIO.

    Returns a tuple ``(output, result)``: ``output`` is a function yielding
    the captured lines, ``result`` is the value returned by the call.
    """
    buffer = io.StringIO()

    def output():
        return buffer.getvalue().splitlines()

    result = call(*args, file=buffer, **kwargs)
    return output, result
def querystr(d):
    """Create a query string from a dict"""
    if not d:
        return ''
    pairs = ['%s=%s' % pair for pair in d.items()]
    return '?' + '&'.join(pairs)
import os
import sys
def get_main_path():
    """Return the absolute directory of the running script (sys.argv[0])."""
    script = sys.argv[0]
    return os.path.abspath(os.path.dirname(script))
def part2(input):
    """
    Find the smallest launch delay that crosses the firewall unscanned
    (Advent of Code 2017 day 13, part 2).

    A scanner with range r is back at position 0 every 2*(r-1) picoseconds,
    and the packet reaches depth d exactly d picoseconds after launch, so a
    delay is safe iff for every layer (delay + depth) % (2*(range-1)) != 0.
    Delays are scanned upward and the first safe one is returned.
    """
    periods = {}
    for line in input.strip().split('\n'):
        if not line:
            continue
        depth, rng = (int(part) for part in line.split(': '))
        periods[depth] = 2 * (rng - 1)
    delay = 0
    while True:
        safe = all((delay + depth) % period != 0
                   for depth, period in periods.items())
        if safe:
            return delay
        delay += 1
def encode_bin(word):
    """
    Encode a binary vector (least-significant bit first) into an integer.
    """
    total = 0
    for position, bit in enumerate(word):
        total += bit * 2 ** position
    return total
def compare_vertices(vertex1_index, distinct_vertices_list, data_model, merge_threshold):
    """returns true if abs between given points is less that the merge_threshold
    Arguments:
    :param distinct_vertices_list: (list) vertex coordinates
    :param merge_threshold: (float) difference between vertices is compared to this value
    :param data_model: (object) the data model holding coordinates for vertices
    :param vertex1_index: (tuple) First vertex to be compared. First int is index of triangle, second is index of vertex
    Returns bool
    """
    # NOTE(review): despite the summary line, this returns False as soon as
    # the vertex matches (all three coordinates within merge_threshold) some
    # entry of distinct_vertices_list, and True when nothing matches -- i.e.
    # True seems to mean "vertex is distinct".  Confirm against callers.
    for nr_of_tuples in range(len(distinct_vertices_list)):
        # Each vertex occupies three consecutive floats in the triangle's
        # flat vertex_list, starting at vertex_index * 3.
        coordinate_index1 = vertex1_index[1] * 3
        nr_of_matches = 0
        for i in range(3):
            subtraction = data_model.triangles[vertex1_index[0]].vertex_list[coordinate_index1] - \
                distinct_vertices_list[nr_of_tuples][i]
            if abs(subtraction) < merge_threshold:
                nr_of_matches += 1
            coordinate_index1 += 1
        # All three coordinates matched within the threshold.
        if nr_of_matches == 3:
            return False
    return True | 0cfcfb4be631b412ae14f8022118326449ee8496 | 37,048 |
def find_closest_level(levels, elevation):
    """Find the level closest to the given elevation.

    Returns None when *levels* is empty; on ties the earlier level wins.
    """
    best_level = None
    best_delta = float("inf")
    for candidate in levels:
        delta = abs(candidate.Elevation - elevation)
        if delta < best_delta:
            best_delta = delta
            best_level = candidate
    return best_level
def seq_gen_call(treefile, path, s, i, prefix, z = None):
    """
    Build the shell command line for a seq-gen run.

    :param treefile: path of the input tree file
    :param path: path of the seq-gen executable
    :param s: branch-length scaling factor passed to ``-s``
    :param i: index used in the output file name
    :param prefix: prefix of the output file name
    :param z: optional secondary index appended to the output file name
    :return: the command string
    """
    # ``is None`` (not ``== None``) is the correct identity test; it also
    # lets falsy values like z=0 produce the two-index file name.
    suffix = str(i) if z is None else '%s_%s' % (i, z)
    return (path + " -m HKY -l 1 -s " + str(s) + ' -wa <"' + treefile
            + '" > ' + prefix + '.seqs' + suffix + '.tmp')
import torch
def select_action_ddpg(state, actor):
    """Selects action using actor.

    :param state: observation tensor fed to the actor network (shape must
        match the actor's expected input -- TODO confirm)
    :param actor: torch module mapping states to a single scalar action
    :return: the selected action as a Python float
    """
    # Inference mode: disables dropout/batch-norm training behavior.
    actor.eval()
    with torch.no_grad():
        # .item() assumes the actor outputs a one-element tensor.
        action = actor.forward(state).item()
    return action
def playlist_transform(s,t,compareType="Song"):
    """
    Computes the edit distance for two playlists s and t, and prints the minimal edits
    required to transform playlist s into playlist t.
    Inputs:
    s: 1st playlist (format: list of (track name, artist, genre) triples)
    t: 2nd playlist (format: list of (track name, artist, genre) triples)
    compareType: String indicating the type of comparison to make.
    "Song" (default): songs in a playlist are considered equivalent if the
    (song name, artist, genre) triples match.
    "Genre": songs in a playlist are considered equivalent if the same genre is used.
    "Artist": songs in a playlist are considered equivalent if the same artist is used.
    Output: The minimum edit distance and the minimal edits required to transform playlist
    s into playlist t.
    """
    # Map the comparison mode onto an index into the (track, artist, genre)
    # triple; 0 means compare full triples.
    if compareType == "Song":
        type = 0
    elif compareType == "Artist":
        type = 1
    else:
        type = 2
    # A holds edit-distance values; B holds back-pointer op codes
    # (1 = keep, 2 = substitute, 3 = insert, 4 = delete).
    A, B = [], []
    # Prepend a sentinel so indices 1..len line up with playlist items.
    s, t = [" "] + s, [" "] + t
    A.append(range(len(s) + 1))
    B.append(range(len(t) + 1))
    # NOTE(review): these two seeding loops append one row per element of s
    # *and* per element of t, giving A/B more rows than the classic
    # (len(s)+1) x (len(t)+1) DP table -- confirm this is intentional.
    for i in range(len(s)): #appends index from s to A
        A.append([i])
        B.append([4])
    for j in range(len(t)): # appends index from t to A
        A.append([j])
        B.append([3])
    # Standard Levenshtein fill: cost of match/substitute vs insert vs delete.
    for i in range(1, len(s)):
        for j in range(1, len(t)):
            if type == 0: # if equal to a SONG
                if s[i] == t[j]: # if songs are equal
                    c_match = A[i-1][j-1]
                    match = True
                else:
                    c_match = A[i-1][j-1] + 1
                    match = False
            else: # ARTIST or GENRE
                if s[i][type] == t[j][type]:
                    c_match = A[i-1][j-1]
                    match = True
                else:
                    c_match = A[i-1][j-1]+1
                    match = False
            insert = A[i][j-1] + 1
            delete = A[i-1][j] + 1
            minimum = min(c_match, insert, delete)
            if minimum == c_match:
                if match:
                    B[i].append(1) #do not change
                else:
                    B[i].append(2) #change s[i] to t[j]
            elif minimum == insert:
                B[i].append(3) #insert t[j]
            else:
                B[i].append(4) #remove s[i]
            A[i].append(minimum)
    # Walk the back-pointers from the bottom-right corner, collecting the
    # operations in reverse so they print in forward order.
    x = len(s)-1
    y = len(t)-1
    listt = []
    while x >= 0 or y >= 0: # Printing out of operations
        if x == 0 and y == 0:
            break
        if B[x][y] == 1:
            a = "Leave " + str(s[x]) + " unaltered"
            listt.insert(0, a)
            x -= 1
            y -= 1
        elif B[x][y] == 2:
            b = "Change " + str(s[x]) + " to " + str(t[y])
            listt.insert(0, b)
            x -= 1
            y -= 1
        elif B[x][y] == 3:
            c = "Insert " + str(t[y])
            listt.insert(0, c)
            y -= 1
        elif B[x][y] == 4:
            d = "Remove " + str(s[x])
            listt.insert(0, d)
            x -= 1
    for k in range(0, len(listt)):
        print(listt[k])
    return A[len(s)-1][len(t)-1] | 390239d78349b82176ed1a54828ad302df1b585e | 37,053 |
def ngpus(smi):
    """Return the number of GPUs reported by the given SMI handle.

    Prints a short "[N GPUs]" tag as a side effect and asserts that at
    least one GPU is present.
    """
    count = smi.DeviceQuery("count")["count"]
    assert count > 0
    print("[" + str(count) + " GPUs]", end=" ")
    return count
def flatten_dictlist(dictlist):
    """
    Merge a list of dictionaries into a single dictionary.

    Later entries override earlier ones on duplicate keys.
    :param dictlist: List of dictionaries.
    :type dictlist: list
    :return: Flattened dictionary.
    :rtype: dict
    :Example:
    >>> dictlist = [{"a": 1}, {"b": 2, "a": 3}, {"c": 4}]
    >>> flatten_dictlist(dictlist)
    {"a": 3, "b": 2, "c": 4}
    """
    merged = {}
    for entry in dictlist:
        merged.update(entry)
    return merged
def schema():
    """
    Return the JSON-schema dict describing an apt entry (a plain string).
    """
    return {
        'title': 'apt schema',
        'type': 'string',
    }
import math
def _j_s(query_len: int, known_len: int, d_g_x: float, temp: float) -> float:
"""Estimate the free energy of length query_len based on one of length known_len.
The Jacobson-Stockmayer entry extrapolation formula is used
for bulges, hairpins, etc that fall outside the 30nt upper limit
for pre-calculated free-energies. See SantaLucia and Hicks (2004).
Args:
query_len: Length of element without known free energy value
known_len: Length of element with known free energy value (d_g_x)
d_g_x: The free energy of the element known_len
temp: Temperature in Kelvin
Returns:
float: The free energy for a structure of length query_len
"""
gas_constant = 1.9872e-3
return d_g_x + 2.44 * gas_constant * temp * math.log(query_len / float(known_len)) | 77dbc59e63d58e2f8a294411f85b7f1ae18ada08 | 37,058 |
def knapsack(w, n, vals, wts):
    """Find the maximum value achievable with capacity w from n weights.
    Duplicates are allowed (unbounded knapsack).

    :param w: Given weight capacity
    :param n: Number of weights
    :param vals: List of values of each weight
    :param wts: List of n weights
    :returns: Return the maximum value
    """
    best = [0] * (w + 1)
    for capacity in range(w + 1):
        for item in range(n):
            weight = wts[item]
            if weight <= capacity:
                candidate = best[capacity - weight] + vals[item]
                if candidate > best[capacity]:
                    best[capacity] = candidate
    return best[w]
def print_results_table(results_table):
    """Print human readable results table to stdout.

    Expects *results_table* as a list of rows where row 0 holds the column
    headers, row 1 is skipped, and every data row packs groups of three
    cells (num_runs, success_rate, avg_total_npe) per experiment column --
    presumably produced by the sibling results-collection code; confirm.
    """
    print('')
    print('=== Results Table ===')
    print('Format: # reps [success rate, avg total NPE]')
    def info_str(info_row):
        # num_runs (success_rate, avg_total_npe)
        if not info_row[0]:
            return '0'
        return '%s [%s, %s]' % (str(info_row[0]).ljust(2), info_row[1], info_row[2])
    nc = len(results_table[0]) # num cols
    # Header row: keep the first cell, then one cell per 3-wide group.
    out_table = [
        [results_table[0][0]] + [results_table[0][i] for i in range(1, nc, 3)]]
    for row in results_table[2:]:
        out_table.append([row[0]] + [info_str(row[i:i + 3]) for i in range(1, nc, 3)])
    nc = len(out_table[0]) # num cols
    # Left-justify each column to its widest entry plus two spaces of padding.
    col_widths = [max(len(row[col]) for row in out_table) for col in range(nc)]
    table_string = ''
    for row in out_table:
        table_string += ''.join(
            [row[c].ljust(col_widths[c] + 2) for c in range(nc)]) + '\n'
    print(table_string) | a7309e19c57d3199ab21a65f73900ac73e930dbd | 37,065 |
def continue_to():
    """
    Ask the player whether the game continues.

    Prompts once and returns True for the exact answer 'yes', False for the
    exact answer 'no'.

    Returns: The boolean value (boolean)
    """
    to = input('Do you want to continue? ')
    if to == 'yes':
        return True
    if to == 'no':
        return False | 00ba015cc110bf284b2239d128aecfae5ba4d5d2 | 37,068 |
    # NOTE(review): any other answer falls through and implicitly returns
    # None (falsy).  If that is not intended, the prompt should loop.
def _return_empty_tuple(*_):
""" Return empty tuple
"""
return () | 079f59f698f988a92763fb9093b7fb90fc2febc9 | 37,069 |
def _get_key_and_indices(maybe_key_with_indices):
"""Extracts key and indices from key in format 'key_name[index0][index1]'."""
patterns = maybe_key_with_indices.split('[')
if len(patterns) == 1:
return (maybe_key_with_indices, None)
# For each index ensure that the brackets are closed and extract number
indices = []
for split_pattern in patterns[1:]:
# Remove surrounding whitespace.
split_pattern = split_pattern.strip()
if split_pattern[-1] != ']':
raise ValueError(
'ParameterName {} has bad format. Supported format: key_name, '
'key_name[index0], key_name[index0][index1], ...'.format(
maybe_key_with_indices))
try:
indices.append(int(split_pattern[:-1]))
except ValueError:
raise ValueError(
'Only integer indexing allowed for ParameterName. '
'Faulty specification: {}'.format(maybe_key_with_indices))
return patterns[0], indices | ffc065c60da419b73b1283e06a69098eeac19fbb | 37,070 |
from re import UNICODE
def normalize(str_):
    """Normalize a string through possible permutations for comparison.

    Falsy inputs (None, "") normalize to the empty string; anything else is
    stripped of surrounding whitespace and casefolded so comparisons are
    case-insensitive.
    """
    if not str_:
        return ""
    # Bug fix: the previous code called str_.maketrans(UNICODE), passing the
    # re.UNICODE int flag where str.maketrans() requires a dict (or 2-3
    # strings) -- it raised TypeError for every non-empty input.  Strip +
    # casefold keeps the documented "normalize for comparison" contract.
    return str_.strip().casefold()
def var_name(prefix, used_names, *garbage):
    """Generate a new name for a variable that is unique within used_names.

    The suffix is a 32-bit hex digest derived from hashing the inputs; on a
    collision the hash is re-mixed with the previous candidate until a name
    not present in used_names is found.
    """
    candidate = prefix + '%x' % (hash((prefix, used_names, garbage)) % (2**32))
    while candidate in used_names:
        candidate = prefix + '%x' % (
            hash((candidate, prefix, used_names, candidate, garbage)) % (2**32))
    return candidate
import torch
def mdiou_loss(pred, target, eps=1e-6):
    """ D-IoU loss
    Computing the distance-IoU loss between a set of predicted bboxes and target bboxes.
    The loss is 1 - IoU plus the squared center distance normalized by the
    squared diagonal of the smallest enclosing box (Zheng et al., 2020).
    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Small constant guarding the divisions.
    Return:
        Tensor: Loss tensor, shape (n,).
    """
    # center point
    x_p = (pred[:, 2] + pred[:, 0]) / 2
    y_p = (pred[:, 3] + pred[:, 1]) / 2
    x_g = (target[:, 2] + target[:, 0]) / 2
    y_g = (target[:, 3] + target[:, 1]) / 2
    # overlap
    i_x1y1 = torch.max(pred[:, :2], target[:, :2])
    i_x2y2 = torch.min(pred[:, 2:], target[:, 2:])
    # clamp keeps widths/heights at zero when the boxes do not overlap.
    i_wh = (i_x2y2 - i_x1y1).clamp(min=0)
    overlap = i_wh[:, 0] * i_wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose box each (n, 2)
    c_x1y1 = torch.min(pred[:, :2], target[:, :2])
    c_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    # diagonal length ^2 of enclose box
    c = ((c_x2y2[:, 0] - c_x1y1[:, 0]) ** 2) + ((c_x2y2[:, 1] - c_x1y1[:, 1]) ** 2) + eps
    # center distance ^2
    d = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
    # u = d / c
    r = d / c
    loss = 1 - ious + r
    return loss | 7b63b999cdcde5ec265b5eb9d27b9da1550c0510 | 37,074 |
def penetration(N, R, Es):
    """
    Given normal load, sphere radius and contact modulus compute rigid body
    penetration.

    Parameters
    ----------
    N : float
        Normal force.
    R : float
        Sphere radius.
    Es : float
        Contact modulus: Es = E/(1-nu**2) with Young's modulus E and Poisson
        number nu.

    Returns
    -------
    float
        Penetration depth.
    """
    ratio = 9 * N ** 2 / (16 * R * Es ** 2)
    return ratio ** (1 / 3)
def padTo(n, seq, default=None):
    """
    Pad a sequence out to n elements, filling with a default value when it
    is not long enough.  Raises ValueError if the input is longer than n.

    Details, details:
    This returns a new list; it does not extend the original sequence.
    The new list contains the values of the original sequence, not copies.
    """
    if len(seq) > n:
        raise ValueError("%d elements is more than %d." % (len(seq), n))
    padded = list(seq)
    padded.extend([default] * (n - len(padded)))
    return padded
import os
import re
def get_partition_info(target, header=False):
    """Return partition information (the output of ``df``) for the given
    file/folder path.

    - return
        - 0: 'Filesystem'       device name
        - 1: '1048576-blocks'   total size
        - 2: 'Used'             used size
        - 3: 'Available'        free space
        - 4: 'Capacity'         usage percentage
        - 5: 'Mounted'          mount point
      All sizes are in MB.
    @param target: file/folder path to inspect
    @type target: str
    @param header: whether to include the df header text in each entry
    @type header: bool
    @rtype: dict, or None when *target* does not exist
    """
    if os.path.exists(target) is True:
        ret = {}
        if header is True:
            # Header mode: keep both the header row and the values row, so
            # each entry is a (header, value) pair.
            pipe = os.popen("LANG=C /bin/df -P -m " + target)
            try:
                data = []
                for line in pipe.readlines():
                    # Collapse tabs to spaces so split() yields clean columns.
                    data.append(re.sub(r'[ \t]', ' ', line).split())
            finally:
                pipe.close()
            for i in range(0,6):
                ret[i] = (data[0][i], data[1][i])
        else:
            # Data-only mode: sed picks the second line (the values row).
            pipe = os.popen("LANG=C /bin/df -P -m %s | /bin/sed -n 2p" % target)
            try:
                line = pipe.read()
            finally:
                pipe.close()
            data = re.sub(r'[ \t]', ' ', line).split()
            for i in range(0,6):
                ret[i] = (data[i],)
        return ret
    else:
        return None | 513f45f05e62d2990e83ec636dde98688acc04a5 | 37,079 |
import re
def sanitize(name, space_allowed=False, replace_with_character="_"):
    """Replace unwanted characters in a string.

    Args:
        name ([type]): name to sanitize
        space_allowed (bool, optional): whether whitespace survives in the
            sanitized string. Defaults to False.
        replace_with_character (str, optional): replacement character.
            Defaults to "_".

    Returns:
        string: sanitized string
    """
    # Keep letters, digits, '.', '_' and '-'; optionally keep whitespace.
    if space_allowed:
        pattern = r"([^\sa-zA-Z0-9._-])"
    else:
        pattern = r"([^a-zA-Z0-9._-])"
    return re.sub(pattern, replace_with_character, name)
import pathlib
import logging
import sys
def find_file(file_name):
    """Check if a file exists; return its name, or exit the program if not.

    :param file_name: path (str or Path) to check
    :return: the path as ``str`` when the file exists
    :raises SystemExit: when the file does not exist
    """
    # Bug fix: the previous code tested the truthiness of Path.resolve(),
    # which is always a non-empty Path object and therefore always true --
    # the missing-file branch was unreachable.  exists() performs the check
    # the docstring promises.
    if pathlib.Path(file_name).exists():
        file_name = str(file_name)
        logging.info(f' found {file_name}.')
        return file_name
    logging.error(f' no file {file_name} found for processing.')
    sys.exit()
def _site_names(action_graph, node, parent_components):
""" kappa site name from locus or state of the actiongraph """
return ["{}_{}".format(node, parent)
for parent in action_graph[node]
if parent in parent_components] | c5a154f053c08ecaf48d861fd3fb085b02da3002 | 37,082 |
def make_candidate_numbers(candidate_count):
    """Return an iterable of candidate numbers, 1 through candidate_count."""
    first = 1
    return range(first, candidate_count + first)
import os
def splitdrive(p):
    """Split a pathname into drive name and relative path specifiers.
    Returns a 2-tuple (drive, path); either part may be empty.
    If you assign
        result = splitdrive(p)
    It is always true that:
        result[0] + result[1] == p
    On RISC OS, we use drive to mean the filesystem, special fields and media
    descriptor. Eg. Net#0.254::Server., ADFS:, SDFS::RISCOSPi. In order to
    satisfy the condition above, the drive may include a trailing . (marking
    the end of disc specification).
    """
    p = os.fspath(p)
    pos = p.find(':')
    if pos == 0: # Starts with a disc specification
        pos = p.find('.',0) # No end of disc spec, so everything is the drive
        if pos < 0:
            return p,''
        return p[0:pos+1],p[pos+1:]
    if pos > 0: # Starts with a filing system
        if pos < len(p)-1 and p[pos+1] == ':': # Found disc spec too
            # Look for the '.' that terminates the disc specification.
            pos = p.find('.',pos)
            if pos < 0: # No end of disc spec, so everything is the drive
                return p,''
            else:
                return p[0:pos+1],p[pos+1:]
        else: # No disc spec
            return p[0:pos+1],p[pos+1:]
    # No ':' anywhere: there is no drive component at all.
    return '',p | a79e1cf688841a26f1270076797713905df3a54c | 37,084 |
def _path(source, target, parent, path):
    """
    This function finds the path from source to the target
    according to the parent dictionary. It must be used for
    shortest_path_faster function.
    :param source: Float
        Id of the start node
    :param target: Float
        Id of the goal node
    :param parent: Dictionary
        The value of each key is the parent
        node (predecessor node).
    :param path: list
        The list contains the id of the nodes
        of the path from source to the target.
    :return: list
        The list contains the id of the nodes
        of the path from source to the target.
    """
    # First (outermost) call: seed the reversed path with the target node.
    if len(path) == 0:
        path.append(target)
    if target == source:
        pass
    elif parent.get(target) is None:
        # NOTE(review): an unreachable target makes this recursive call
        # return False, but the enclosing call still returns the partial
        # reversed path -- confirm callers handle both shapes.
        print("Target cannot be reached")
        return False
    else:
        # Append the predecessor and recurse; the list is built
        # target -> source and reversed on return.
        path.append(parent.get(target))
        _path(source, parent.get(target), parent, path)
    return path[::-1] | 3c44a5ba53a1fa05b19b1da875273bb2d64ca521 | 37,085 |
def good_astrom_func(table):
    """Require the star to have good astrometry (RUWE below 1.4)."""
    ruwe_limit = 1.4
    return table['ruwe'] < ruwe_limit
import re
def stem(body,stopwords):
    """
    Rules applied here:
    1. lowercase everything
    2. remove all '\n'
    3. (unfinished in the original)
    Arguments:
    - `body`: the string to be processed
    - `stopwords`: collection of words to drop from the result
    """
    body = body.lower()
    body = body.strip()
    # Strip HTML-ish markup and punctuation by replacing with spaces.
    body = body.replace('\n',' ')
    body = body.replace('\t',' ')
    body = body.replace('<p>',' ')
    body = body.replace('<pre>',' ')
    body = body.replace('</pre>',' ')
    body = body.replace('< pre>',' ')
    body = body.replace('<code>',' ')
    body = body.replace('</code>',' ')
    body = body.replace('</p>',' ')
    body = body.replace('<ol>',' ')
    body = body.replace('<li>',' ')
    body = body.replace('</ol>',' ')
    body = body.replace('</li>',' ')
    body = body.replace('<a',' ')
    body = body.replace('&amp;',' ')
    body = body.replace('&lt;',' ')
    body = body.replace('&gt;',' ')
    body = body.replace('e.g.',' ')
    body = body.replace('/',' ')
    body = body.replace('=',' ')
    body = body.replace(',',' ')
    body = body.replace('(',' ')
    body = body.replace(')',' ')
    body = body.replace('?',' ')
    body = body.replace('[',' ')
    body = body.replace(']',' ')
    body = body.replace('{',' ')
    body = body.replace('}',' ')
    body = body.replace('~',' ')
    body = body.replace(':',' ')
    body = body.replace('$',' ')
    body = body.replace('"',' ')
    body = body.replace(';',' ')
    body = body.replace('*',' ')
    body = body.replace('+',' ')
    body = body.replace('>',' ')
    body = body.replace('<',' ')
    body = body.replace('%',' ')
    body = body.replace('@',' ')
    body = body.replace('!',' ')
    # Drop ellipses, numbers, and collapse runs of spaces.
    body = re.sub("\.{1,3}"," ",body)
    body = re.sub("\d+|\d.\d+", " ", body)
    body = re.sub(" +", " ", body)
    body = body.split()
    # These two lambdas strip a leading or trailing apostrophe; a quote in
    # those positions is assumed to be noise.
    g1 = lambda x : x[1:] if x.startswith("'") else x
    g2 = lambda x : x[:-1] if x.endswith("'") else x
    # Replace every underscore with '-'.
    g3 = lambda x : '-' if x == '_' else x
    g4 = lambda x : x if x not in stopwords else ""
    body = map(g1,body)
    body = map(g2,body)
    # NOTE(review): set() deduplicates but makes word order dependent on the
    # interpreter's hash seed, so output can vary across runs.
    body = set(body)
    body = filter(g4,body)
    body = ' '.join(body)
    # g3 runs per character here (after join), mapping '_' chars to '-'.
    body = map(g3,body)
    body = ''.join(body)
    return body | cf4ff036e61461142b23eda961bf05ae30ac8de0 | 37,087 |
def binaryToDecimal(binary):
    """
    Convert a binary fraction string (example: '01001') to its decimal value.

    Each character is a bit after the binary point, so '1' -> 0.5 and
    '01' -> 0.25.

    Args:
        binary: string of '0'/'1' characters to convert
    """
    value = 0
    divisor = 2
    for ch in binary:
        value += (ord(ch) - ord("0")) / divisor
        divisor *= 2.0
    return value
import yaml
def yaml_dump(object, **kwargs):
    """
    Give the yaml representation of an object as a unicode string.

    :param object: The object to get the unicode YAML representation from.
    :returns: A unicode string.
    """
    encoding = 'utf-8'
    dumped = yaml.dump(object, encoding=encoding, allow_unicode=True, **kwargs)
    return dumped.decode(encoding)
import numpy
def calculate_beamsize(semimajor, semiminor):
    """Calculate the beam size (ellipse area) from the semi-major and
    semi-minor axes."""
    axes_product = semimajor * semiminor
    return numpy.pi * axes_product
def format_file_size(v):
    """Format file size into a human friendly format"""
    magnitude = abs(v)
    if magnitude > 10**12:
        return '%.2f TB' % (v / 10**12)
    if magnitude > 10**9:
        return '%.2f GB' % (v / 10**9)
    if magnitude > 10**6:
        return '%.2f MB' % (v / 10**6)
    if magnitude > 10**3:
        return '%.2f kB' % (v / 10**3)
    return '%d B' % v
from typing import List
import random
def generate_random_adjacent_matrix(size: int) -> List[List[int]]:
    """
    Generate a square adjacency matrix with roughly 75% ones and 25% zeros.

    Being square means the number of columns always equals the number of
    rows.

    :param int size: edge length of the matrix, e.g. 4 gives a 4x4 matrix.
    :rtype: List[List[int]]
    """
    weighted_values = [0] * 25 + [1] * 75
    matrix = []
    for _row in range(size):
        matrix.append([random.choice(weighted_values) for _col in range(size)])
    return matrix
def _get_labels_from_sample(labels, classes):
"""Translate string labels to int."""
sorted_labels = sorted(list(classes))
return [sorted_labels.index(item) for item in labels] if isinstance(labels, list) else sorted_labels.index(labels) | 031760e1e9e1fd487ebd24928def619e5360179a | 37,093 |
def build_s3_url(filenames, bucket):
    """
    Convert file names to AWS S3 URLs.

    params:
        bucket: string, AWS S3 bucket name
        filenames: list of strings, AWS S3 filenames
    """
    return ['https://{}.s3.amazonaws.com/{}'.format(bucket, name)
            for name in filenames]
import os
def mock_mode():
    """
    Check whether the TOIL_SCRIPTS_MOCK_MODE environment variable is set to
    a non-zero integer.

    In mock mode, all docker calls other than those to spin up and submit
    jobs to the spark cluster are stubbed out and dummy files are used as
    inputs and outputs.

    :return: True when mock mode is enabled, otherwise False.
    """
    # bool(int(...)) replaces the redundant ``True if X else False`` form;
    # behavior is identical (non-zero -> True).
    return bool(int(os.environ.get('TOIL_SCRIPTS_MOCK_MODE', '0')))
from typing import List
import hashlib
def hashes() -> List[str]:
    """
    Return a list of available hashing algorithms.

    MD5/MD2 come first when present, followed by every hashlib attribute
    whose name starts with 'sha', rendered as 'SHA-<suffix>'.

    :rtype: list of strings
    """
    available = dir(hashlib)
    legacy = []
    if 'md5' in available:
        legacy = ['MD5']
    if 'md2' in available:
        legacy += ['MD2']
    sha_like = ['SHA-' + name[3:] for name in available if name.startswith('sha')]
    return legacy + sha_like
from math import sqrt, ceil
def grid_shape(i, max_x=4):
    """Return a good grid shape, as (x, y), for a number of items i,
    capping the x dimension at max_x."""
    x = min(round(sqrt(i)), max_x)
    y = ceil(i / x)
    return x, y
def part_1(puzzle_input):
    """Sum every metadata entry of the serialized license tree (AoC 2018
    day 8, part 1).

    The input is a whitespace-separated number list where each node is
    [child count, metadata count, children..., metadata...].  A stack of
    (children left, metadata count) pairs tracks partially-read nodes; when
    a node's last child has been read, it is popped and its metadata
    consumed.
    """
    numbers = [int(token) for token in puzzle_input.split()]
    pending = []
    pos = 0
    total = 0
    while pos < len(numbers):
        if pending:
            children_left, meta_count = pending[-1]
            if children_left == 0:
                # Node complete: drop it and consume its metadata below.
                pending.pop()
            else:
                # Descend into the next child.
                pending[-1] = (children_left - 1, meta_count)
                children_left, meta_count = numbers[pos:pos + 2]
                pos += 2
        else:
            children_left, meta_count = numbers[pos:pos + 2]
            pos += 2
        if children_left == 0:
            for _ in range(meta_count):
                total += numbers[pos]
                pos += 1
        else:
            pending.append((children_left, meta_count))
    return total
def scene_element_slicer(r_i, c_i, scene_element_position_dict, scene_element_dimension, as_slice=False):
    """
    Return the row/column bounds needed to slice a scene element's pixels.

    Parameters
    ----------
    r_i
        Scene element row
    c_i
        Scene element column
    scene_element_position_dict
        Lookup table mapping (row, col) to the element's center pixel
    scene_element_dimension
        Size of scene element
    as_slice
        Whether to return actual slice objects instead of start/end indices

    Returns
    -------
    row, col slices (or the four start/end indices)
    """
    half = scene_element_dimension // 2
    center_row, center_col = scene_element_position_dict[r_i, c_i]
    row_start, row_stop = int(center_row) - half, int(center_row) + half + 1
    col_start, col_stop = int(center_col) - half, int(center_col) + half + 1
    if as_slice:
        return slice(row_start, row_stop), slice(col_start, col_stop)
    return row_start, row_stop, col_start, col_stop
import os
def get_dataset_names():
    """List the base names of files under the package's ``data`` directory.

    Returns a sorted list of file names with everything from the first
    ``.`` onward stripped.
    """
    data_root = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
    names = []
    for _dirpath, _subdirs, files in os.walk(data_root):
        for file_name in files:
            names.append(file_name.split('.', maxsplit=1)[0])
    names.sort()
    return names
def parse_input(value):
    """Convert user input to an int, returning None when conversion fails.

    :param value: arbitrary user-supplied value
    :return: the integer value, or None when *value* is not convertible
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # The previous bare ``except:`` swallowed everything, including
        # KeyboardInterrupt; int() only raises these two for bad input.
        return None
import socket
def host_to_ip(hname):
    """**host_to_ip(hname)** -> return the ip if founded, '' otherwise.

    * hname: (string) name of the host to get IP from DNS.
    <code>
    Example:
    host_to_ip('www.google.com')
    Returns:
    '74.125.206.147'
    </code>
    """
    try:
        return socket.gethostbyname(hname)
    except socket.error:
        # DNS resolution failed: signal with an empty string.
        return ''
def psfScale(D, wavelength, pixSize):
    """
    Return the PSF scale appropriate for the required pixel size, wavelength
    and telescope diameter.

    The aperture is padded by this amount; the resultant pixel scale is
    lambda/D/psf_scale, so for instance full frame 256 pix for 3.5 m at
    532 nm is 256*5.32e-7/3.5/3 = 2.67 arcsec for psf_scale = 3.

    Args:
        D (real): telescope diameter in m
        wavelength (real): wavelength in Angstrom
        pixSize (real): pixel size in arcsec
    Returns:
        real: psf scale
    """
    diameter_cm = D * 100.0
    wavelength_cm = wavelength * 1e-8
    return 206265.0 * wavelength_cm / (diameter_cm * pixSize)
def _render_namespace_prefix(namespace):
"""Returns namespace rendered as a prefix, like ::foo::bar::baz."""
return "".join(["::" + n for n in namespace]) | 68e20189cdfd47872b9e9db34451da15b036d280 | 37,107 |
import os
from pathlib import Path
def get_good_jars(dir, prefix=True):
    """
    Walk a directory (non-recursively) and return a list of (good) jars.

    Some jars included in giraph have classes that are incompatible with yarn
    nodemanagers. Filter these out when constructing a list of good
    jars. If omitting the entire .jar is too coarse, the jar will need to be
    reconstructed with the offending .class removed.

    param: str dir: Directory to walk
    param: bool prefix: When true, prepend the directory to each jar entry
        (entries become ``Path`` objects)
    """
    # Known incompatible jars:
    # - hive-exec-0.11.0 protobuf class
    #   java.lang.VerifyError: ... overrides final method getUnknownFields
    bad_jars = ['hive-exec-0.11.0.jar']
    good_jars = []
    for file in os.listdir(dir):
        if file.endswith('.jar') and file not in bad_jars:
            # Bug fix: was ``Path(dir / file)``, which raises TypeError
            # when *dir* is a plain string; convert to Path first, then join.
            good_jars.append(Path(dir) / file if prefix else file)
    return good_jars
def twe_parameter_space_getter(X, y):
    """
    generate the twe distance measure parameter ranges
    :param X: dataset to derive parameter ranges from (accepted for
        interface compatibility; the ranges are fixed)
    :return: parameter range dictionary
    """
    penalties = [0, 0.011111111, 0.022222222, 0.033333333, 0.044444444,
                 0.055555556, 0.066666667, 0.077777778, 0.088888889, 0.1]
    stiffnesses = [0.00001, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]
    return {'penalty': penalties, 'stiffness': stiffnesses}
from typing import FrozenSet
import dateutil
def get_known_timezones() -> FrozenSet[str]:
    """
    Return a set of the known timezone names.

    The list is pulled from the internal database shipped inside `dateutil`
    as there doesn't seem to be a nice way to pull the data from the system.
    These are expected to change sufficiently infrequently that this is ok;
    `dateutil` falls back to this data source anyway, so at worst this is a
    strict subset of the available timezones.
    """
    # Build a fresh, non-cached `ZoneInfoFile` so we're not stuck holding a
    # MB of redundant memory for its cache.
    # ignore types because `dateutil.zoneinfo` isn't present in the typeshed
    zone_file = dateutil.zoneinfo.ZoneInfoFile(  # type: ignore
        dateutil.zoneinfo.getzoneinfofile_stream(),  # type: ignore
    )
    return frozenset(zone_file.zones)
def get_mrns_from_database(results):
    """Collect the medical record number (MRN) of every patient record.

    The previous docstring described an unrelated heart-rate POST endpoint;
    this function only extracts ``MRN`` attributes.

    Args:
        results: iterable of patient records, each exposing an ``MRN``
            attribute (e.g. a QuerySet of all patients in the database).

    Returns:
        list [int]: list of MRNs, in iteration order.
    """
    return [item.MRN for item in results]
def image_shape(rows, cols):
    """Infer an image's (n_rows, n_cols) from arrays of pixel row/col indices.

    Assumes zero-based indices, so the extent is max index + 1.
    """
    n_rows = int(rows.max()) + 1
    n_cols = int(cols.max()) + 1
    return n_rows, n_cols
def _checkNconvertStr(texts):
"""
Checks whether input is a string or if it can be casted into one
:param texts: A string or a type which can be converted into one
:return: Text as a string if successful otherwise nothing (exception)
"""
concatTextLst = []
for text in texts:
# Test texts elements
tempText = str(text)
if type(tempText) is not str:
raise TypeError("Input type must be a string or convertible into a string")
concatTextLst.append(str(text))
return " ".join(concatTextLst) # Preserve whitespaces when using multiple text elements | 430157ecf0f404f77fef497c39d4b0525b1b4f71 | 37,113 |
from typing import List
from typing import Any
def getListDifference(listOne: List[Any], listTwo: List[Any]) -> List[Any]:
    """ Return difference (items that are unique just to a one of given lists)
    of a given lists.

    Note:
        Operation does not necessary maintain items order!

    Args:
        listOne: first list to compare.
        listTwo: second list to compare.
    """
    return list(set(listOne) ^ set(listTwo))
def validate(model, val_loaders, criterion, device):
    """Run one evaluation pass and return the summed, size-weighted loss.

    Args:
        model (nn.Module): Model (switched to eval mode).
        val_loaders (list(DataLoaders)): Validation dataloaders.
        criterion (torch.nn.criterion): Torch loss criterion; the per-batch
            loss is re-weighted by batch size, which assumes a 'mean'
            reduction — TODO confirm with callers.
        device (str, optional): Target device, e.g. 'cpu'.

    Returns:
        float: accumulated validation error over all loaders.
    """
    model.eval()
    val_er = 0.0
    for val_loader in val_loaders:
        for (X, y) in val_loader:
            # Bug fix: Tensor.to() is not in-place; the returned tensors
            # were discarded, so the batch never reached `device`.
            X = X.to(device)
            y = y.to(device)
            y_pred = model(X)
            loss = criterion(y_pred, y)
            val_er += loss.item() * X.size(0)
    return val_er
import requests
from bs4 import BeautifulSoup
def get_soup(link):
    """Fetch *link* and return a BeautifulSoup of an 8-K page body.

    Returns the string 'na' on any failure. NOTE(review): the 'na' string
    sentinel is preserved for existing callers, though None would be more
    conventional.
    """
    try:
        request = requests.get(link)
        return BeautifulSoup(request.content, 'html5lib', from_encoding='ascii')
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # still propagate; network/parse errors fall back to the sentinel.
        return 'na'
def _get(isamAppliance, id):
"""
Internal function to get data using "id" - used to avoid extra calls
:param isamAppliance:
:param id:
:return:
"""
return isamAppliance.invoke_get("Retrieve a specific mapping rule",
"/iam/access/v8/mapping-rules/{0}".format(id)) | fbaccf5cf8a0b1b8f4be12e07d0ac2bf7dd20463 | 37,118 |
from typing import Iterable
def dedupe(iterable: Iterable) -> Iterable:
    """
    Remove duplicates, preserving first-seen order (relies on insertion-ordered
    dicts, i.e. Python 3.6+). Returns an iterator.
    """
    unique_keys = dict.fromkeys(iterable)
    return iter(unique_keys)
def _latex_unit(unit):
""" Hack to get a single line latex representation of a unit
Will be obsolete with format='latex_inline' in astropy 0.4.1
"""
l = unit.to_string('cds').split('.')
out = ''
for uni in l:
try:
int(uni[-1])
if uni[-2] == '-':
out += ' {0}$^{{{1}}}$'.format(uni[:-2], uni[-2:])
else:
out += ' {0}$^{1}$'.format(uni[:-1], uni[-1:])
except ValueError:
out += ' ' + uni
return out[1:] | d9ac04dabf2a217d97e3d3767201c8fa1d9d528c | 37,120 |
def boto_all(func, *args, **kwargs):
    """
    Call a paginated boto API repeatedly until every page is consumed.

    Any list-valued fields of each response page are concatenated and
    returned as one flat list.

    :param func: boto client method accepting an optional ``NextToken``
    :return: concatenation of all list-valued fields across pages
    """
    ret = []
    while True:
        resp = func(*args, **kwargs)
        for val in resp.values():
            if type(val) is list:
                ret.extend(val)
        token = resp.get('NextToken', None)
        if not token:
            break
        # Bug fix: the continuation token lives on the *response* dict; the
        # old ``ret[-1].NextToken`` crashed on plain result items.
        kwargs['NextToken'] = token
    return ret
import time
def create_export_file(converted_posts):
    """
    Create a Ghost import json from a list of Ghost post documents.

    :param converted_posts: Ghost formatted python docs.
    :return: A Dict representation of a ghost export file you can dump to json.
    """
    meta = {"exported_on": int(time.time()), "version": "2.18.3"}
    data = {"posts": converted_posts}
    return {"db": [{"meta": meta, "data": data}]}
def get_prots(docked_prot_file):
    """
    Parse the index file listing all (protein, target ligand, start ligand)
    triples to process. Lines beginning with '#' are comments and skipped.

    :param docked_prot_file: (string) file listing proteins to process
    :return: (list) of (protein, target, start) tuples
    """
    triples = []
    with open(docked_prot_file) as fh:
        for line in fh:
            if line.startswith('#'):
                continue
            protein, target, start = line.strip().split()
            triples.append((protein, target, start))
    return triples
from pathlib import Path
def relative_to(path: Path, root: Path) -> Path:
    """Returns a path to `path` relative to `root`, inserting '..' as needed.

    Postcondition: (root / relative_to(path, root)).resolve() == path.resolve()
    """
    try:
        return path.relative_to(root)
    except ValueError:
        # *path* is not below *root*: step one level up and recurse.
        # (The original wrapped this in a ``while True`` that could never
        # iterate twice — both branches return on the first pass.)
        return ".." / relative_to(path, (root / "..").resolve())
import os
def partfile(path, raw=False):
    """Split a path into (directory, basename, extension).

    With ``raw=True`` the extension stays attached to the basename and ''
    is returned for ext.
    """
    dirpath, filename = os.path.split(path)
    if raw:
        # raw mode: don't extract the extension
        return (dirpath, filename, "")
    basename, ext = os.path.splitext(filename)
    return (dirpath, basename, ext)
def read_contents_source(contents_path):
    """Reads all source file name entries (including ">"-depth-prefixes) from
    the help-contents file, dropping blank lines.

    :param contents_path: path of the contents file
    :return: list of non-empty stripped lines
    """
    # Bug fix: the file handle was never closed; use a context manager.
    with open(contents_path) as f:
        return [line.strip() for line in f if line.strip()]
def shunt(infix):
    """
    Convert an infix regular expression to its postfix form using the
    shunting-yard algorithm.

    Parameters:
        arg1 (String): Infix regular expression.

    Returns:
        String: The postfix equivalent.
    """
    # Reverse the input into a list so .pop() consumes it left-to-right
    infix = list(infix)[::-1]
    # Operator stack
    opers = []
    # Output list
    postfix = []
    # Operator precedence (higher binds tighter; parens lowest so they are
    # never popped by the precedence comparison below)
    prec = {'*': 100, '.': 90, '+': 80, '?': 70, '|': 60, ')': 40, '(': 20}
    # Loop through the input one character at a time
    while infix:
        # pop a character from the input
        c = infix.pop()
        # Decide precedence
        if c == '(':
            opers.append(c)
        elif c == ')':
            # pop the opers stack to the output until the matching '('
            while opers[-1] != '(':
                postfix.append(opers.pop())
            # Get rid of the '('
            opers.pop()
        elif c in prec:
            # move operators with strictly higher precedence to the output
            while opers and prec[c] < prec[opers[-1]]:
                postfix.append(opers.pop())
            # Push c to the opers stack
            opers.append(c)
        else:
            # literal character: straight to the output
            postfix.append(c)
    # Pop all remaining operators to the output
    while opers:
        postfix.append(opers.pop())
    # convert output list to string
    postfix = ''.join(postfix)
    return postfix
def normalize_list(val, lst):
    """Normalize a unitary list: returns ``lst * val`` (list replication).

    :raises RuntimeError: if *lst* does not contain exactly one item.
    """
    if len(lst) == 1:
        return lst * val
    raise RuntimeError("Cannot Normalize a non-unitary list")
def strip_quotes(arg):
    """ Strip one pair of matching outer quotes (single or double) from a string.

    :param arg: str - string to strip outer quotes from
    :return str - same string with potentially outer quotes stripped
    """
    if len(arg) > 1 and arg[0] in "\"'" and arg[-1] == arg[0]:
        return arg[1:-1]
    return arg
import math
def BytesPerRowBMP(width, bpp):
    """Computes the number of bytes per row in a Windows BMP image:
    width * bpp bits, rounded up to the nearest multiple of 4 bytes."""
    bits_per_row = width * bpp
    dwords = math.ceil(bits_per_row / 32.0)
    return int(dwords) * 4
def is_completed(grid):
    """
    Checks if a grid is completed.

    Grids are completed when all cells in them contain non-zero values.

    Arguments:
        grid {number matrix} -- The matrix to check

    Returns:
        bool -- True when no cell holds 0, otherwise False
    """
    return all(0 not in row for row in grid)
def str_to_bool(str_):
    """Map the exact strings "True"/"False" to booleans; raise TypeError otherwise."""
    if str_ == "True":
        return True
    if str_ == "False":
        return False
    raise TypeError(str_)
def generate_name(route, deployment):
    """
    Generate the unique "<route>_<deployment>" name for a route in a deployment.

    :param route: the id of the route
    :param deployment: the id of the deployment
    :return: the unique name for the route
    """
    return "{}_{}".format(route, deployment)
def get_or_prompt(config, key, prompt_fn, *args, **kwargs):
    """
    Fetch *key* from *config*, prompting for (and storing) the value when absent.

    :param config: The configuration object to get the value from
    :param key: The configuration key to retrieve
    :type key: str
    :param prompt_fn: The prompt function used to ask the user when unset
    :param args: Extra arguments for the prompt function
    :param kwargs: Extra keyword arguments for the prompt function
    """
    value = config.get(key)
    if value is not None:
        return value
    value = prompt_fn(*args, **kwargs)
    config.set(key, value)
    return value
def Binary_Search(Arr, find):
    """
    Classic binary search over the sorted list *Arr*.

    type Arr: list[int]
    type find: int
    :return: an index of *find* in *Arr*, or -1 when absent
    """
    lo, hi = 0, len(Arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = Arr[mid]
        if value == find:
            return mid
        if value < find:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
import glob
import os
def _get_fastq_files(ldetail, read, fastq_dir):
"""Retrieve fastq files corresponding to the sample and read number.
"""
return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"],
"Sample_%s" % ldetail["name"],
"%s_*_%s_*.fastq.gz" % (ldetail["name"], read))) | 5845c2a96a6057b8fcc78e282a8aabd8e9bd800a | 37,145 |
from numpy import zeros
def pixelate_xy(x, y, pixel_length=10, dtype=None, saturation_value=None):
    """
    Return pixelated x,y-data: consecutive runs of *pixel_length* samples are
    averaged into one output value. The length of x and y has to be divisible
    by pixel_length.

    :param x, y: 1-D arrays of equal length
    :param pixel_length: samples averaged into each output pixel
    :param dtype: output dtype for both arrays; defaults to the inputs' dtypes
    :param saturation_value: if given, y values >= this are clipped to it
    :raises ValueError: on mismatched lengths or non-divisible length
    """
    if x.shape[0] != y.shape[0]:
        raise ValueError('The shape of x and y input vectors has to be the same')
    if (x.shape[0] % pixel_length) != 0 or (y.shape[0] % pixel_length) != 0:
        raise ValueError('The length of the input data arrays has to be divisable by pixel_length')
    x_dtype = x.dtype if dtype is None else dtype
    y_dtype = y.dtype if dtype is None else dtype
    # Vectorized replacement for the per-pixel Python loop: reshape into
    # (n_pixels, pixel_length) rows and average each row. astype() reproduces
    # the truncation the old element-wise assignment performed for int dtypes.
    x_new = x.reshape(-1, pixel_length).mean(axis=1).astype(x_dtype)
    y_new = y.reshape(-1, pixel_length).mean(axis=1).astype(y_dtype)
    if saturation_value is not None:
        y_new[y_new >= saturation_value] = saturation_value
    return x_new, y_new
def limit(val: int) -> int:
    """
    Clamp *val* into the byte range: min(max(val, 0), 255).
    """
    return min(max(val, 0), 255)
import json
def get_message_content_messy_data(parsedData):
    """
    Serialize parsed tokens to JSON mapping index -> "orth pos lemma".

    spaCy is trained to attempt to handle messy data, including emoticons
    and other web-based features; this flattens its token attributes.

    :param parsedData: iterable of tokens exposing ``orth_``/``pos_``/``lemma_``
    :return: JSON string (dict keys are serialized token indices)
    """
    token_map = {}
    for index, token in enumerate(parsedData):
        token_map[index] = "{} {} {}".format(token.orth_, token.pos_, token.lemma_)
    return json.dumps(token_map)
import itertools
def normalize_padding(padding, rank):
    """
    Normalize *padding* to a flat form of length 2*rank, ordered per
    dimension — e.g. for Conv2d (rank=2): (left, right, top, bottom).

    Args:
        padding (None, int, tuple): raw padding spec
        rank (int): spatial rank

    Returns:
        the normalized format of padding

    Examples:
        >>> normalize_padding(((1,0),(1,0)),2)
        (1, 0, 1, 0)
        >>> normalize_padding((1,0),2)
        (0, 0, 1, 1)
    """
    total = 2 * rank
    if padding is None:
        return (0,) * total
    if isinstance(padding, int):
        return (padding,) * total
    if isinstance(padding, (list, tuple)):
        if len(padding) == 1:
            return padding * total
        if len(padding) == rank and isinstance(padding[0], int):
            # rank=2: (1,1)=>(1,1,1,1), (1,0)=>(0,0,1,1)
            flat = []
            for amount in reversed(padding):
                flat.append(amount)
                flat.append(amount)
            return tuple(flat)
        if len(padding) == rank and isinstance(padding[0], (list, tuple)):
            # rank=2: ((1,0),(1,0)) => (1,0,1,0)
            return tuple(itertools.chain.from_iterable(padding))
    # already flat (length 2*rank) or unrecognized: pass through unchanged
    return padding
import math
def angle_changing(acc: tuple, alpha: float, beta: float, theta: float) -> tuple:
    """
    Transform an (x, y, z) acceleration by the euler angles (radians),
    applied in the order theta -> beta -> alpha, with the final y negated.

    NOTE(review): the theta stage uses ``x*sin - y*cos`` for y, which is not
    an orthonormal rotation on its own; combined with the final y negation
    it reproduces the original behavior — confirm intent before reuse.

    :param acc: acc_x, acc_y, acc_z
    :return: adjusted acc
    """
    x, y, z = acc
    sin_t, cos_t = math.sin(theta), math.cos(theta)
    sin_b, cos_b = math.sin(beta), math.cos(beta)
    sin_a, cos_a = math.sin(alpha), math.cos(alpha)
    # Stage 1: theta
    x1 = x * cos_t - y * sin_t
    y1 = x * sin_t - y * cos_t
    z1 = z
    # Stage 2: beta
    x2 = x1 * cos_b + z1 * sin_b
    y2 = y1
    z2 = z1 * cos_b - x1 * sin_b
    # Stage 3: alpha, then negate y
    x3 = x2
    y3 = -(y2 * cos_a - z2 * sin_a)
    z3 = z2 * cos_a + y2 * sin_a
    return x3, y3, z3
import string
import secrets
def get_alphanumeric_unique_tag(tag_length: int) -> str:
    """Generates a cryptographically random alphanumeric string (a-z0-9)
    of the specified length.

    :raises ValueError: when *tag_length* is less than 1.
    """
    if tag_length < 1:
        raise ValueError("Unique tag length should be 1 or greater.")
    alphabet = string.ascii_lowercase + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(tag_length))
from typing import Any
from typing import TypeGuard
import numbers
def is_complex(x: Any) -> TypeGuard[complex]:
    """Return true if x is complex.

    Note: ``numbers.Complex`` is the top of the numeric tower, so ints and
    floats also satisfy this check, not only ``complex`` instances.
    """
    return isinstance(x, numbers.Complex)
def is_obj(x):
    """
    A quick (but maybe not perfect) check for object types.

    Returns:
        bool: True if the object has a __dict__ attribute, otherwise False
    """
    # ``hasattr`` replaces the old getattr + bare ``except`` pattern: the
    # getattr result was discarded and the blanket handler hid unrelated
    # errors (only AttributeError should mean "not an object").
    return hasattr(x, '__dict__')
import os
def getFileList(dir):
    """
    Find all files in dir and its sub-dirs.

    Side effects preserved from the original: a missing *dir* is created
    (then yields an empty list), and a *dir* that is itself a file returns
    a single-element list containing it.
    """
    if not os.path.exists(dir):
        os.makedirs(dir)
    if os.path.isfile(dir):
        return [dir]
    fileList = []
    # os.walk replaces the old per-directory recursion (which also shadowed
    # its own result name as ``filelist`` and re-listed every level).
    for dirpath, _subdirs, filenames in os.walk(dir):
        for name in filenames:
            fileList.append(os.path.join(dirpath, name))
    return fileList
from typing import Any
from typing import Callable
def verify_operands_classes_and_forward_to_callable(
    x_operand: Any,
    y_operand: Any,
    operand_class: type,
    function: Callable[[Any, Any], Any],
) -> Any:
    """
    Verify both operands are instances of *operand_class*, then delegate
    to *function*.

    :param x_operand: First operand.
    :param y_operand: Second operand.
    :param operand_class: required type of both operands.
    :param function: binary callable invoked on success.
    :raises TypeError: if either operand is not of operand_class.
    :return: The return value of the callable, if any.
    """
    both_ok = (isinstance(x_operand, operand_class)
               and isinstance(y_operand, operand_class))
    if not both_ok:
        raise TypeError("x_operand and y_operand are not both ", operand_class)
    return function(x_operand, y_operand)
def wrap_middleware(following, func):
    """Helper function to wrap middleware; adapts the middleware contract
    to ``handler(request) -> response``.

    ``following`` - next middleware in the chain.
    ``func`` - middleware callable, invoked as ``func(request, following)``.
    """
    def handler(request):
        return func(request, following)
    return handler
def get_ideal_class_size(df, label):
    """For binary classes, work out what the balanced class size would be
    (the midpoint of the two class counts).

    NOTE(review): indexes ``value_counts()`` by the labels 0 and 1, so the
    column is assumed to contain exactly those values.
    """
    counts = df[label].value_counts()
    count_zero, count_one = counts[0], counts[1]
    half_gap = abs(count_zero - count_one) / 2
    return min(count_zero, count_one) + half_gap
from typing import List
def staircase(size: int) -> List[str]:
"""
>>> staircase(4)
[' #', ' ##', ' ###', '####']
"""
# ret = [f"{'#' * i:>{size}}" for i in range(1, size+1)]
ret = [('#' * i).rjust(size) for i in range(1, size+1)]
return ret | e75508019be90223a49aa5161876e34018b387e5 | 37,165 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.