content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import argparse
def inputHandling():
    """Define and parse the command-line interface.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    cli = argparse.ArgumentParser(description='Gets the SV consensus.')
    cli.add_argument('-f', '--sv_folder', required=True,
                     help='Folder containing folders of samples with raw outputs from SV callers (comma-separated). More information on the structure of the samples folder in readme.')
    cli.add_argument('-mod', '--model', required=False, default="pretrained.model",
                     help='Model used for SV discovery (default pretrained.model).')
    cli.add_argument('-o', '--output', default="consensuSV_",
                     help='Output file prefix.')
    cli.add_argument('-of', '--output_folder', default="output/",
                     help='Output folder. Default in the ConsensuSV folder output/')
    cli.add_argument('-s', '--samples', required=False, default=None,
                     help='Samples to include. By default all in the sv_folder. Comma-separated.')
    cli.add_argument('-c', '--callers', required=False, default=None,
                     help='Callers to include. By default all in the folders. Comma-separated.')
    cli.add_argument('-m', '--min_overlap', default="min_overlaps",
                     help='File with minimum numbers of SVs in the neighbourhood for the SV to be reported (default min_overlaps).')
    cli.add_argument('-t', '--train', action="store_true", required=False,
                     help='Creates new model. Requires truth.vcf to be present in all the sv folders. VCF file truth.vcf is preprocessed even if flag --no_preprocess is set. If you train the model, you need to rerun the program to get the consensus.')
    cli.add_argument('-np', '--no_preprocess', action="store_true", required=False,
                     help='Flag used for skipping the preprocessing process - all the preprocessed files should be in temp/ folder.')
    return cli.parse_args()
def _make_recur(obj, cls, make_fn, **options):
    """
    Convert one mapping into another mapping class, applying `make_fn`
    (with **options) to every non-None value; None values pass through.

    :param obj: An original mapping object
    :param cls: Another mapping class to make/convert to
    :param make_fn: Function to make/convert to
    """
    converted = []
    for key, val in obj.items():
        converted.append((key, None if val is None else make_fn(val, **options)))
    return cls(converted)
def minmax(dmax_cup):
    """
    Find min and max position of the all shots.

    Parameters
    ----------
    dmax_cup: list of tuples
        each tuple has shot parameters; index 3 is the shot Y position
        and index 4 the shot Z position
    returns: tuple of floats
        shot Y min, Y max, Z min, Z max
    """
    # BUGFIX: the original used +/-10000.0 as sentinel extremes, which
    # silently produced wrong results for coordinates outside that range.
    # Infinities are safe for any input (an empty list yields
    # (inf, -inf, inf, -inf)).
    ymin = zmin = float("inf")
    ymax = zmax = float("-inf")
    for shinfo in dmax_cup:
        shot_y = shinfo[3]
        shot_z = shinfo[4]
        ymin = min(ymin, shot_y)
        ymax = max(ymax, shot_y)
        zmin = min(zmin, shot_z)
        zmax = max(zmax, shot_z)
    return (ymin, ymax, zmin, zmax)
def create_json_from_stories(stories_and_location):
    """Convert the preprocessed stories into a list of dictionaries, one per
    story, to feed the 3d visualizations."""
    return [
        {
            "title": story,
            "locations": location,
            "link": link,
            "geo_coordinates": geo_coordinates,
            "category": category,
            "img_url": img_url,
            "summary": summary,
        }
        for story, location, link, geo_coordinates, category, img_url, summary
        in stories_and_location
    ]
def _getValueFormat(f, values, i):
    """Helper for buildPairPos{Glyphs|Classes}Subtable.

    Returns `f` when given; otherwise ORs together the formats of every
    defined value record at slot `i`.
    """
    if f is not None:
        return f
    mask = 0
    for value in values:
        if value is None or value[i] is None:
            continue
        mask |= value[i].getFormat()
    return mask
import traceback
def log_exc_request_method(request, **kwargs):
    """
    Method on requests to log exceptions.

    Uses the raven exception-capture hook from the WSGI environ when one is
    installed; falls back to printing the current traceback.
    """
    # It's unclear to me why this should be a request method and not just define.log_exc().
    fallback = lambda **kw: traceback.print_exc()
    capture = request.environ.get('raven.captureException', fallback)
    return capture(**kwargs)
def rest_status(status):
    """
    To give the response as per status code.

    Author: Ramprakash Reddy (ramprakash-reddy.kanala@broadcom.com)

    :param status: HTTP status code
    :return: True for success codes, False for known error codes,
        None (implicitly) for anything else
    """
    success_codes = {200, 201, 204}
    error_codes = {400, 401, 403, 404, 405, 409, 415, 500}
    if status in success_codes:
        return True
    if status in error_codes:
        return False
from typing import Set
import inspect
def get_init_properties(cls, to_class=object) -> Set[str]:
    """Given a class, determine the properties that class needs.

    Assumes that each sub-class will call super with **kwargs. (Which is not a
    good general assumption, but should work well enough for Handlers.)

    cls is the class to check, to_class is the final parent class to check.
    Returns a set of all parameters found.
    """
    result = set()
    init = getattr(cls, '__init__', None)
    if init is not None:
        for param in inspect.signature(init).parameters.values():
            if param.kind == param.VAR_KEYWORD:
                # Ignore any **kwargs
                continue
            if param.name == 'self':
                continue
            result.add(param.name)
    # BUGFIX: the original recursed whenever mro()[1] was a subclass of
    # to_class. With the default to_class=object that eventually called
    # this function on `object` itself, where mro()[1] raises IndexError.
    # Stop before ever recursing into `object`.
    mro = cls.mro()
    if len(mro) > 1:
        parent = mro[1]
        if parent is not object and issubclass(parent, to_class):
            result |= get_init_properties(parent, to_class)
    return result
def ret_params(best_params_, rg=None):
    """
    Translate a hyper-parameter search result into typed estimator params.

    :param best_params_: mapping with tuned hyper-parameter values
    :param rg: regressor tag; only 'rf_' (random forest) is recognised
    :return: parameter dict for the regressor, or None for unknown tags
    """
    if rg != 'rf_':
        print('Params error in utils/ret_params')
        return None
    return {
        'bootstrap': True,
        'max_depth': int(best_params_['max_depth']),
        'n_estimators': int(best_params_['n_estimators']),
        'max_samples': float(best_params_['max_samples']),
        'min_samples_split': int(best_params_['min_samples_split']),
        'n_jobs': 24,
    }
def read_file(path):
    """Reads file contents at path (text mode) and returns them."""
    with open(path, 'rt') as handle:
        contents = handle.read()
    return contents
def hms2stringTime(h, m, s, precision=5):
    """Convert a sexagesimal time to a formatted string.

    Parameters
    ----------
    h, m, s : int, float
        hour, minute, second components; a negative component makes the
        whole result negative
    precision : int
        digits after the decimal point of the seconds field

    Returns
    -------
    String formatted (sign)HH:MM:SS.SSSSS
    """
    sign = '-' if (h < 0 or m < 0 or s < 0) else '+'
    seconds_width = precision + 3
    template = '%%s%%02d:%%02d:%%0%d.%df' % (seconds_width, precision)
    return template % (sign, abs(h), abs(m), abs(s))
def str_to_bin(s: str) -> str:
    """
    Render the UTF-8 bytes of `s` as space-separated 8-bit binary strings.

    'Hello' => '01001000 01100101 01101100 01101100 01101111'
    """
    return " ".join(format(byte, "08b") for byte in s.encode())
from datetime import datetime
def add_data(dict_list):
    """Enrich each crawled record with crawl-time metadata, in place.

    @param
        dict_list: (list) a list of dictionaries. Each contains an IP field
    @return
        dict_list: (list) the same list, with category/timing fields added
    """
    for entry in dict_list:
        # Truncate the UTC timestamp to whole seconds by round-tripping
        # through the string format, then derive the epoch value from it.
        stamp_text = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        stamp = datetime.strptime(stamp_text, '%Y-%m-%d %H:%M:%S')
        entry["Category"] = "WebBasedAttacks"
        entry["Entity-Type"] = "IP"
        entry["TimestampUTC-CTI"] = stamp.timestamp()
        entry["mongoDate-CTI"] = stamp
        entry["DatetimeUTC-CTI"] = stamp_text
    return dict_list
def display_name_to_class(value):
    """ Converts an aggregation display name to a string that is usable as a CSS class name """
    underscored = "_".join(value.split(" "))
    return underscored.lower()
def get_lowest_price(price_dict):
    """Get Lowest Price.

    Get the lowest price for a given premium good from all of the lowest
    prices of that tier's premium good.

    Args:
        price_dict (dict): maps item id -> (price, city, ...) tuples.

    Returns:
        tuple: The item id, the lowest price, and the city with that lowest price.
    """
    # 999999 sentinel kept from the original interface: it is what callers
    # receive for an empty dict (or when every price is <= 10).
    lowest_price = 999999
    lowest_city_from = None
    item = None
    # PERF: iterate items() directly; the old code rebuilt
    # list(price_dict.keys()) several times on every loop iteration.
    for item_id, entry in price_dict.items():
        price = entry[0]
        # Prices of 10 or less are treated as bogus listings and skipped.
        if price < lowest_price and price > 10:
            lowest_price = price
            lowest_city_from = entry[1]
            item = item_id
    return (item, lowest_price, lowest_city_from)
def end_step():
    """Generates the HTML for the end of a step, including breadcrumbs"""
    closing_html = (
        " </div><!--/.interactive-block-ui-->\n"
        " </div><!--/.interactive-block-inner-->\n"
        "</div><!--/.interactive-block-->"
    )
    return closing_html
def _get_attachment_keys(zot, coll_id) -> list:
    """Retrieves attachment keys of attachments in provided collection.

    Args:
        zot (zotero.Zotero): Zotero instance
        coll_id (str): Collection ID.
    Returns:
        list: List of (de-duplicated) attachment keys.
    Raises:
        KeyError: if the collection contains no attachments.
    """
    items = zot.everything(
        zot.collection_items(coll_id, itemType='attachment'))
    if not items:
        raise KeyError(
            'No attachments exist in this collection')
    return list({entry['key'] for entry in items})
import sys
def clean_dict(dict):
    """Remove all keys with Nones as values

    >>> clean_dict({'key': None})
    {}
    >>> clean_dict({'empty_s': ''})
    {'empty_s': ''}
    """
    # The Python 2 `iteritems` branch was dead code on any modern
    # interpreter (this file already relies on Python 3 features such as
    # f-strings), so the version check was dropped.
    # NOTE: the parameter name shadows the builtin `dict`; kept as-is for
    # interface compatibility with existing callers.
    return {k: v for k, v in dict.items() if v is not None}
def maxSubArrayLen(nums, k):
    """Length of the longest contiguous subarray summing exactly to k.

    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    if not nums:
        return 0
    max_len = 0
    # Earliest index at which each prefix sum occurs; the virtual empty
    # prefix (sum 0) occurs just before index 0.
    first_seen = {0: -1}
    cur_sum = 0
    for idx, num in enumerate(nums):
        cur_sum += num
        # A previous prefix of value cur_sum - k means nums[prev+1..idx]
        # sums to exactly k.
        if cur_sum - k in first_seen:
            max_len = max(max_len, idx - first_seen[cur_sum - k])
        # Keep only the earliest occurrence to maximise subarray length.
        if cur_sum not in first_seen:
            first_seen[cur_sum] = idx
    # BUGFIX: removed the leftover debug `print(running_sum)` and the stray
    # module-level `"".index()` statement, which raised a TypeError at
    # import time.
    return max_len
def capitalize(s: str) -> str:
    """ Capitalizes the first character while keeping the rest the same case.
    Replaces Jinja's default filter.

    BUGFIX: returns the empty string unchanged instead of raising
    IndexError, matching the filter it replaces.
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def instruction(request):
    """
    Parametrized fixture which enables to run a test once for each instruction in params
    """
    current_param = request.param
    return current_param
def decrypt(datos_cifrados, clave_secundaria, clave_local):
    """
    Decode the encrypted data back into text.
    --------------------------------------------------------
    Input  = datos_cifrados (list), clave_secundaria (string),
             clave_local (list)
    ========================================================
    Output = texto (string), or -1 when any argument has the
             wrong type
    ---------------------------------------------------------
    NOTE(review): mutates `datos_cifrados` in place (each element is
    divided by the exponential factor); callers that still need the
    original values should pass a copy.
    """
    # Reject anything that is not (list, str, list).
    if type(datos_cifrados) != type([]) or \
            type(clave_secundaria) != type("") or \
            type(clave_local) != type([]):
        return -1
    # texto[0]: key-shifted characters; texto[1]: bare characters.
    texto = [[],[]]
    # The secondary key is assumed to look like "...=...=<factor>"; the
    # third '='-separated field is the exponential scaling factor used
    # during encryption -- TODO confirm against the encrypt counterpart.
    factor_exponencial = int(clave_secundaria.split("=")[2])
    for i in range(len(datos_cifrados)):
        # Undo the exponential scaling (in place).
        datos_cifrados[i] = datos_cifrados[i] / factor_exponencial
        # Character shifted back using the local (per-position) key.
        texto[0].append(chr(int(datos_cifrados[i] + (clave_local[i]))))
        # Unshifted character stream, appended after the first.
        texto[1].append(chr(int(datos_cifrados[i])))
    texto = "".join(texto[0]) + "".join(texto[1])
    return texto
def uncolorize(msg):
    """
    Strip ANSI color codes from a string.
    """
    # Only the "bold + color" CSI prefix is recognised.
    code = '\033[1;'
    if msg.find(code) >= 0:
        # NOTE(review): after removing every occurrence of the prefix this
        # also drops the first three and the last character -- presumably
        # the "NNm" color number and a trailing reset/terminator. That only
        # works when exactly one color code wraps the whole message;
        # verify against callers before relying on it more broadly.
        msg = msg.replace(code, '')[3:-1]
    return msg
def set_transform_metric(method, metric):
    """Sets transformation metric for registration method.

    Args:
        method (Sitk.ImageRegistrationMethod): registration computation method (sitk)
        metric (str): cc, ants, mi or msq; any other value leaves the
            method untouched
    Returns:
        (Sitk.ImageRegistrationMethod): the same method, for chaining
    """
    setters = {
        "cc": lambda m: m.SetMetricAsCorrelation(),
        "ants": lambda m: m.SetMetricAsANTSNeighborhoodCorrelation(2),
        "mi": lambda m: m.SetMetricAsJointHistogramMutualInformation(),
        "msq": lambda m: m.SetMetricAsMeanSquares(),
    }
    setter = setters.get(metric)
    if setter is not None:
        setter(method)
    return method
def lastIndexOf(s, c):
    """
    Return an integer in the range 0..len(s): the index of the last
    occurrence of character `c` in string `s`. A result equal to
    len(s) means `c` does not occur in `s`.
    """
    reversed_pos = s[::-1].find(c)
    if reversed_pos < 0:
        return len(s)
    return len(s) - 1 - reversed_pos
def lammps_cell_text(structure):
    """ Write cell from structure object."""
    # Emits the LAMMPS box section: orthogonal bounds on the diagonal plus
    # the tilt factors (xy, xz, yz) from the lower triangle of the cell
    # matrix.
    # NOTE(review): assumes `structure.cell` is a 3x3 matrix supporting
    # [i, j] indexing (e.g. a numpy array) -- confirm with callers.
    cell_text = f"""
0.0 {structure.cell[0, 0]} xlo xhi
0.0 {structure.cell[1, 1]} ylo yhi
0.0 {structure.cell[2, 2]} zlo zhi
{structure.cell[1, 0]} {structure.cell[2, 0]} {structure.cell[2, 1]} xy xz yz
"""
    return cell_text
def drop_while(pred):
"""Drop so long as pred is true -- then yield values even if pred becomes
true again (triggered once)."""
def generator(coll):
trigger = False
for item in coll:
if trigger:
yield item
elif not pred(item):
trigger = True
yield item
return generator | 8825b50816d4e484fc2f80a5970a1696222c596c | 47,955 |
def create_catalog_stub():
    """
    Function that creates a json stub of the form: {'HashAlgorithm': 'SHA256', 'CatalogItems': {}}.
    """
    return {"HashAlgorithm": "SHA256", "CatalogItems": {}}
def f2c(fahrenheit):
    """
    Convert Fahrenheit to Celsius.

    :param fahrenheit: [float] Degrees Fahrenheit
    :return: [float] Degrees Celsius
    """
    ratio = 5 / 9
    return ratio * (fahrenheit - 32)
import subprocess
def subprocess_execute(args: list):
    """
    Run `args` as a child process, discarding its stdout and stderr.

    Returns the child's exit code.
    """
    devnull = subprocess.DEVNULL
    return subprocess.call(args, stdout=devnull, stderr=devnull)
import ast
def process_alias(ast_alias: ast.alias) -> str:
    """
    Resolve an ast.alias to the name it binds: the `as` alias when one is
    present, otherwise the plain imported name.

    :param ast_alias: An ast alias object
    :return: A string with the resolved name
    """
    return ast_alias.name if ast_alias.asname is None else ast_alias.asname
def decode_http_header(raw):
    """
    Decode a raw HTTP header into a unicode string. RFC 2616 specifies that
    headers are latin1-encoded (a.k.a. iso-8859-1); undecodable bytes are
    replaced. None (or empty) input yields an empty unicode string.

    :param raw:
        Raw HTTP header string (non-unicode), or None.
    :returns:
        Decoded HTTP header (unicode string).
    """
    if not raw:
        return u''
    return raw.decode('iso-8859-1', 'replace')
import torch
def restore_checkpoint(filename, model=None, optimizer=None):
    """restores checkpoint state from filename and load in model and optimizer if provided"""
    # NOTE(review): the literal "(unknown)" in the f-strings below looks
    # like a lost interpolation (probably meant to be {filename}); left
    # untouched to preserve the current output.
    print(f"Extracting state from (unknown)")
    # `state` is expected to be a dict with "model" / "optimizer" keys.
    state = torch.load(filename)
    if model:
        print(f"Loading model state_dict from state found in (unknown)")
        model.load_state_dict(state["model"])
    if optimizer:
        print(f"Loading optimizer state_dict from state found in (unknown)")
        optimizer.load_state_dict(state["optimizer"])
    return state
def _is_rgb(img):
    """Return True when `img` (ndarray) is a 3-channel (H, W, 3) image.

    The redundant ``True if cond else False`` idiom was dropped; the
    boolean expression itself is the result.
    """
    return len(img.shape) == 3 and img.shape[2] == 3
import random
def QueueSelector(d, parallelism, counters):
    """The actual queue selection logic: sample `d` distinct queue indices,
    or return the whole pool when `d` equals the parallelism."""
    if d == parallelism:
        # Separation necessary to reproduce SimPy base results (for same seed).
        return range(parallelism)
    return random.sample(range(len(counters)), d)
def testa_nova_posicao(labirinto, caminhos_ja_passado, i_atual, j_atual):
    """
    Choose the next maze position to move to, based on the walls of the
    maze (cells containing '#') and on the trail of cells already visited.

    Tries, in order: down, right, left, up. When every neighbour is a
    wall, backtracks: the current cell is removed from the trail and
    sealed as a wall, and the previous trail cell becomes the position.

    Both `labirinto` and `caminhos_ja_passado` are mutated in place.
    Returns the new (row, column) position.
    """
    if labirinto[i_atual+1][j_atual] != "#":
        # Move down.
        i_novo, j_novo = i_atual+1, j_atual
        caminhos_ja_passado.append([i_novo, j_novo])
    elif labirinto[i_atual][j_atual+1] != "#":
        # Move right.
        i_novo, j_novo = i_atual, j_atual+1
        caminhos_ja_passado.append([i_novo, j_novo])
    elif labirinto[i_atual][j_atual-1] != "#":
        # Move left.
        i_novo, j_novo = i_atual, j_atual-1
        caminhos_ja_passado.append([i_novo, j_novo])
    elif labirinto[i_atual-1][j_atual] != "#":
        # Move up.
        i_novo, j_novo = i_atual-1, j_atual
        caminhos_ja_passado.append([i_novo, j_novo])
    else:
        # Dead end: drop the current cell from the trail, return to the
        # previous trail cell, and seal the dead end as a wall so it is
        # never retried.
        caminhos_ja_passado.remove([i_atual, j_atual])
        ultimo_caminho = caminhos_ja_passado[len(caminhos_ja_passado) - 1]
        i_novo = ultimo_caminho[0]
        j_novo = ultimo_caminho[1]
        labirinto[i_novo][j_novo] = ' '
        labirinto[i_atual][j_atual] = "#"
    return i_novo, j_novo
def getDetailsName(viewName):
    """
    Return Details sheet name formatted with given view.
    """
    sheet_name = 'Details ({})'.format(viewName)
    return sheet_name
import os
def create_image_log_dict(image_log):
    """
    Return a dictionary with frame numbers as key and (offset, size, is_camera_bottom) tuples of image data as values.
    """
    # parse image log
    # Fixed frame geometry: 640x480 at 2 bytes per pixel (YUV422-style).
    width = 640
    height = 480
    bytes_per_pixel = 2
    image_data_size = width * height * bytes_per_pixel
    file_size = os.path.getsize(image_log)
    images_dict = dict()
    with open(image_log, 'rb') as f:
        is_camera_bottom = False  # assumes the first image is a top image
        while True:
            # Each record: 4-byte little-endian frame number, then the raw
            # image payload (which we skip over rather than read).
            frame = f.read(4)
            if len(frame) != 4:
                break
            frame_number = int.from_bytes(frame, byteorder='little')
            offset = f.tell()
            f.seek(offset + image_data_size)
            # handle the case of incomplete image at the end of the logfile
            # NOTE(review): `>=` also discards a final frame that ends
            # exactly at EOF; if such a frame should count, this wants `>`
            # -- verify against the log writer.
            if f.tell() >= file_size:
                print("Info: frame {} in {} incomplete, missing {} bytes. Stop."
                      .format(frame_number, image_log, f.tell() + 1 - file_size))
                print("Info: Last frame seems to be incomplete.")
                break
            images_dict[frame_number] = (offset, image_data_size, is_camera_bottom)
            # next image is of the other cam
            is_camera_bottom = not is_camera_bottom
    return images_dict
import os
def getScriptDir():
    """ Get root location this script is running from: the absolute path of
    the parent of the directory that holds this module. """
    module_dir = os.path.dirname(__file__)
    parent_dir = os.path.join(module_dir, os.pardir)
    return os.path.abspath(parent_dir)
def get_releases(events):
    """
    Get all target releases from a list of events.

    :return: List of (major, minor) release tuples, sorted in ascending order
    """
    releases = set()
    for event in events:
        releases.add(event.to_release)
    return sorted(releases)
def _load_fashion_item(in_file, coordi_size, meta_size):
"""
function: load fashion item metadata
"""
print('loading fashion item metadata')
with open(in_file, encoding='euc-kr', mode='r') as fin:
names = []
metadata = []
prev_name = ''
prev_feat = ''
data = ''
for l in fin.readlines():
line = l.strip()
w = line.split()
name = w[0]
if name != prev_name:
names.append(name)
prev_name = name
feat = w[3]
if feat != prev_feat:
if prev_feat != '':
metadata.append(data)
data = w[4]
for d in w[5:]:
data += ' ' + d
prev_feat = feat
else:
for d in w[4:]:
data += ' ' + d
metadata.append(data)
for i in range(coordi_size*meta_size):
metadata.append('')
# add null types
names.append('NONE-OUTER')
names.append('NONE-TOP')
names.append('NONE-BOTTOM')
names.append('NONE-SHOES')
return names, metadata | 85f37aaa331e62bba9b27af2be1502804c16a2f2 | 47,974 |
import re
def Remove_Suffixes(sentence, suffix_list):
    """
    Removes suffixes in the list from the sentence.

    Note: each suffix is applied as a regular expression via re.sub, so
    every match anywhere in the sentence is removed -- not only at the
    ends of words.
    """
    cleaned = sentence
    for pattern in suffix_list:
        cleaned = re.sub(pattern, "", cleaned)
    return cleaned
def get_N_RL(sym_list):
    """
    Compute the maximum "number of reachable locations" ($N_{r}$) over all
    possible locations.

    From the paper: for an empirical symbolic time series
    $\\mathcal{T} = \\{s_1, ..., s_m\\}$ over locations $\\Omega$,
    $N_r = \\max_{x \\in \\Omega} | \\{ s_{i+1} : s_i = x \\} |$ -- i.e.
    the largest count of distinct successors any single symbol has.

    :param sym_list: A list of location symbols
    :type sym_list: list
    """
    successors = {}
    for current, following in zip(sym_list, sym_list[1:]):
        successors.setdefault(current, set()).add(following)
    return max((len(nexts) for nexts in successors.values()), default=0)
import os
def setup(i):
    """
    Input:  {
              ck_kernel   - import CK kernel module (to reuse functions)
              customize   - updated customize vars from meta
              customize.env_mapping - the structure of the dialogue
              env         - updated environment vars from meta
              interactive - if 'yes', can ask questions, otherwise quiet and assume defaults
            }
    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
              bat     - prepared string for bat file
            }
    """
    ck = i['ck_kernel']
    interactive_bool = i.get('interactive', '') == 'yes'
    env_mapping = i.get('customize', {}).get('env_mapping', [])
    env = i['env'] # target structure to deposit the future environment variables
    # Resolve a value for every variable the dialogue declares.
    for one_var_mapping in env_mapping:
        var_name = one_var_mapping['variable']
        # 1) Command line parameters have the highest precedence,
        # 2) followed by the variables of the current environment
        # 3) interactive ? interaction : default ? default : error
        var_value = env.get(var_name) or os.environ.get(var_name)
        if not var_value:
            default_value = one_var_mapping.get('default_value')
            if interactive_bool: # ask the question and collect the response:
                display_name = one_var_mapping['display_name']
                question = 'Please enter {}{}: '.format(display_name, " [hit return to accept the default '{}']".format(default_value) if default_value else '')
                kernel_ret = ck.inp({'text': question})
                if kernel_ret['return']:
                    # Propagate the kernel error structure unchanged.
                    return kernel_ret
                else:
                    var_value = kernel_ret['string']
                    # Empty answer falls back to the default, if any.
                    if var_value=='' and default_value!=None:
                        var_value = default_value
            elif default_value!=None: # assume the default
                var_value = default_value
            else:
                return {'return':1, 'error':'Non-interactive mode and no default for {} - bailing out'.format(var_name)}
        env[var_name] = var_value
    # Can add some general type checks and constraints if necessary (in response to "nonempty", "is_a_number", etc)
    return {'return':0, 'bat':''}
def ft2m(ft):
    """Convert feet to meters (1 ft = 0.3048 m)."""
    FEET_TO_METERS = 0.3048
    return ft * FEET_TO_METERS
def padStr(s, field=None):
    """ Left-pad a string with spaces to width `field`.

    No-op when `field` is None or when the string is already at least
    that wide.
    """
    if field is None:
        return s
    return s.rjust(field)
def make_dlc_columns(
    dlc_df, remove_header_rows=True, reset_index=True, drop=True
):
    """
    Replaces the default column names (e.g 'DLC_resnet50_...'),
    with more useful names, combining rows 0 and 1.

    :param dlc_df: Dataframe loaded from DLC output
    :param remove_header_rows: Remove the two rows used to make the column
        names from the data
    :param reset_index: Reset the dataframe index (after removing the header
        rows)
    :param drop: When resetting the dataframe index, do not try to insert
        index into dataframe columns.
    """
    combined_names = dlc_df.iloc[0] + "_" + dlc_df.iloc[1]
    dlc_df.columns = combined_names
    if remove_header_rows:
        dlc_df = dlc_df.iloc[2:]
    if reset_index:
        dlc_df = dlc_df.reset_index(drop=drop)
    return dlc_df
def _enum_getter(enum):
    """Return a function that converts a string to an enum member.

    If ``name`` does not correspond to a member of the enum, it is returned
    unmodified so that argparse can properly report the invalid value.
    """
    def getter(name):
        members = enum.__members__
        if name in members:
            return members[name]
        return name
    getter.__name__ = enum.__name__
    return getter
def humanize_time(time):
    """Convert a time from the database's format to a human readable string.

    Args:
        time::Int - The time to convert (hours in the first two digits,
            hundredths of an hour in the last two).
    Returns:
        _::String - The converted "HH:MM" time.
    """
    text = '{:04d}'.format(time, )
    # Map the quarter-hour codes (hundredths of an hour) back to minutes.
    quarters = {'25': '15', '50': '30', '75': '45'}
    tail = quarters.get(text[2:], text[2:])
    return '{}:{}'.format(text[:2], tail)
def sum_root_helper(root, partial_sum):
    """
    Accumulate the sum of all root-to-leaf numbers, where each path is read
    as a binary number.

    `partial_sum` carries the value of the path so far: each node shifts it
    left one bit and adds its own value; leaves return the completed number.
    """
    if root is None:
        return 0
    partial_sum = 2 * partial_sum + root.val
    if not root.left and not root.right:
        return partial_sum
    left_total = sum_root_helper(root.left, partial_sum)
    right_total = sum_root_helper(root.right, partial_sum)
    return left_total + right_total
from typing import Any
def is_sql(value: Any) -> bool:
    """Gets whether the specified value could be a raw SQL query (i.e. any str)."""
    sql_like_types = (str,)
    return isinstance(value, sql_like_types)
def get_str_from_list(message_list: list, cc: str = "and", punct: bool = True) -> str:
    """Returns list as a formatted string for speech.

    message list: [list] of the components to be joined.
    cc: [str] coordinating conjunction to place at end of list.
    punct: bool - indicates if should include punctuation at end of list.
    """
    if not message_list:
        return ''
    if len(message_list) == 1:
        message = str(message_list[0])
        return message + "." if punct else message
    speech_list = []
    last = len(message_list) - 1
    for i, part in enumerate(message_list):
        if i == last:
            # BUGFIX: the conjunction was hard-coded to " and ", silently
            # ignoring the `cc` parameter.
            speech_list.append(' {} '.format(cc))
        speech_list.append(str(part))
        if i != last:
            speech_list.append(', ')
    if punct:
        speech_list.append('.')
    return ''.join(speech_list)
def read_yaml(path):
    """ A function to read YAML file"""
    # NOTE(review): this is an unimplemented stub -- `path` is never opened
    # and an empty dict is always returned. Callers expecting real YAML
    # parsing will silently receive {}. Implementing it would require a
    # YAML library (e.g. PyYAML).
    result = {}
    return result
def dequeueMessage(queue):
    """
    Dequeue a single message from the queue.

    Returns the message body (the message is deleted once read), or None
    when the queue is empty.
    """
    message = queue.read()
    if message is None:
        return None
    queue.delete_message(message)
    return message.get_body()
def _colnum(col):
    """translates a column address string into a column id

    Standard spreadsheet base-26 numbering:
    "A" -> 1, "Z" -> 26, "AA" -> 27, "ABC" -> 731.
    """
    col_id = 0
    for letter in col:
        digit = ord(letter) - ord('A') + 1
        # BUGFIX: the previous code multiplied by an ever-growing power of
        # 26 (1, 26, 676, ...) instead of a constant 26 per digit, which
        # produced wrong ids for addresses of three or more letters
        # (e.g. "ABC" -> 18931 instead of 731).
        col_id = col_id * 26 + digit
    return col_id
from typing import Dict
def validate_batch_data(data: Dict) -> bool:
    """Validate request data for batch load: requires a 'field_id' key and a
    list-valued 'rows' key."""
    has_field = 'field_id' in data
    rows_are_list = isinstance(data.get('rows', None), list)
    return has_field and rows_are_list
def flipBlackPieces(listBoard):
    """
    Function that "flips" the positions of the black pieces to put them in their
    corresponding place in the white player's side. This is necessary because the
    lists of values for each piece (in the beginning of this module) are only valid
    from the white player point of view (they are not symmetrical).

    Note: the board is assumed to be 8x8 and is mutated in place; cells
    already holding a transposed piece contain more than one character and
    are skipped on later passes.

    Parameters :
        -listBoard : the current board (as a list of lists of strings)
    Returns :
        -listBoard : the current board but with the black positions flipped to be in their white correspondance
    """
    for i,line in enumerate(listBoard):
        for j,char in enumerate(line):
            #if there is only one char in the cell, we can "work" on it. Thus we don't work with the cells
            #where a black piece transposition has already been done.
            if(len(char)==1):
                #if the char is a lower case, it's a black piece
                if(char.islower()):
                    #The black piece is added to it's new position (the 180-degree
                    #rotation of the board), and if there's a dash, it's
                    #replaced with an underscore
                    listBoard[7-i][7-j]=listBoard[7-i][7-j].replace("-","_")+char
                    #The original cell of the black piece is filled by a single dash '-'
                    listBoard[i][j]='-'
    return listBoard
import functools
def handle_top_exception(logger):
    """A decorator that will catch exceptions and log the exception's message
    as a CRITICAL log. The exception is re-raised after logging."""
    def _decorate(func):
        @functools.wraps(func)
        def _guarded(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as err:
                logger.critical(err)
                raise
        return _guarded
    return _decorate
def load_properties(lines, sep="=", comment_marker="#"):
    """Creates a dictionary for properties provided as a list of lines.

    Each line is split on the first occurrence of `sep`. Lines without the
    separator, and lines whose first non-blank character is
    `comment_marker`, are ignored.

    :param lines: (list[str]) lines of the file.
    :param sep: (str) separator between property key and its value.
    :param comment_marker: (str) marker signaling a comment line; must be
        the first character in the row excluding whitespaces.
    :return: (dict[str,str]) dictionary representing a properties file.
    """
    props = {}
    for line in lines:
        if sep not in line or line.strip()[0] == comment_marker:
            continue
        key, _, value = line.partition(sep)
        props[key.strip()] = value.strip()
    return props
def read_bb(filename):
    """Read ground truth bounding boxes from file.

    Each line looks like "<frame>.<ext>;<v1>;...;<v5>;..."; the leading
    zero-padded frame number becomes a (non-padded) string key and the
    five ';'-separated fields after the name are parsed as ints.
    Returns a dict mapping frame number to a list of boxes.
    """
    boxes = {}
    with open(filename) as src:
        for raw in src:
            line = raw.strip()
            frame = str(int(line.split(".")[0]))
            bb = [int(v) for v in line.split(";")[1:6]]
            boxes.setdefault(frame, []).append(bb)
    return boxes
def dirac_delta_function(x, steps=None):
    """
    Returns a Dirac delta function value such that
        f(x) = y_0 if x = x_0,
               y_1 if x = x_1,
               ...
               else 0

    Parameters
    ============
    x: the input value.
    steps: a list of (x_n, y_n) deltas; defaults to [(1, 1)].
    """
    if steps is None:
        # Default supplied here to avoid a shared mutable default argument.
        steps = [(1, 1)]
    for x_n, y_n in steps:
        if x == x_n:
            return y_n
    # BUGFIX: the original returned 0 from inside the loop on the first
    # non-matching delta, so only steps[0] was ever consulted.
    return 0
def _true(param):
    """Catch-all predicate for the matching machinery: accepts anything."""
    return True
def convert_to_ngsi(data: dict) -> dict:
    """Convert an animal-damage record into the NGSI format.

    Args:
        data (dict): damage-report record with id, type, animalName,
            animalCategory, location (latitude/longitude) and a datetime
            under 'time'.
    Returns:
        dict: the NGSI-shaped payload.
    """
    latitude: float = data['location']['latitude']
    longitude: float = data['location']['longitude']
    payload: dict = {'id': data['id'], 'type': data['type']}
    # Plain text attributes share the same NGSI wrapper.
    for attr in ('animalName', 'animalCategory'):
        payload[attr] = {'type': 'Text', 'value': data[attr]}
    payload['location'] = {
        'type': 'geo:point',
        'value': f'{latitude}, {longitude}'
    }
    payload['time'] = {
        'type': 'DateTime',
        'value': data['time'].isoformat()
    }
    return payload
def t_LINE(t):
    """line"""
    # NOTE: the docstring above is NOT documentation -- PLY uses a token
    # function's docstring as the token's regular expression, so it must
    # stay exactly "line".
    # The token's value becomes the length of the matched text.
    t.value = len(t.value)
    return t
def ValidateForeignPostJSON(post):
    """
    Returns True if JSON conforms to the correct specs. Returns false otherwise.
    """
    # BUGFIX: "comments" is now part of the required fields -- previously a
    # post without a "comments" key made this validator raise KeyError
    # instead of returning False.
    required = ("title", "visibility", "contentType", "content", "author",
                "comments")
    for field in required:
        if field not in post:
            return False
    allowed_content_types = {
        "text/plain", "text/markdown", "application/base64",
        "image/png;base64", "image/jpeg;base64",
    }
    if post["contentType"] not in allowed_content_types:
        return False
    # Comments only support the two textual content types.
    allowed_comment_types = {"text/plain", "text/markdown"}
    for comment in post["comments"]:
        if comment["contentType"] not in allowed_comment_types:
            return False
    return True
from typing import Dict
from typing import Any
from pathlib import Path
import json
def write_results_data_file(
    results: Dict[str, Dict[str, Any]], raw_data_file: str
) -> str:
    """
    Writes the raw data file containing all the results for an AWS account.

    :param results: Dictionary containing the scan results for a particular account
    :param raw_data_file: destination path for the JSON dump
    :return: the path that was written
    """
    # Write the output to a results file if that was specified. Otherwise, just print to stdout
    serialized = json.dumps(results, indent=4)
    Path(raw_data_file).write_text(serialized)
    return raw_data_file
from bs4 import BeautifulSoup
def strip_html(value):
    """
    Strips HTML (tags and entities) from string `value`.
    """
    # A leading space is prepended so BeautifulSoup does not treat a bare URL
    # value such as "http://django.pynotify.com" as a URL argument and warn;
    # per the original author, get_text() removes it again along with markup.
    padded = ' ' + value
    soup = BeautifulSoup(padded, 'lxml')
    return soup.get_text()
import os
def numberOffile(dirname, suffix):
    """Recursively collect files with the given suffix under a folder.

    :param dirname: folder path to search
    :param suffix: file suffix (extension without the dot) to match
    :return: list of matching file paths (including subdirectories)
    """
    dirPATH = []
    for path in os.listdir(dirname):
        childDir = os.path.join(dirname, path)
        if os.path.isdir(childDir):
            # Bug fix: the recursive call's result was previously discarded,
            # so files in subdirectories were never reported.
            dirPATH.extend(numberOffile(childDir, suffix))
        else:
            if childDir.split(".")[-1] == suffix:
                dirPATH.append(childDir)
    return dirPATH
def alternate_transformation(text):
    """Alternate the capitalization of a string's characters.

    Characters at even indices are upper-cased, those at odd indices
    are lower-cased.
    """
    chars = []
    for position, letter in enumerate(text):
        chars.append(letter.lower() if position % 2 else letter.upper())
    return "".join(chars)
import collections
def char_distribution(string: str) -> collections.Counter:
    """Count the characters of an ascii string, case-insensitively.

    Note: this returns raw character counts (a Counter), not percentages
    of the string length as the original docstring claimed.

    :param string: the non-empty string to be analyzed
    :returns: a Counter mapping each lower-cased character to its count
    :raises ValueError: if *string* is empty
    """
    if not string:
        # Raise explicitly instead of `assert`, which is stripped under -O.
        raise ValueError("string must be non-empty")
    return collections.Counter(string.lower())
import inspect
def __determine_sub_cmd_map(mod_inst):
    """Build the sub-command dispatch map for *mod_inst*.

    Scans the members of *mod_inst* for objects tagged with a
    ``sub_cmd_name`` attribute and maps that name to a
    ``(member, sub_cmd_usage)`` tuple.
    """
    cmd_map = {}
    for _member_name, member in inspect.getmembers(mod_inst):
        if not hasattr(member, 'sub_cmd_name'):
            continue
        cmd_map[member.sub_cmd_name] = (member, member.sub_cmd_usage)
    return cmd_map
def get_complement(nucleotide):
    """Return the complementary nucleotide.

    nucleotide: a nucleotide (A, C, G, or T) represented as a string
    returns: the complementary nucleotide, or -1 for anything that is
    not a valid nucleotide.

    >>> get_complement('A')
    'T'
    >>> get_complement('C')
    'G'
    >>> get_complement('G')
    'C'
    >>> get_complement('T')
    'A'
    >>> get_complement('L')
    -1
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    # -1 signals "not a valid DNA base" (kept for caller compatibility).
    return complements.get(nucleotide, -1)
def get_virtual_cst(prec, value, language):
    """Generate the coding of constant <value> in <language>, assuming
    number format <prec>."""
    # First encode the value as an integer in the base format, then wrap
    # it as a constant of the support format.
    integer_coding = prec.get_base_format().get_integer_coding(value, language)
    return prec.get_support_format().get_cst(integer_coding)
import json
def json_pretty_print(content):
    """Pretty print a JSON object.

    ``content`` is a JSON document (string) to re-serialize with sorted
    keys and 4-space indentation.
    """
    parsed = json.loads(content)
    return json.dumps(parsed,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
def InitMutator(current, value):
    """Initialize the value if it is None"""
    # Only a true None triggers initialization; falsy values like 0 or ""
    # are kept as-is.
    return value if current is None else current
def autoincrement_tag(last_tag):
    """
    autoincrement_tag('1') => 2
    autoincrement_tag('1.2') => 1.3
    autoincrement_tag('1.2.3') => 1.2.4
    """
    # Bump only the final dotted component; everything before it is kept.
    parts = last_tag.split('.')
    parts[-1] = str(int(parts[-1]) + 1)
    return '.'.join(parts)
import numpy
def rand_register_sizes(nRegisters, pvals):
    """Return a randomly chosen list of nRegisters summing to nQubits
    """
    # NOTE(review): numpy.random.multinomial(n, pvals) distributes *n* trials
    # over len(pvals) categories, so this draw sums to nRegisters — yet the
    # docstring says the result sums to nQubits. Confirm which argument was
    # intended as the number of trials.
    v = numpy.random.multinomial(nRegisters, pvals)
    # Drop zero-size registers from the sample before returning.
    return v[v.nonzero()]
def read_file(file):
    """Reads (file), returns text as a string. Returns -1 on failure.

    :param file: path of the file to read
    :return: file contents as ``str``, or -1 if the file could not be
        opened or decoded (kept for caller compatibility).
    """
    try:
        # `with` closes the file automatically; the old explicit f.close()
        # inside the with-block was redundant.
        with open(file, 'r') as f:
            return f.read()
    except (OSError, UnicodeDecodeError, TypeError, ValueError):
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return -1
def kpathstr_to_xticks(path):
    """Split path into individual labels"""
    # Walk the k-path string one character at a time. Separator characters
    # (',', '|', ' ') never become labels themselves; instead they flag that
    # the NEXT label should be merged into the previous one as "A|B".
    tick_labels = []
    combine_labels = False
    for i, char in enumerate(path):
        if char not in [',', '|', ' ']:
            if combine_labels:
                # Merge with the previous label, e.g. 'X' after '|' -> 'G|X'.
                tick_labels[-1] = tick_labels[-1] + '|' + char
                combine_labels = False
                # NOTE(review): an empty label is also appended after every
                # merge — presumably a placeholder tick for the duplicated
                # position at a path discontinuity; confirm against the
                # plotting code that consumes this list.
                tick_labels.append('')
            else:
                tick_labels.append(char)
        else:
            combine_labels = True
    return tick_labels
def get_all_jobs_in_workflow(processed_cfg, wrkflw):
    """ All jobs available under the specified workflow """
    workflow = processed_cfg['workflows'][wrkflw]
    return workflow['jobs']
def split(numbers):
    """Split the left-most regular number >= 10 in a snailfish-style pair.

    A number n splits into ``[n // 2, ceil(n / 2)]``. At most one split is
    performed per call, scanning left to right, depth first.

    :param numbers: a two-element list ``[left, right]`` of ints / nested lists
    :return: ``([left, right], done)`` where ``done`` is True iff a split
        happened anywhere in the pair
    """
    [left, right] = numbers
    done = False
    # Cleanup: the original guarded the first branch with `not done`, which
    # was always True there (done had just been set to False).
    if isinstance(left, int):
        if left >= 10:
            # -(-n // 2) is ceiling division without importing math.
            left = [left // 2, -(-left // 2)]
            done = True
    elif isinstance(left, list):
        left, done = split(left)
    if not done:
        if isinstance(right, int):
            if right >= 10:
                right = [right // 2, -(-right // 2)]
                done = True
        elif isinstance(right, list):
            right, done = split(right)
    return [left, right], done
from typing import Dict
from typing import Any
def update_config(config: Dict[str, Any], new_values: Dict[str, Any]) -> Dict[str, Any]:
    """
    Return a copy of ``config`` with leaf values replaced from ``new_values``.

    The keys of ``new_values`` name leaf nodes anywhere in the (nested)
    ``config``; e.g. updating ``config["architecture_config"]["recurrent"]``
    is requested as ``new_values["recurrent"]``. Leaf names must therefore
    be unique across the whole config.
    """
    updated = dict(config)
    for key, old_value in config.items():
        if key in new_values:
            # Direct hit: replace this leaf with the new value.
            updated[key] = new_values[key]
        elif isinstance(old_value, dict):
            # Recurse into nested sub-configs looking for matching leaves.
            updated[key] = update_config(updated[key], new_values)
    return updated
from datetime import datetime
def clean_track_info(track):
    """Create a more compact json object out of the lastfm track json"""
    info = {
        "artist": track["artist"]["#text"],
        "song": track["name"],
    }
    # A track flagged as "now playing" has no listen date yet; stamp it
    # with the current UTC time instead of parsing one.
    now_playing = "@attr" in track and track["@attr"]["nowplaying"] == "true"
    if now_playing:
        info["date_listened"] = datetime.utcnow()
    else:
        info["date_listened"] = datetime.strptime(track["date"]["#text"],
                                                  "%d %b %Y, %H:%M")
    return info
def grades_input(n_student):
    """Gets grade inputs from the user."""
    # Prompt once per student and collect the integer responses.
    return [int(input('Enter a number: ')) for _ in range(n_student)]
import time
def epochtime_to_string(epochtime=None, use_second=False):
    """
    Given an epoch time (seconds since 1/1/1970), return a string useful
    as a plot title. Set the use_second flag to also include seconds in
    the string. Returns '' if the value cannot be converted.

    Note: with epochtime=None, time.gmtime() uses the current time.
    """
    fmt = '%m/%d/%Y %H:%M:%S UTC' if use_second else '%m/%d/%Y %H:%M UTC'
    try:
        return time.strftime(fmt, time.gmtime(epochtime))
    except (TypeError, ValueError, OverflowError, OSError):
        # Narrowed from a bare `except`: these are the errors gmtime/strftime
        # raise for non-numeric or out-of-range inputs.
        return ''
def team_multisig(accounts) -> str:
    """The team multisig address."""
    # By convention the fifth test account is the multisig wallet.
    address = accounts[4]
    return address
from pathlib import Path
def getMount (path):
    """ Get mount point info """
    path = Path (path).resolve ()
    if not path.is_mount ():
        raise ValueError ('Not a mount point')
    found = None
    with open ('/proc/mounts') as fd:
        for line in fd:
            source, dest, kind, attrib, _, _ = line.split (' ')
            if Path (dest).resolve () != path:
                continue
            # Keep scanning: the last matching entry overrides(?) earlier mounts.
            found = dict (source=source, dest=dest, kind=kind, attrib=attrib)
    return found
def spatial_agg(df, deg=2.5):
    """
    For a given df calculate aggregation (count, mean, median, std, min, max,
    na_count, na_pct) for data variables (temperature, pressure, humidity,
    magnetic_tot) grouped by latitude and longitude category
    Parameters
    ----------
    df: pandas DataFrame
        Pandas dataframe with data for the desired sampling range (hourly, daily)
    deg: int or float, default 2.5
        Spatial degree interval for for latitude and longitude data
    Returns
    -------
    agg: pandas DataFrame
        DataFrame with aggregated statistics per (lat, lng) cell for every
        atmospheric variable, indexed by (lat, lng, stat)
    """
    # Group data points by lat, lng categories
    # NOTE(review): `discretize_latlng` is not a pandas built-in — presumably
    # a project-registered DataFrame extension that adds the 'lat_cat' and
    # 'lng_cat' columns used below; confirm.
    df = df.discretize_latlng(deg=deg)
    # create a groupby object grouped by lat, lng categories
    grouped = df.groupby(by=["lat_cat","lng_cat"])
    # custom agg functions to calculate na count and percentage
    na_pct = lambda df: df.isna().mean()
    na_count = lambda df: df.isna().sum()
    # group by custom functions
    # (lambdas appear as a "<lambda>" column level; rename to readable names)
    na_pct = grouped.agg([na_pct]).rename({"<lambda>":"na_pct"}, axis=1)
    na_cnt = grouped.agg([na_count]).rename({"<lambda>":"na_count"}, axis=1)
    # group by regular statistics
    agg = grouped.agg(["mean","median","std","min","max","count"])
    # join all groups and reshape dataframe so it has statistics as index not columns
    agg = agg.join([na_cnt, na_pct]).T.unstack().T
    # rename indices and columns for readability
    agg.columns.names = ["atmos"]
    agg.index.names = ["lat", "lng", "stat"]
    return agg
import os
def pathjoin(*args):
    """
    This is like os.path.join but does some rclone-specific things because there could be
    a ':' in the first part.

    The second argument could be '/file', or 'file' and the first could have a colon.

        pathjoin('a','b')   # a/b
        pathjoin('a:','b')  # a:b
        pathjoin('a:','/b') # a:/b
        pathjoin('a','/b')  # a/b NOTE that this is different
    """
    if len(args) <= 1:
        return ''.join(args)
    root, first, rest = args[0], args[1], args[2:]
    # Trim a single trailing slash so we control the separator ourselves.
    if root.endswith('/'):
        root = root[:-1]
    # A remote like "a:" joins without a separator, as does a first part
    # that already starts with '/'.
    if root.endswith(':') or first.startswith('/'):
        head = root + first
    else:
        head = f'{root}/{first}'
    return os.path.join(head, *rest)
def normalize_value(value, min, max):
    """make sure value is in <min, max>, return normalized value"""
    # NOTE: the parameters shadow the min()/max() builtins (kept for caller
    # compatibility), so the clamping is done with explicit comparisons.
    if value > max:
        return max
    if value < min:
        return min
    return value
import six
import json
def load_json_dict(json_body):
    """
    converts a json body to a dictionary

    :param json_body: the json body of a request; may already be a dict, or a
        (possibly repeatedly) JSON-encoded str/bytes
    :return: the json body as a dictionary
    :raises ValueError: if the decoded body is not a dictionary. Previously
        this case recursed on the same value forever until RecursionError.
    """
    # Unwrap double-encoded payloads: keep decoding while we still have text.
    # (On Python 3, str covers six.text_type; json.loads also accepts bytes.)
    while isinstance(json_body, (str, bytes, bytearray)):
        json_body = json.loads(json_body)
    if isinstance(json_body, dict):
        return json_body
    raise ValueError(
        'JSON body does not decode to a dictionary: %r' % type(json_body))
import subprocess
def map_drive(conn_string):
    """Maps a free network drive letter to the defined Azure file share
    (Windows-only: shells out to `net use`).

    Parameters
    ----------
    conn_string : str
        Connection string with credentials to Azure file share

    Returns
    -------
    drive_letter : str
        Drive letter (with trailing backslash) of the mapped drive
    drive_connection : str
        Drive letter followed by the original connection string
    datastore_name : str
        Share/datastore name parsed out of the connection string
    """
    # SECURITY NOTE(review): conn_string is concatenated into a shell command
    # (shell=True) — it must come from a trusted source or this is command
    # injection. Prefer subprocess.run([...], shell=False) with an arg list.
    # raw string should be passed to subprocess.run function
    # backslashes in Python need to be escaped, ie, to run this '\\servername\path\resource.txt' you must do either '\\\\servername\\path\\resource.txt' or r'\\servername\path\resource.txt'
    drive_connect = subprocess.run('net use * ' + conn_string, shell=True, capture_output = True,check = True)
    # Save prompt response as a string
    print(drive_connect.stdout.decode())
    output_text = drive_connect.stdout.decode()
    # Save the drive letter as a variable ready for disconnecting from network drive
    # Split string by space delimiter
    split_string = output_text.split()
    # Find the drive letter which should be the second item
    # NOTE(review): assumes the English "Drive X: is now connected" message —
    # locale-dependent; confirm on non-English Windows installs.
    drive_letter = split_string[1]
    drive_letter = drive_letter + '\\'
    drive_connection = drive_letter + conn_string
    print(drive_letter)
    print(drive_connection)
    # NOTE(review): assumes the connection string contains ".net\" (Azure
    # endpoint) followed by the share name, terminated by " /u" — TODO
    # confirm against a real connection string.
    sub_string = drive_connection.split(".net\\",1)[1]
    datastore_name = sub_string.split(" /u",1)[0]
    return(drive_letter, drive_connection,datastore_name)
def _embedding(x, weight):
    """(F.embedding())
    :param x: shape[*]
    :param weight: shape[NV, E]. NV: num_vocab, E: embedding_dim
    :return: shape[*, E]
    """
    # Integer-index rows of the weight matrix; fancy indexing broadcasts
    # over any leading shape of x.
    embedded = weight[x]
    return embedded
import torch
def state_dict_to_cpu(state_dict):
    """Move every tensor in a (possibly nested) state dict to the CPU.

    The dict is updated in place and also returned.
    """
    for key, value in state_dict.items():
        if isinstance(value, torch.Tensor):
            state_dict[key] = value.to("cpu")
        elif isinstance(value, dict):
            state_dict[key] = state_dict_to_cpu(value)
    return state_dict
def clean_images(gltf):
    """
    Collect the image indices still referenced by textures, so unused
    images can be removed. (Comments translated from Japanese.)

    :param gltf: glTF object
    :return: list of image indices referenced by ``gltf['textures']``
    """
    # Return a real list instead of a lazy one-shot `map` object (the
    # docstring promises a list, and a list can be iterated repeatedly
    # and indexed by callers).
    return [texture['source'] for texture in gltf['textures']]
from typing import BinaryIO
def read_c_string(fd: BinaryIO) -> bytes:
    """Reads a null-terminated string from the provided file descriptor."""
    chunks = bytearray()
    while True:
        b = fd.read(1)
        # Stop on the NUL terminator or EOF; the terminator is not included.
        if not b or b == b'\0':
            return bytes(chunks)
        chunks += b
def get_V_fan_mid_H(q_hs_mid_H):
    """(13)

    Args:
        q_hs_mid_H: Intermediate heating capacity of the heat source (W).

    Returns:
        Fan air-flow rate when operating at intermediate heating
        capacity (m3/h).
    """
    # Empirical linear fit (equation 13); the per-minute flow is scaled
    # to per-hour by the factor of 60.
    return (1.69 * q_hs_mid_H * 10 ** (-3) + 14.5) * 60
def dataframe_count(df, count_type="row"):
    """Count rows, columns, or cells of a DataFrame.

    :param df: the pandas DataFrame to measure
    :param count_type: one of "row", "column", "cell"
    :return: the requested count
    :raises ValueError: for an unknown count_type
    """
    counters = {
        "row": lambda d: len(d),
        "column": lambda d: len(d.columns),
        "cell": lambda d: d.size,
    }
    if count_type not in counters:
        # Typo fixed in the error message: "invaild" -> "invalid".
        raise ValueError(f"invalid count_type: {count_type}")
    return counters[count_type](df)
def replace_wildcards(value):
    """Replace wildcards with some possible big values"""
    # '?' -> largest single digit, '*' -> largest unsigned 32-bit value.
    table = str.maketrans({'?': '9', '*': str(0xffffffff)})
    return value.translate(table)
def create_transfer_dict(dest_rse_id, request_type, scope, name, rule, lock=None, bytes_=None, md5=None, adler32=None, ds_scope=None, ds_name=None, lifetime=None, activity=None, retry_count=None, session=None):
    """
    Build and return the request dictionary describing one transfer.

    :param dest_rse_id: The destination RSE id.
    :param request_type: The request type.
    :param scope: The scope of the file.
    :param name: The name of the file.
    :param rule: The rule responsible for the transfer.
    :param lock: The lock responsible for the transfer.
    :param bytes_: The filesize of the file in bytes.
    :param md5: The md5 checksum of the file.
    :param adler32: The adler32 checksum of the file.
    :param ds_scope: Dataset the file belongs to.
    :param ds_name: Dataset the file belongs to.
    :param lifetime: Lifetime in the case of STAGIN requests.
    :param activity: Activity to be used.
    :param retry_count: Retry count carried over to the request.
    :param session: Session of the db.
    :returns: Request dictionary.
    """
    attributes = {
        'activity': activity or rule.activity or 'default',
        'source_replica_expression': rule.source_replica_expression,
        'lifetime': lifetime,
        'ds_scope': ds_scope,
        'ds_name': ds_name,
        'bytes': bytes_,
        'md5': md5,
        'adler32': adler32,
        'priority': rule.priority,
        # Tape sources are always allowed here; the account-attribute check
        # (has_account_attribute) is intentionally disabled.
        'allow_tape_source': True,
    }
    # Prefer the lock's creation time when a lock drives the transfer.
    requested_at = lock.created_at if lock else rule.created_at
    return {
        'dest_rse_id': dest_rse_id,
        'scope': scope,
        'name': name,
        'rule_id': rule.id,
        'attributes': attributes,
        'request_type': request_type,
        'retry_count': retry_count,
        'account': rule.account,
        'requested_at': requested_at,
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.