content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def calc_swath_at_nadir_cross(gsd_cross, pixels_cross):
    """Return the cross-track swath width at nadir, in meters.

    The swath is the ground distance imaged across track: the ground
    sample distance multiplied by the number of cross-track pixels.
    https://en.wikipedia.org/wiki/Swathe

    Returns
    -------
    double : meters
    """
    swath_m = pixels_cross * gsd_cross
    return swath_m
def buscador(myDictionary,buscar=" ",exacta=0,campoClave="",clavesString=[" "],clavesList="",returnKeys=[]):
    """Filter a dict of record-dicts by keyword search.  (NOTE from author: still needs optimisation.)

    Returns a new dict containing only the records matching the search.

    Parameters
    ----------
    myDictionary : dict
        Dict of records (each value is itself a dict) to be searched.
    buscar : str
        Space-separated search terms, e.g. 'wordToFind -wordToExclude'.
        Terms prefixed with '-' EXCLUDE matching records; all other terms
        (an optional '+' prefix is stripped) INCLUDE them.  Several include
        and/or exclude terms may be given; exclude-only searches work too.
        If left blank, the whole input is returned with only the
        'returnKeys' and 'campoClave' transformations applied.
    exacta : int
        1 -> exact (case-insensitive) comparison of whole tokens, like
             'PaLaB' == 'PALABRA' (False);
        0 -> case-insensitive prefix/containment match, like
             'palab' in 'palabra' (True).
    campoClave : str
        Name of a field whose value is used as each record's key in the
        result (e.g. 'id' or 'barCode').  When the field is absent, the
        record's first key is used instead.  Illustration:

            campoClave='id'
            inicial={
                0:{'id':10025,'proveedorID':16,'barCode':6549849464,'name':'Producto 5001'},
                1:{'id':46887,'proveedorID':28,'barCode':9878954,'name':'Producto 8000'},
            }
            # result keys become 10025, 46887 instead of 0, 1

    clavesString : list
        Record fields whose values are searched as whitespace-split strings.
    clavesList : list
        Record fields whose values are searched element-by-element as lists.
    returnKeys : list
        If given, every returned record keeps only these fields.

    NOTE(review): the mutable default arguments (lists) are shared between
    calls, and 'lista' is not reset between successive search terms, so
    multi-term searches may accumulate matches from earlier terms — confirm
    this is the intended behaviour before relying on it.
    """
    search=buscar.strip()
    # split the query into exclude terms ('-' prefix) and include terms
    omitir=[]
    filtrar=[]
    for x in search.split(" "):
        if ("-" in x):
            omitir+=[x.replace("-",'')]
        else:
            filtrar+=[x.replace("+",'')]
    ret={}
    lista=[]
    myDict=myDictionary
    if filtrar:
        # successively narrow myDict with each include term
        for search in filtrar:
            ret={}
            if exacta:
                for clave in [palabra for palabra in myDict]:
                    # exact (case-insensitive) match against the list-valued fields...
                    if ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower())] if clavesList else False) :
                        lista+=[myDict[clave]]
                    else:
                        # ...otherwise exact token match against the string fields
                        for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString]:
                            if (search.lower() in check):
                                lista+=[myDict[clave]]
            else:
                for clave in [palabra for palabra in myDict]:
                    # prefix match against the list-valued fields
                    if ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower()[0:len(search.lower())])] if clavesList else False) :
                        lista+=[myDict[clave]]
                    # prefix match against each token of the string fields
                    elif ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in [ checking.lower()[0:len(search)] for checking in check])]):
                        lista+=[myDict[clave]]
            # re-key the matches: by campoClave when present, else the first field
            for ok in lista:
                pasar={}
                for key in [keyword for keyword in ok]:
                    pasar.setdefault(str(key),ok[key])
                ret.setdefault(str(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]]),pasar)
            myDict=ret
    if omitir:
        # successively remove records matching each exclude term
        for search in omitir:
            ret={}
            if exacta:
                for clave in [palabra for palabra in myDict]:
                    # keep only records with NO exact match in list fields...
                    if not ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower())] if clavesList else False) :
                        lista+=[myDict[clave]]
                    # ...nor any exact token match in string fields
                    elif not ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in check)]):
                        lista+=[myDict[clave]]
            else:
                for clave in [palabra for palabra in myDict]:
                    # keep only records with NO prefix match in list fields...
                    if not ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower()[0:len(search.lower())])] if clavesList else False) :
                        lista+=[myDict[clave]]
                    # ...nor any prefix match in string fields
                    elif not ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in [ checking.lower()[0:len(search)] for checking in check])]):
                        lista+=[myDict[clave]]
            # re-key the survivors, as in the include pass
            for ok in lista:
                pasar={}
                for key in [keyword for keyword in ok]:
                    pasar.setdefault(key,ok[key])
                ret.setdefault(str(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]]),pasar)
            myDict=ret
    if returnKeys:
        # project every surviving record down to the requested fields
        ret={}
        lista=[ myDict[clave] for clave in [palabra for palabra in myDict]]
        for ok in lista:
            pasar={}
            for key in returnKeys:
                pasar.setdefault(key,ok[key])
            ret.setdefault(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]],pasar)
    return ret
import sys
def findLakeMWL(gdf, lakepoly):
    """Return the mean water level (parsed from the 'TEKSTI' column) of the
    feature in *gdf* nearest to *lakepoly*.

    Rows whose 'TEKSTI' is not a plain number (e.g. controlled ranges like
    "127.1-128.5") are skipped.  Exits the process when nothing usable is
    found.  Not a foolproof implementation.
    """
    if not len(gdf):
        return None
    best_mwl = -1.0
    best_dist = -1.0
    for idx, label in enumerate(gdf['TEKSTI'].values):
        try:
            candidate = float(label)
        except ValueError:  # skip erroneous levels, e.g. "127.1-128.5"
            continue
        distance = gdf.iloc[idx].geometry.distance(lakepoly)
        if best_dist < 0.0 or distance < best_dist:
            best_dist = distance
            best_mwl = candidate
    if best_mwl < 0.0:
        print("Fatal error, mean water level not found.")
        sys.exit(1)
    return best_mwl
def matchAPIProfile(api, profile, elem):
    """Match a requested API & profile name to api & profile attributes of an Element.

    :param api: requested API name, or None if no API was requested
    :param profile: requested profile name, or None
    :param elem: Element whose 'api'/'profile' attributes are checked
    :return: True when both attributes are absent or equal to the request
    :raises UserWarning: if an attribute is present but nothing was requested
    """
    # Fixes: `is None` instead of `== None` (identity test is the idiom for
    # None); removed the unused `match = True` local.
    # Match 'api', if present
    if 'api' in elem.attrib:
        if api is None:
            raise UserWarning("No API requested, but 'api' attribute is present with value '" +
                              elem.get('api') + "'")
        elif api != elem.get('api'):
            # Requested API doesn't match attribute
            return False
    # Match 'profile', if present
    if 'profile' in elem.attrib:
        if profile is None:
            raise UserWarning("No profile requested, but 'profile' attribute is present with value '" +
                              elem.get('profile') + "'")
        elif profile != elem.get('profile'):
            # Requested profile doesn't match attribute
            return False
    return True
def surface_runoff_flux(runoff, drain):
    """Surface runoff ``mrros`` [mm].

    Computed as total runoff minus drainage.
    """
    mrros = runoff - drain
    return mrros
def _plain_value_to_html(dict_, html=None):
"""Convert plain JSON-LD value to HTML."""
if html is None:
html = []
html.append(dict_['@value'])
return html | 8579318b1ada26e1259550ff925d400bc5c5fa15 | 34,123 |
def angular_velocity(v, r):
    """
    Angular velocity of an object with linear velocity *v* moving on a
    circular path of radius *r* (omega = v / r).

    Parameters
    ----------
    v : float
    r : float

    Returns
    -------
    float
    """
    omega = v / r
    return omega
def wavelength_filter(df, min_wav, max_wav):
    """Return the rows of the material dataframe whose 'wavelength' lies
    strictly between *min_wav* and *max_wav*."""
    in_band = (df['wavelength'] > min_wav) & (df['wavelength'] < max_wav)
    return df[in_band]
def list_neighbors(cfg):
    """List neighbor names from config.

    Args:
        cfg (dict): config from config_load.
    Returns:
        list: neighbor name strings.
    Examples:
        >>> list_neighbors(cfg)
        ['192.168.0.2', '192.168.0.1']
    """
    names = []
    for neighbor in cfg["neighbors"]:
        names.append(neighbor["name"])
    return names
from typing import Counter
def CalculateSharedAddresses(symbols):
    """Count how many symbols share each memory address.

    Returns a Counter where result[address] is the number of symbols whose
    fifth tuple field equals that address."""
    return Counter(address for _, _, _, _, address in symbols)
def is_right(side1, side2, side3):
    """
    Takes three side lengths and returns True if the triangle is right-angled.

    Checks the Pythagorean theorem (a^2 + b^2 == c^2 for the two shorter
    sides a, b and the longest side c) within floating-point tolerance.
    The previous implementation was a stub that always returned False.

    :param side1: int or float
    :param side2: int or float
    :param side3: int or float
    :return: bool
    """
    import math  # local import: keeps this fix self-contained in the file
    shorter_a, shorter_b, hypotenuse = sorted((side1, side2, side3))
    return math.isclose(shorter_a ** 2 + shorter_b ** 2, hypotenuse ** 2)
def create_dt_hparams(max_depth=None, min_samples_split=2):
    """
    Create an hparam dict (for input into create_DNN_model or similar
    functions) holding the decision-tree hyperparameter values.
    :return: hparam dict
    """
    return {'max_depth': max_depth, 'min_samples_split': min_samples_split}
import collections
def SortDictOfDict(Dict, key, reverseOrder=False):
    """
    Sort a dictionary of dictionaries by one inner key.

    Returns a new dict whose entries are ordered by Dict[item][key]
    (insertion order is guaranteed for dicts since Python 3.7).

    dict = {'a': {'time_stamp': 100}, 'b': {'time_stamp': 50}, 'c': {'time_stamp': 150}}
    key = 'time_stamp'
    >> {'b': {'time_stamp': 50}, 'a': {'time_stamp': 100}, 'c': {'time_stamp': 150}}

    Fixes: removed the dead `if not collections` guard (an imported module
    object is always truthy, so that ImportError could never be raised) and
    the redundant OrderedDict-to-dict copy loop.
    """
    ordered = sorted(Dict.items(), key=lambda item: item[1][key], reverse=reverseOrder)
    return {name: value for name, value in ordered}
def sizestr(s):
    """
    Parse a CHANNELSxHEIGHTxWIDTH string into a tuple of ints.
    """
    return tuple(int(part) for part in s.split('x'))
import subprocess
import shlex
import os
def configure_germline_strelka(bam_path, reference_path, cmd_args, working_dir):
    """
    Run Strelka's germline configuration script and return the path to the
    generated runWorkflow.py.

    :param bam_path: Local path to bam file
    :param reference_path: Local path to reference fasta
    :param cmd_args: Additional command-line arguments appended verbatim
    :param working_dir: Working directory used as the Strelka run dir
    :return: path to the workflow runner (runWorkflow.py)
    """
    config_cmd = (
        'python /strelka/bin/configureStrelkaGermlineWorkflow.py '
        '--bam %s --referenceFasta %s --runDir %s %s'
        % (bam_path, reference_path, working_dir, cmd_args))
    subprocess.check_call(shlex.split(config_cmd))
    return os.path.join(working_dir, 'runWorkflow.py')
def calculate_area_per_pixel(resolution):
    """
    Given a pixel resolution in metres, return the area covered by one
    pixel in square kilometres.
    """
    metres_per_km = 1000  # unit conversion factor
    return resolution ** 2 / metres_per_km ** 2
def exp_transformation(data, orig_col, new_col, power):
    """
    Feature transformation: add a column equal to *orig_col* raised to
    *power*.

    Parameters
    _________________
    data: Spark dataframe containing training data
    orig_col: (str) column in data to be transformed
    new_col: (str) header for the new transformed column
    power: (int) exponent applied to the original column

    Returns
    _________________
    transformed_df: new dataframe with the transformed column appended
    """
    transformed_df = data.withColumn(new_col, pow(data[orig_col], power))
    print('The transformed DataFrame is:')
    transformed_df.show()
    return transformed_df
import math
def tan(degrees) -> float:
    """Calculate the tangent of the given angle expressed in degrees."""
    radians = math.radians(degrees)
    return math.tan(radians)
def MaybeToNumber(instance):
    """Best-effort conversion to int, then float.

    Numbers and None pass through untouched; values that parse as int or
    float are converted; everything else is returned unchanged.
    """
    if instance is None or isinstance(instance, (float, int)):
        return instance
    for converter in (int, float):
        try:
            return converter(instance)
        except (TypeError, ValueError):
            continue
    return instance
def SELECT(aid: str) -> dict:
    """SELECT(): generate APDU for SELECT command

    *aid* may be a known alias ('MASTERCARD', 'MAESTRO', 'VISA') or a raw
    AID hex string, which is passed through unchanged.
    """
    aliases = {'MASTERCARD': 'A0000000041010', 'MAESTRO': 'A00000000410103060', 'VISA': 'A0000000031010'}
    resolved = aliases.get(aid, aid)
    return {'header': '00A40400', 'data': resolved, 'Le': '00'}
def symmetric_difference(source_set, other_set):
    """
    Return the symmetric difference of the two collections: elements present
    in exactly one of them (shared elements are excluded).
    """
    combined = source_set.symmetric_difference(other_set)
    return combined
def fill_missing(df):
    """
    Input: Pandas dataframe
    Output: Pandas dataframe
    Description: Impute missing values. Modify as needed.

    Bug fix: the sex encoding previously used `1 if 'M' else 0`, which
    always evaluates to 1 because the literal 'M' is truthy; it now tests
    the actual value.
    """
    # Fill these NA categoricals with 0
    fillna_with_zero = ["anticoagulants", "antipsychotics", "bu_nal",
                        "chemo", "narc_ans", "narcotics",
                        "obese", "on_iv", "prev_rrt", "smoker"]
    df[fillna_with_zero] = df[fillna_with_zero].fillna(0)
    # Fill with a string
    df['rrt_reason'] = df['rrt_reason'].fillna("Staff Concern/Unknown -- Imputation")
    # Cast this binary string to 0 or 1 (BUG FIX: compare x, not the literal 'M')
    df['sex'] = df['sex'].apply(lambda x: 1 if x == 'M' else 0)
    # Fill remaining nulls with the column mean
    # NOTE(review): .mean() on a non-numeric column with nulls would raise;
    # presumably all still-null columns are numeric here — confirm upstream.
    for col in df.columns:
        if df[col].isnull().sum() > 0:
            mean_val = df[col].mean()
            df[col] = df[col].fillna(mean_val)
    return df
import os
import random
def Single_Split(dir_to_search):
    """
    Recursively collect all .flac files under *dir_to_search* and split them
    into train/test/validation sets with an 88:6:6 ratio.

    Fixes: removed the leftover debug print of the full (shuffled) dataset
    and the redundant tuple([...]) wrapper; fixed the docstring typo
    ("trin:test:valuation").

    :param dir_to_search: The directory to search
    :return: (train, test, validation) lists of (full_path, filename) tuples
    """
    dataset = []
    for subdirectory, directory, files in os.walk(dir_to_search):
        for file in files:
            if file.endswith(".flac"):
                dataset.append((os.path.join(subdirectory, file), file))
    train_size = int(0.88 * len(dataset))
    test_size = int(0.06 * len(dataset))
    random.shuffle(dataset)
    train_dataset = dataset[0:train_size]
    test_dataset = dataset[train_size:train_size + test_size]
    validation_dataset = dataset[train_size + test_size:]
    return train_dataset, test_dataset, validation_dataset
import os
import torch
def read_label_file(path):
    """Read box coordinates and labels from a label file into a target dict.

    Each line holds four float box coordinates followed by an integer label.
    A missing file yields empty tensors.

    Args:
        path (str): path to label file
    Returns:
        dict: {'boxes': float32 (N,4) tensor, 'labels': int64 (N,) tensor}
    """
    boxes, labels = [], []
    if os.path.exists(path):
        with open(path) as handle:
            for line in handle.readlines():
                fields = line.split()
                boxes.append([float(v) for v in fields[:4]])
                labels.append(int(fields[4]))
    return {
        'boxes': torch.as_tensor(boxes, dtype=torch.float32),
        'labels': torch.as_tensor(labels, dtype=torch.int64),
    }
def transpose(xss):
    """
    Transpose a list of lists, treating each inner list as a column and
    returning the list of rows.

    The shortest column determines the number of output rows; entries of
    longer columns beyond that are silently dropped (no error raised).
    """
    n_rows = min(len(column) for column in xss)
    return [[column[row] for column in xss] for row in range(n_rows)]
import uuid
def generate_uuid_filename(instance, filename):
    """
    Generate a random UUID filename, keeping the original file extension.
    (*instance* is unused; kept for the upload-callback signature.)
    """
    extension = filename.rsplit(".", 1)[-1]
    return f"{uuid.uuid4()}.{extension}"
def convert_bounding_boxes(bboxes):
    """Convert (ymin, ymax, xmin, xmax) boxes to COCO (xmin, ymin, w, h).

    Accepts a list of (box, tokens) pairs and returns the converted boxes
    together with the token lists, preserving order.
    """
    coco_boxes, all_tokens = [], []
    for box, tokens in bboxes:
        ymin, ymax, xmin, xmax = box
        coco_boxes.append([xmin, ymin, xmax - xmin, ymax - ymin])
        all_tokens.append(tokens)
    assert len(coco_boxes) == len(all_tokens)
    return coco_boxes, all_tokens
def _slice(tensor, size, i):
"""Gets slice of columns of the tensor"""
if tensor.ndim == 2:
return tensor[:, i*size:(i+1)*size]
elif tensor.ndim == 1:
return tensor[i*size:(i+1)*size]
else:
raise NotImplementedError("Tensor should be 1 or 2 dimensional") | 52993054367236b60145dd66c775d2cf1da09f87 | 34,151 |
def defval(val, default=None):
    """
    Return *val* unless it is None, in which case return *default*.
    :param val: candidate value (falsy non-None values are kept)
    :param default: fallback used only when val is None
    :return: val or default
    """
    if val is None:
        return default
    return val
def fixup(ocr_text, pattern):
    """Fix spacing and numerics in OCR text.

    Occurrences of *pattern* inside *ocr_text* are replaced by a cleaned
    version with OCR-confused characters corrected ('4'->'A', '1'->'l')
    and spaces removed.
    """
    cleaned = pattern.replace("4", "A").replace("1", "l").replace(" ", "")
    return ocr_text.replace(pattern, cleaned)
def get_metric_name(metric):
    """
    Get a display name for *metric*.

    Falsy input yields "", a plain string is returned unchanged, and a
    dict is reduced to its "label" entry (None when the label is missing
    or empty).
    """
    if not metric:
        return ""
    if isinstance(metric, str):
        return metric
    return metric.get("label") or None
def true_text(variable):
    """
    Interpret *variable* as a boolean even when it is a string
    (defaults to False).

    True for True, 1, and the strings 'true'/'t'/'1' in any letter case;
    False in every other case.
    :param variable:
    :return:
    """
    if variable in (True, 1):
        return True
    if isinstance(variable, str) and variable.upper() in ('TRUE', 'T', '1'):
        return True
    return False
def minimum(evaluator, ast, state):
    """Evaluates "min(left, right)"."""
    left_val = evaluator.eval_ast(ast["left"], state)
    right_val = evaluator.eval_ast(ast["right"], state)
    return min(left_val, right_val)
import logging
def _verify_bool(val, default, name, instance):
"""
Validate the boolean valued input.
Parameters
----------
val
The prospective value.
default : None|bool
The default value.
name : str
The bound variable name.
instance
The instance to which the variable belongs.
Returns
-------
None|bool
"""
if val is None:
return default
else:
try:
return bool(val)
except Exception:
logging.error(
'Tried to convert value {} to a boolean value for attribute {} '
'of class {}'.format(val, name, instance.__class__.__name__))
raise | 5592f5d6ce2d87cb491266904bd95f2c7575a5a7 | 34,158 |
def icon(icon_class):
    """
    Render a span-based icon for the given CSS class.
    """
    return f'<span class="{icon_class}"></span>'
def pad_list(orig_list, pad_length):
    """
    Right-pad a list with empty strings up to *pad_length* items
    (no-op when the list is already long enough).
    Adapted from http://stackoverflow.com/a/3438818/3710392
    :param orig_list: the original list
    :param pad_length: the desired length
    :return: the resulting, padded copy
    """
    padding = [''] * (pad_length - len(orig_list))
    return orig_list + padding
import ast
from typing import Any
def json_to_py(json: dict, replace_comma_decimal: bool = True, replace_true_false: bool = True) -> Any:
    """Evaluate the string values of a dict into native Python objects.

    Useful when object parameters arrive as strings (e.g. from a web form).

    Args:
        json (dict): Mapping whose values may be stringified Python literals.
        replace_comma_decimal (bool, optional): Replace ',' with '.' in
            bracket-free string values so comma-decimal numbers ("2,6")
            parse as floats. Defaults to True.
        replace_true_false (bool, optional): Map the exact strings "true" /
            "false" (e.g. from javascript) to True / False. Defaults to True.

    Returns:
        dict: Copy of *json* with values converted where possible; values
        that fail ast.literal_eval stay untouched.

    Example:
        >>> json_to_py({'one_two': '1,2'})
        {'one_two': 1.2}
    """
    result = json.copy()
    for key, value in json.items():
        # Only plain strings without list/dict/tuple brackets are rewritten.
        is_plain_string = isinstance(value, str) and not any(ch in value for ch in "([{")
        if replace_comma_decimal and is_plain_string:
            value = value.replace(",", ".")
        if replace_true_false and is_plain_string:
            if value == "true":
                result[key] = True
                continue
            if value == "false":
                result[key] = False
                continue
        try:
            result[key] = ast.literal_eval(value)
        except Exception:
            pass  # leave unparseable values as-is (best effort)
    return result
def prefetch_data_for_score_calculation(reviews_qs):
    """
    Hook for pre-fetching whatever related model data `display_score`
    needs, so it can run without additional database queries.

    Currently the default `Review.objects` queryset is returned unchanged.
    """
    return reviews_qs
import numpy
def range_plot(hands):
    """Render a list of hand strings as a 13x13 grid of dots and stars.

    Suited hands ('s' in the string) are placed with the first rank as the
    row; offsuit hands and pairs use the second rank as the row.  Marked
    cells print as '*', the rest as '.' (13 lines of text).
    """
    RANK_ORDER = 'AKQJT98765432'
    grid = numpy.array([[0] * 13] * 13)
    for hand in hands:
        first = RANK_ORDER.find(hand[0])
        second = RANK_ORDER.find(hand[1])
        if 's' in hand:
            grid[first][second] = 1
        else:
            grid[second][first] = 1
    text = "\n".join(map(str, grid))
    for old, new in (('[', ''), (', ', ''), (']', ''), ('0', '.'), ('1', '*')):
        text = text.replace(old, new)
    return text
def format_quantum_numbers(info: tuple):
    """Format a quantum-number tuple like
        ('S', 1, 0, 2, ('3b', 1), ('y', 2/3))
    into a string like
        S(3, 3, 2/3)(1)
    where the fields are Lorentz label, SU(3) rep dimension, SU(2)
    dimension, hypercharge, and the '3b' charge in the trailing parens.
    """
    lorentz, su3_up, su3_down, su2, *charges = info
    charge_map = dict(charges)

    def su3_label(m, n):
        # SU(3) rep dimension (m+1)(n+1)(m+n+2)/2, with a trailing 'b' when
        # more indices are lowered than raised (conjugate rep).  NOTE: for
        # larger reps this bar convention will be problematic.
        dim = int(0.5 * (m + 1) * (n + 1) * (m + n + 2))
        return str(dim) + ("b" if n > m else "")

    su3 = su3_label(int(su3_up), int(su3_down))
    su2_dim = str(int(su2) + 1)
    return f"{lorentz}({su3}, {su2_dim}, {charge_map['y']})({charge_map['3b']})"
def rename_table_clause(old_table, new_table):
    """ Build an ALTER TABLE ... RENAME TO clause string for SQL.

    Args:
        old_table: The table to be renamed.
        new_table: The new table name.
    Returns:
        A string with the crafted clause.
    """
    return f'ALTER TABLE {old_table} RENAME TO {new_table}'
from typing import List
def median(lst: List[int]) -> float:
    """Return the median of an ordered list of numbers.

    For even-length lists this is the mean of the two center values.

    Args:
        lst - an ordered list of numbers
    Returns:
        the median of the passed in list
    """
    mid = len(lst) // 2
    if len(lst) % 2 == 0:
        return (lst[mid - 1] + lst[mid]) / 2
    return lst[mid]
def get_word_length(word):
    """ Return the number of characters in the word.
    :param word: word
    :return: int
    """
    return len(word)
def split_rowcol(matrix_dim):
    """
    Split a concatenated dimension string into (row, col).

    The suffix 'k' expands to three zeros and 'M' to six, so e.g.
    '1k_2M' becomes ('1000', '2000000').

    matrix_dim: String
        Row and column joined by '_' with optional k/M suffixes.
    return: Tuple
        (row, col) strings with suffixes expanded.
    """
    expanded = matrix_dim.replace('M', '0' * 6).replace('k', '0' * 3)
    row, col = expanded.split('_')
    return row, col
import binascii
def number2bin(number, size):
    """Convert a non-negative integer to a *size*-byte little-endian string.

    Values too large to fit are clamped to the maximum representable value
    (2**(8*size) - 1).

    Bug fix: the original clamp used `number > 2**(8*size)`, so the exact
    value 2**(8*size) slipped through, produced an odd-length hex string,
    and crashed in binascii.a2b_hex.

    :param number: non-negative integer to encode
    :param size: output length in bytes
    :return: bytes of length *size*, least-significant byte first
    """
    max_value = (1 << (8 * size)) - 1
    if number > max_value:
        number = max_value
    return number.to_bytes(size, "little")
def check_bounds(value):
    """Clamp the given value into the range 0 to 4095.

    In-range values are returned unchanged (including numeric strings,
    which stay strings); out-of-range values are clamped to 0 or 4095;
    non-numeric input falls back to 0 with a warning printed.

    :args: the value to be checked
    :returns: the checked value
    """
    try:
        numeric = int(value)
    except ValueError:
        print("Value was not a number, returned default value of 0")
        return 0
    if numeric > 4095:
        return 4095
    if numeric < 0:
        return 0
    return value
from typing import List
import glob
def get_html_files(src: str) -> List[str]:
    """Collect the names of all HTML files directly inside *src*."""
    return glob.glob(f"{src}/*.html")
def RGBDimtoRGB(R, G, B, Dim):
    """ Scale an RGB color by a brightness percentage.
    :warning: When Dim is 0, no color component information survives
    :warning: Prefer the RGBDimtoHSV function
    :param R: red value (0;255)
    :param G: green value (0;255)
    :param B: blue value (0;255)
    :param Dim: brightness value (0.0;100) -> may exceed 100% if needed (beware of saturation)
    :return: RGB tuple (0;255) """
    factor = Dim / 100.0
    return tuple(int(channel * factor) for channel in (R, G, B))
def _format_datetime_for_js(stamp):
"""Formats time stamp for Javascript."""
if not stamp:
return None
return stamp.strftime("%Y-%m-%d %H:%M:%S %Z") | a305c9c64f1ec9de0bd99181f14dedebae8cf940 | 34,182 |
def constrain(value, min_value, max_value):
    """
    Clamp `value` into the inclusive range `[min_value, max_value]`.

    Parameters
    ----------
    `value` : The value to be constrained
    `min_value` : The lower limit of range (inclusive)
    `max_value` : The upper limit of range (inclusive)

    Returns
    -------
    `min_value` : if `value` is less than `min_value`
    `max_value` : if `value` is greater than `max_value`
    `value` : otherwise

    Examples
    --------
    >>> constrain(10, 20, 40)
    20
    >>> constrain(30, 20, 40)
    30
    >>> constrain(50, 20, 40)
    40
    """
    lower_bounded = max(value, min_value)
    return min(lower_bounded, max_value)
import torch
def _find_lengths(messages):
"""
>>> messages = torch.tensor([[1, 1, 0, 0, 0, 1], [1, 1, 1, 10, 100500, 5]])
>>> lengths = _find_lengths(messages)
>>> lengths
tensor([3, 6])
"""
positions = []
for i in range(messages.size(0)):
zero_pos = (messages[i, :] == 0).nonzero()
if zero_pos.size(0) > 0:
position = zero_pos[0].item() + 1
else:
position = messages.size(1)
positions.append(position)
return torch.Tensor(positions).long().to(messages.device) | 9c392fe6c9331b50453ed7f693d7b3ec396636f3 | 34,184 |
def get_news(soup):
    """Return the article tags from the news section of the page.

    :param soup: BeautifulSoup document object
    :return: list of <article> tags carrying the news-item CSS classes
    """
    return soup.find_all("article", class_="clanek col-12 mb-3 mt-3")
def execSQL(conn, sql):
    """
    Execute *sql* on a fresh cursor of *conn* and return that cursor.

    If necessary, the caller must invoke conn.commit(); do *NOT* commit
    here — existing callers of this function rely on that contract.
    """
    cur = conn.cursor()
    cur.execute(sql)
    return cur
import math
def get_sample_batches(samples_file, num_batches):
    """Return a list of sample batches.

    Reads the non-empty lines of *samples_file* and splits them into
    num_batches lists of sample URIs; every batch except possibly the last
    has ceil(n_samples / num_batches) entries.
    """
    with open(samples_file, 'r') as handle:
        samples = [ln.strip() for ln in handle if ln.strip()]
    per_batch = int(math.ceil(len(samples) / float(num_batches)))
    return [samples[start:start + per_batch]
            for start in range(0, len(samples), per_batch)]
import pickle
def load_ranks_from_file(filename):
    """
    Load a pickled rank file and return (ranks, inverse_ranks).
    """
    with open(filename, 'rb') as handle:
        payload = pickle.load(handle)
    return payload['ranks'], payload['inverse_ranks']
import json
def global_extent(tiles):
    """
    Compute the union raster extent from a list of tiles.

    Args:
        tiles: list of paths to tile config JSON files with an "roi" entry
    Returns:
        (min_x, max_x, min_y, max_y) tuple
    """
    min_x = max_x = min_y = max_y = None
    for tile_path in tiles:
        with open(tile_path, "r") as handle:
            roi = json.load(handle)["roi"]
        x0, y0 = roi["x"], roi["y"]
        x1, y1 = x0 + roi["w"], y0 + roi["h"]
        if min_x is None or x0 < min_x:
            min_x = x0
        if min_y is None or y0 < min_y:
            min_y = y0
        if max_x is None or x1 > max_x:
            max_x = x1
        if max_y is None or y1 > max_y:
            max_y = y1
    return (min_x, max_x, min_y, max_y)
import yaml
def read_yaml_data(yaml_file):
    """
    Read a YAML file (as UTF-8) and return its parsed content.

    Args:
        yaml_file: yaml file path
    Returns:
        dict: yaml content of the yaml file
    """
    with open(yaml_file, 'r', encoding="utf-8") as handle:
        raw = handle.read()
    return yaml.safe_load(raw)
def get_unique_peptides(peptides: list) -> list:
    """
    Return the unique peptides from the input list (order not preserved).

    Args:
        peptides (list of str): peptide list.
    Returns:
        list (of str): list of peptides (unique).
    """
    unique = set(peptides)
    return list(unique)
import re
def text_prepare(text):
    """
    Tokenization-friendly cleanup: lower-case the text, map common
    punctuation to spaces, drop every other non-[0-9a-z ] character, and
    collapse runs of whitespace.
    """
    punctuation_to_space = re.compile(r'[/(){}\[\]\|@,;#+_]')
    keep_only = re.compile('[^0-9a-z ]')
    squeeze_whitespace = re.compile(r'\s+')
    cleaned = punctuation_to_space.sub(' ', text.lower())
    cleaned = keep_only.sub('', cleaned)
    return squeeze_whitespace.sub(' ', cleaned).strip()
def is_mutation(word, genes):
    """
    Heuristically decide whether a word denotes a mutation. A word qualifies
    when it is at least 3 characters long, is not a known gene, and matches
    at least one of these character-mix patterns:
    - contains an underscore
    - has digits and 2 or more upper case letters
    - has 3 or more digits and at least 1 upper case letter
    - has at least 1 digit, 1 upper case letter and 1 symbol
    - has at least 1 digit and 1 lower case letter
    - contains a hyphen and 2 or more upper case letters
    - has lower case letters and 2 or more upper case letters

    :param str word: The word to check
    :param List[str] genes: The list of genes (never treated as mutations)
    :return bool: True if the word is a mutation, False otherwise
    """
    word = word.strip()
    if len(word) < 3 or word in genes:
        return False
    n_digits = sum(ch.isdigit() for ch in word)
    n_upper = sum(ch.isupper() for ch in word)
    has_underscore = '_' in word
    has_hyphen = '-' in word
    has_digit = n_digits >= 1
    has_three_digits = n_digits > 2
    has_upper = n_upper >= 1
    has_two_upper = n_upper > 1
    has_lower = any(ch.islower() for ch in word)
    has_symbol = any(not ch.isalnum() for ch in word)
    return (has_underscore
            or (has_digit and has_two_upper)
            or (has_three_digits and has_upper)
            or (has_digit and has_upper and has_symbol)
            or (has_digit and has_lower)
            or (has_hyphen and has_two_upper)
            or (has_lower and has_two_upper))
import logging
def get_log_level(ll):
    """Return the numeric logging level for the name *ll* (case-insensitive).

    Falls back to ``logging.ERROR`` when the name is not defined by the
    logging module.

    :param ll: `str` log level name, e.g. ``"debug"`` or ``"WARNING"``.
    :returns: `int` the corresponding logging level.
    """
    level = getattr(logging, ll.upper(), None)
    return logging.ERROR if level is None else level
def date_parser(dates):
    """Extract the date portion from 'yyyy-mm-dd hh:mm:ss' strings.

    Parameters
    ----------
    dates: list of strings in the format 'yyyy-mm-dd hh:mm:ss'

    Returns
    -------
    list:
        Only the date parts, each in 'yyyy-mm-dd' format.
    """
    # The date is everything before the first space.
    return [stamp.split(" ")[0] for stamp in dates]
def intfloat(x):
    """Parse *x* as a float, then truncate to int (int . float composition).

    Convenient for command-line values like "3.0" that int() alone rejects.
    """
    as_float = float(x)
    return int(as_float)
def create_health_check(lock):
    """
    Build a health-check callable reporting whether *lock* is acquired.

    The returned function calls ``lock.is_acquired()`` (a Deferred) and maps
    its boolean result to the ``(True, {"has_lock": ...})`` health tuple.
    """
    def _to_health(acquired):
        # Always healthy; the lock state is reported as detail.
        return (True, {"has_lock": acquired})

    def health_check():
        return lock.is_acquired().addCallback(_to_health)

    return health_check
def check_batch_state(state, max_len, pad_token):
    """Left-pad or left-trim a batch of decoder states to a fixed length.

    Args:
        state (list): list of L decoder states (B, ?, dec_dim)
        max_len (int): maximum length authorized
        pad_token (int): padding token id

    Returns:
        final (list): list of L decoder states (B, pred_len, dec_dim)
    """
    batch = len(state)
    feat_dim = state[0].size(1)
    # Fresh buffer on the same device/dtype as the states, pre-filled with pad.
    padded = state[0].data.new(batch, max_len, feat_dim).fill_(pad_token)
    for idx, seq in enumerate(state):
        length = seq.size(0)
        if length < max_len:
            # Short sequence: keep it right-aligned (left padding).
            padded[idx, max_len - length:, :] = seq
        else:
            # Long sequence: keep only the most recent max_len steps.
            padded[idx, :, :] = seq[length - max_len:, :]
    return padded
def get_clicked_pos(pos, rows, width):
    """
    Map a pygame mouse position to the (row, col) of the clicked grid spot.

    :param pos: (y, x) pixel coordinates of the click
    :param rows: number of rows (and columns) in the square grid
    :param width: pixel width of the grid
    :return: (row, col) indices of the clicked cell
    """
    cell_size = width // rows
    y, x = pos
    return y // cell_size, x // cell_size
def stringTransform(mathData):
    """Convert rows of numbers back into comma-delimited text lines.

    Each row becomes one string of its values joined by ',' and terminated
    with a newline.
    """
    stringData = []
    for row in mathData:
        stringData.append(",".join(str(value) for value in row) + "\n")
    return stringData
def httpPost(url, *args):
    """Stub for an HTTP POST helper.

    Logs the target URL and arguments to stdout and returns an empty
    response body; no network request is actually performed.

    Args:
        url (str): The URL to post to.
        *args: Optional payload / parameter-dictionary arguments.

    Returns:
        str: The (empty) content returned for the POST operation.
    """
    print(url, args)
    response = ""
    return response
def target_label_split(label):
    """
    Split a target label into (fsname, target type, index).

    Labels look like '<fsname>-<TYPE><hex index>'; a bare label with no '-'
    is an MGS and yields (None, type, None).
    """
    parts = label.rsplit("-", 1)
    if len(parts) == 1:
        # MGS: no fsname and no index.
        return (None, parts[0][:3], None)
    fsname, target = parts
    # First three chars are the type; the rest is a hex index.
    return (fsname, target[:3], int(target[3:], 16))
def _ResolveMsg(msg_type, msg_map):
"""Fully resolve a message type to a name."""
if msg_type in msg_map:
return msg_map[msg_type]
else:
return '[Unknown message %d (0x%x)]x' % (msg_type, msg_type) | b8bd2b9aa29bbb88ee6f70038f5f97cf2bea908e | 34,205 |
def round_digits(
    v: float, num_digits: int = 2, use_thousands_separator: bool = False
) -> str:
    """
    Format a number as a string, optionally rounded and comma-grouped.

    :param v: value to convert; only floats are rounded
    :param num_digits: number of decimal digits to keep (None disables rounding)
    :param use_thousands_separator: insert "," thousands separators
    :returns: str with the formatted value
    """
    if isinstance(v, float) and num_digits is not None:
        rounded = float("%0.{}f".format(num_digits) % v)
    else:
        # Non-floats (and num_digits=None) pass through untouched.
        rounded = v
    if use_thousands_separator:
        rounded = "{0:,}".format(rounded)  # type: ignore
    return str(rounded)
from typing import Sequence
from typing import Hashable
def filter_dict(src: dict, keys_to_filter: Sequence[Hashable]) -> dict:
    """
    Keep only the entries of *src* whose keys appear in *keys_to_filter*.

    Parameters
    ----------
    src: dict
        Source dictionary.
    keys_to_filter: Sequence[Hashable]
        Keys that should survive into the result.

    Returns
    -------
    dict
        New dictionary containing only the selected entries.
    """
    filtered = {}
    for key, value in src.items():
        if key in keys_to_filter:
            filtered[key] = value
    return filtered
def oct_to_decimal(oct_string: str) -> int:
    """
    Convert an octal string to its decimal equivalent.

    >>> oct_to_decimal("12")
    10
    >>> oct_to_decimal(" 12 ")
    10
    >>> oct_to_decimal("-45")
    -37
    >>> oct_to_decimal("2-0Fm")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    >>> oct_to_decimal("")
    Traceback (most recent call last):
    ...
    ValueError: Empty string was passed to the function
    >>> oct_to_decimal("19")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    """
    stripped = str(oct_string).strip()
    if not stripped:
        raise ValueError("Empty string was passed to the function")
    negative = stripped.startswith("-")
    digits = stripped[1:] if negative else stripped
    # Only the digits 0-7 are legal octal digits.
    if not digits.isdigit() or any(char in "89" for char in digits):
        raise ValueError("Non-octal value was passed to the function")
    value = 0
    for char in digits:
        value = value * 8 + int(char)
    return -value if negative else value
def crop_image_nparts(input_image, n_parts_x, n_parts_y=None):
    """
    Split *input_image* into a grid of n_parts_x * n_parts_y equal tiles.

    When n_parts_y is omitted (or falsy) the grid is square
    (n_parts_x * n_parts_x). Column-major order: all tiles of the first
    column are emitted before the second column.
    """
    if not n_parts_y:
        n_parts_y = n_parts_x
    width, height = input_image.size
    tile_w = int(width / n_parts_x)
    tile_h = int(height / n_parts_y)
    tiles = []
    for col in range(n_parts_x):
        for row in range(n_parts_y):
            left, top = col * tile_w, row * tile_h
            tiles.append(input_image.crop((left, top, left + tile_w, top + tile_h)))
    return tiles
def _remap_constraint(name, con_table, fk_statement, table):
    """ Produce ALTER TABLE ... statements for each constraint.

    :param name: name of the constraint to drop from ``con_table``
    :param con_table: table the constraint is currently defined on
    :param fk_statement: the constraint definition text; when it references
        the replaced table it is expected to contain ``REFERENCES <table>(...)``
        as a single space-separated token -- assumed format, TODO confirm
        against the caller that builds it
    :param table: name of the table being replaced (its new data lives in
        ``temp_import_<table>``)
    :return: list of SQL statement strings (one DROP followed by one ADD)

    NOTE(review): identifiers are interpolated directly into the SQL text;
    safe only for trusted internal names, not for untrusted input.
    """
    alterations = ['''
    ALTER TABLE {_table} DROP CONSTRAINT {conname}
    '''.format(_table=con_table, conname=name)]
    # Constraint applies to the table we're replacing
    if con_table == table:
        alterations.append('''
        ALTER TABLE {table} ADD {stmnt}
        '''.format(table=con_table, stmnt=fk_statement))
    # Constraint references the table we're replacing. Point it at the new
    # one.
    else:
        tokens = fk_statement.split(' ')
        # Point the constraint to the new table.
        reference_idx = tokens.index('REFERENCES') + 1
        table_reference = tokens[reference_idx]
        # Rewrite 'table(' -> 'temp_import_table(' inside the REFERENCES token.
        match_old_ref = '{}('.format(table)
        new_ref = 'temp_import_{}('.format(table)
        new_reference = table_reference.replace(match_old_ref, new_ref)
        tokens[reference_idx] = new_reference
        con_definition = ' '.join(tokens)
        create_constraint = '''
        ALTER TABLE {table} ADD {definition}
        '''.format(table=con_table, definition=con_definition)
        alterations.append(create_constraint)
    return alterations | d12cbe0518410a144d1647691f30a04f78f17661 | 34,212
from datetime import datetime
import os
def timestamp_file(file_loc: str) -> str:
    """Insert a second-resolution date stamp before the file extension.

    :param file_loc: location of file to write to
    :type file_loc: str
    :return: the file location with the timestamp appended before the extension
    :rtype: str
    """
    stamp = datetime.now().strftime("_%Y-%m-%d-%H-%M-%S")
    root, extension = os.path.splitext(file_loc)
    return root + stamp + extension
from typing import Union
from typing import Sequence
def format_as_iter(vals: Union[int, Sequence[int]], iters_per_epoch: int, time_scale: str):
    """Format data to be at iteration time scale.

    Negative values correspond to the opposite time scale: e.g. with
    ``time_scale="epoch"`` a value of ``-1`` means 1 iteration.

    Args:
        vals (`int(s)`): Values to format (scalar or sequence).
        iters_per_epoch (int): Number of iterations per epoch.
        time_scale (str): Time scale of current values ("epoch" or "iter").

    Returns:
        int: Time (positive int) formatted in iteration format
    """
    assert time_scale in ["epoch", "iter"]
    is_scalar = not isinstance(vals, Sequence)
    seq = [vals] if is_scalar else vals
    # Values follow the epoch convention when they are non-negative epochs
    # or non-positive iteration counts.
    epoch_convention = (time_scale == "epoch" and all(x >= 0 for x in seq)) or (
        time_scale == "iter" and all(x <= 0 for x in seq)
    )
    if epoch_convention:
        seq = type(seq)([iters_per_epoch * abs(x) for x in seq])
    else:
        seq = type(seq)([abs(x) for x in seq])
    return seq[0] if is_scalar else seq
import copy
import operator
def script_generator(sentences_dict, diphone_wishlist, number_of_sentence, addition_args):
    """
    Greedily select the most phonetically rich sentences for a script.

    Rarer diphones are prioritised over common ones. After a sentence is
    picked, an addition factor is added to the wishlist count of each of its
    diphones so very rare diphones do not skew the rest of the script.

    NOTE(review): this function MUTATES its inputs -- picked entries are
    popped from ``sentences_dict`` and ``diphone_wishlist`` counts are
    inflated by the addition factor.

    :param sentences_dict: sentence key -> (diphone list, sentence text)
    :param diphone_wishlist: diphone -> number of occurrences in the sentences
    :param number_of_sentence: number of sentences the script should contain
    :param addition_args: optional sequence; ``addition_args[0]`` is the
        addition factor (defaults to 10 when None)
    :return: dict mapping 'z001_NNNN' keys to the selected sentence texts
    """
    if addition_args is None:
        addition_factor = 10
    else:
        addition_factor = addition_args[0]
    # Snapshot of the wishlist used only to report remaining coverage.
    diphone_inventory = copy.deepcopy(diphone_wishlist)
    dict_final = {}
    for n in range(number_of_sentence): # need to ensure the script doesn't fail when this range is higher
        # than sentence_diphone_dict.items()
        score_dict = {}
        discores = {}
        for k, v in sentences_dict.items():
            discores[k] = []
            score = 0
            # Each rare diphone contributes 1/frequency to the sentence score.
            for i, diphone in enumerate(v[0]):
                # NOTE(review): bare except silently skips diphones absent
                # from the wishlist (KeyError) -- presumably intentional.
                try:
                    score += 1 / diphone_wishlist[diphone]
                    individual_score = 1 / diphone_wishlist[diphone]
                    discores[k].extend([diphone, individual_score])
                except:
                    pass
            # NOTE(review): divides by len(v) == 2 (the (diphones, sentence)
            # pair), not by the diphone count -- looks suspicious; confirm.
            score2 = score / len(v)
            score_dict[k] = score2
        best = max(score_dict.items(), key=operator.itemgetter(1))[0] # finding max value in dict
        best_sentence = sentences_dict[best][1] # actual sentence
        print('phones left: ', len(diphone_inventory))
        for diphone in sentences_dict[best][0]:
            # NOTE(review): bare except covers diphones already removed from
            # the inventory / missing from the wishlist.
            try:
                diphone_wishlist[diphone] = diphone_wishlist[diphone] + addition_factor # used to find a flatter distribution
                diphone_inventory.pop(diphone)
            except:
                pass
        print('Sentence', n+1, '({} diphones)'.format(len(sentences_dict[best][0])))
        dict_final['z001_' + str(n).zfill(4)] = best_sentence
        sentences_dict.pop(best)
    return dict_final | 532692a8aae33bcf328d3db6a067622f7e91c8fe | 34,217
import math
def deg2rad(degrees):
    """Convert an angle from degrees to radians."""
    return math.pi * degrees/180.0
def fire(obj, name, *args, **kwargs):
    """
    Invoke ``func(*args, **kwargs)`` for every function registered for the
    named signal on *obj* and collect the results.

    Registrations live in the instance's ``_signals`` dict; no registrations
    yields an empty list.
    """
    handlers = vars(obj).get('_signals', {}).get(name, ())
    return [handler(*args, **kwargs) for handler in handlers]
def get_value(config, key):
    """Look up a (possibly nested) configuration value by a flat string key.

    The key is a list of dict keys delimited by '-' or '_' as used on the
    commandline, e.g. ``"train-batch_size"``.
    """
    path = key.replace('-', '_').split('_')
    node = config
    for part in path[:-1]:
        node = node[part]
    return node[path[-1]]
import re
def check_domain_valid(submission):
    """
    Return True when *submission* is a syntactically valid domain name.

    Requires at least one label plus an alphabetic TLD of 2+ characters.

    :param submission: The submission to be checked
    :return: True if 'submission' is a valid domain, otherwise False
    """
    pattern = "^([A-Za-z0-9-]+(?:\\.[A-Za-z0-9-]+)*(?:\\.[A-Za-z]{2,}))$"
    return bool(re.match(pattern, submission))
def compute_chi_eff(m1,m2,s1,s2):
""" Compute chi effective spin parameter (for a given component)
--------
m1 = primary mass component [solar masses]
m2 = secondary mass component [solar masses]
s1 = primary spin z-component [dimensionless]
s2 = secondary spin z-component [dimensionless]
"""
return (m1*s1+m2*s2)/(m1+m2) | ee7d619c282a2d48a68651b896024c3c2e3b4d72 | 34,224 |
import uuid
def create_token():
    """Generate a 32-character unique hexadecimal identifier.

    Returns:
        str: unique identifier
    """
    token = uuid.uuid4()
    return token.hex
import subprocess
def bedtools_version(bedtools_exe="bedtools"):
"""
Returns the version number for bedtools
"""
bedtools_version = subprocess.check_output([bedtools_exe,
'--version'])
return bedtools_version.decode().strip() | 93dbb23583c6dc0d589ae81ed240117561e6c3bd | 34,226 |
def appropriate_partition(distance):
    """Find appropriate partition of a distance into parts.

    Parameters
    ----------
    distance : float
        Traveled distance in meters

    Returns
    -------
    segment_distance : float
        Appropriate length of segments in which we split the total distance
    """
    # (upper bound exclusive, segment length) — first match wins.
    thresholds = (
        (5000, 400),
        (20000, 1000),
        (40000, 2000),
        (100000, 5000),
    )
    for limit, segment in thresholds:
        if distance < limit:
            return segment
    return 10000
import itertools
def all_subsets(A, strict=True, null=True):
    """
    Yield all subsets of A (list/tuple/etc), ordered by size. If strict is
    False, the result includes A itself; if null is True, it includes ().
    """
    lo = 0 if null else 1
    hi = len(A) if strict else len(A) + 1
    # Build the combinations objects eagerly (snapshotting A), chain lazily.
    per_size = [itertools.combinations(A, size) for size in range(lo, hi)]
    return itertools.chain.from_iterable(per_size)
def AV_to_EBpmRp(A_V):
    """
    Convert A_V to E(Bp-Rp). NOTE: assumes A_V has been "corrected" for
    distance, galactic latitude, etc. So if you pull A_V from a total
    extinction in a LOS map (e.g., SF98) that doesn't do this correction,
    you'll get wrong answers.
    """
    R_V = 3.1  # total-to-selective extinction ratio
    E_BmV = A_V / R_V  # E(B-V) = A_V / R_V
    # Stassun+2019 calibration from E(B-V) to E(Bp-Rp).
    return 1.31 * E_BmV
def filter_repos_by_stars(repos, min_stars=10):
    """Return the repos with more than *min_stars* stargazers.

    Repos missing the 'stargazers_count' key are treated as having 0 stars
    (previously ``repo.get(...)`` returned None, and ``None > int`` raised
    TypeError).

    :param repos: iterable of repo dicts (GitHub API shape)
    :param min_stars: exclusive lower bound on stargazers_count
    :return: list of repo dicts passing the threshold
    """
    return [repo for repo in repos if repo.get('stargazers_count', 0) > min_stars]
from typing import Mapping
from typing import Tuple
def mapping_sort_key(prediction: Mapping[str, str]) -> Tuple[str, ...]:
    """Return a tuple for sorting mapping dictionaries."""
    fields = (
        "source prefix",
        "source identifier",
        "relation",
        "target prefix",
        "target identifier",
        "type",
        "source",
    )
    return tuple(prediction[field] for field in fields)
import base64
import hashlib
def compute_md5_for_string(string):
    """Return the base64-encoded MD5 digest of *string* (UTF-8 encoded)."""
    digest = hashlib.md5(string.encode('utf-8')).digest()
    return base64.b64encode(digest).decode('utf-8')
def get_inverse_mat(matrix):
    """
    Return the pseudo-inverse of *matrix* via its ``pinv()`` method.

    :param matrix: a square or non-square matrix exposing ``pinv()``
    :return: the pseudo-inverse of the matrix
    """
    pseudo_inverse = matrix.pinv()
    return pseudo_inverse
def calc_portfolio_cov(df):
    """
    Covariance matrix of the daily returns of a portfolio of stocks.

    :param df: Daily prices of portfolio of stocks
    :return: Covariance of the daily percentage returns
        (NOTE(review): no portfolio weighting is applied here, despite
        the original docstring's claim -- confirm with callers)
    """
    returns = df.pct_change()
    return returns.cov()
def fits_identify(origin, *args, **kwargs):
    """Check whether the given filename (args[0]) has a FITS extension."""
    if not isinstance(args[0], str):
        return False
    extension = args[0].lower().split('.')[-1]
    return extension in ['fits', 'fit']
def copy_column(column, schema):
    """
    Safely create a copy of *column* bound to the given *schema*.
    """
    duplicated = column.copy(schema=schema)
    return duplicated
def available_tensors(graph):
    """All tensors in the model's graph, as (name, op type, shape) tuples."""
    return [
        (tensor.name, op.type, tensor.shape.as_list())
        for op in graph.get_operations()
        for tensor in op.outputs
    ]
def expected_args(*args):
    """
    Decorator attaching the expected-to-be-extracted args to a function
    as its ``expected_args`` attribute.
    """
    def _decorate(func):
        # Fresh list per decorated function so later mutation is isolated.
        func.expected_args = list(args)
        return func
    return _decorate
def calculate_input_voltage(excitation, Rdc, nominal_impedance):
    """Simplify an electrical input definition to an input voltage.

    :param excitation: (value, unit) pair; unit is "V" (volts),
        "W" (watts into Rdc) or "Wn" (watts into the nominal impedance).
    :param Rdc: DC resistance in ohms.
    :param nominal_impedance: nominal impedance in ohms.
    :return: input voltage as a float, or None for an unknown unit
        (a usage hint is printed in that case).
    """
    # Renamed from `type` to avoid shadowing the builtin.
    val, unit = excitation
    if unit == "Wn":
        return (val * nominal_impedance) ** 0.5
    if unit == "W":
        return (val * Rdc) ** 0.5
    if unit == "V":
        return val
    # Original message was garbled by doubled quotes and a line continuation
    # inside the string literal; this is the cleaned-up equivalent.
    print('Input options are [float, "V"], [float, "W"], [float, "Wn", float]')
    return None
def _unembed(tree_list):
"""Unembeds (or deletes) extra spaces at the end of the strings."""
unembedded = []
for line in tree_list:
unembedded.append(line.rstrip())
return unembedded | c692f2f440b88a2a079b48f7ee1928f529136a15 | 34,247 |
import os
import jinja2
def render(tpl_path, context):
    """Render an haproxy config from a Jinja2 template.

    Parameters
    -----------------
    tpl_path : str
        location of the template file to render
    context : dict
        information to include in the template i.e proxy details

    Returns
    ----------------
    str
        rendered haproxy config as string
    """
    directory, filename = os.path.split(tpl_path)
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(directory or './'), autoescape=True
    )
    return environment.get_template(filename).render(context)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.