content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def strip_yaml(text):
    """
    Strip a leading YAML front-matter block (between the first '---' line
    and the next '---' line) from *text*.

    :param text: input string, possibly starting with '---'
    :return: text with the front matter removed, or unchanged if absent
    """
    if not text.startswith('---'):
        return text
    lines = text.split('\n')
    lines.pop(0)  # drop the opening '---'
    n = 0
    for ln in lines:
        n += 1
        # bug fix: the original incremented n in both branches and never
        # broke out, so n always reached len(lines) and '' was returned.
        if ln == '---':
            break
    return '\n'.join(lines[n:])
def unique_test_name(request):
    """Build a unique test name; class-based tests get the class name prefixed."""
    node = request.node
    if node.cls is None:
        return node.name
    return node.cls.__name__ + "__" + node.name
def accidental2string(acc_number):
    """Return the accidental symbol(s) for a signed accidental count.

    Positive -> sharps ('#'), negative -> flats ('b'), zero -> natural ('n'),
    None -> empty string.
    """
    if acc_number is None:
        return ""
    if acc_number > 0:
        return "#" * int(acc_number)
    if acc_number < 0:
        return "b" * int(abs(acc_number))
    return "n"
import copy
def put_on_top_list(top_elements_list, raw_list, forceinsertion=False):
    """
    Return a copy of raw_list reordered so the elements of top_elements_list
    come first (in their given order).

    forceinsertion: when True, elements of top_elements_list missing from
    raw_list are inserted at the front anyway.
    """
    result = copy.copy(raw_list)
    # Walk the wanted elements back-to-front so the first one ends up first.
    for wanted in reversed(top_elements_list):
        if wanted in result:
            result.insert(0, result.pop(result.index(wanted)))
        elif forceinsertion:
            result.insert(0, wanted)
    return result
def _IsRemoteBetter(new_name, old_name):
"""Indicates if a new remote is better than an old one, based on remote name.
Names are ranked as follows: If either name is "origin", it is considered
best, otherwise the name that comes last alphabetically is considered best.
The alphabetical ordering is arbitrary, but it was chosen because it is
stable. We prefer "origin" because it is the standard name for the origin
of cloned repos.
Args:
new_name: The name to be evaluated.
old_name: The name to compare against.
Returns:
True iff new_name should replace old_name.
"""
if not new_name or old_name == 'origin':
return False
if not old_name or new_name == 'origin':
return True
return new_name > old_name | 49ca158c596a173b881db169fe30531d8a77b9ae | 46,271 |
def dev(default):
    """Entry-point hook producing settings for the dev environment.

    :param default: base settings mapping; mutated in place and returned.
    """
    default.update(ANSWER=42)
    return default
def calculate_amount(amount, currency="GTQ", exchange_rate=1):
    """
    Compute the fee owed for a given sales amount (tiered schedule).
    """
    SEMILLA = 0.01           # 1%
    EMPRESA = 0.000133335    # 0.0133335%
    ESCALADA = 0.02          # 2%
    ESTABLECIMIENTO = 4000
    if amount <= 25000:
        return amount * SEMILLA
    if amount <= 100000:
        fee = amount * (SEMILLA + ((amount / 1000) * EMPRESA))
        # Fees in this tier are capped at 2000.
        return fee if fee <= 2000 else 2000
    if amount <= 200000:
        return amount * ESCALADA
    return ESTABLECIMIENTO
def vac_to_air_sdss(vac):
    """Convert vacuum wavelengths [AA] into air wavelengths (SDSS conversion formula). Tested."""
    return vac / (1.0 + 2.735182*10**-4 + 131.4182 / vac**2 + 2.76249*10**8 / vac**4)
from typing import Tuple
def get_chunk_type(tok: str) -> Tuple[str, str]:
    """
    Split an IOB label into its chunk class and chunk type.

    Args:
        tok: Label in IOB format, e.g. "B-DRUG"
    Returns:
        tuple: ("B", "DRUG")
    """
    parts = tok.split('-')
    return parts[0], parts[-1]
import re
def replace(regex_pattern: str, replacement: str, input_string: str, *, ignore_case: bool = True, **kwargs):
    """Regex-substitute *replacement* for *regex_pattern* in *input_string*.

    Matching is case-insensitive by default; re.IGNORECASE is OR-ed into any
    caller-supplied ``flags`` kwarg. Remaining kwargs pass through to re.sub.
    """
    if ignore_case:
        # Merge IGNORECASE with whatever flags the caller provided
        # (the original duplicated the re.sub call in both branches).
        kwargs['flags'] = kwargs.get('flags', 0) | re.IGNORECASE
    return re.sub(regex_pattern, replacement, input_string, **kwargs)
def check_dist(dist, filename):
    """
    Verifies that the JS code has been bundled correctly
    """
    # Dev bundles are always accepted.
    if filename.endswith("dev.js"):
        return True
    for entry in dist:
        path = entry.get("relative_package_path")
        candidates = path if isinstance(path, list) else [path]
        if any(filename in candidate for candidate in candidates):
            return True
    return False
import logging
def get_default_logger():
    """
    Return pynonymizer's default logging instance.

    :return: the "pynonymizer" logging.Logger
    """
    logger = logging.getLogger("pynonymizer")
    return logger
def subtract_arrays(array, subtract):
    """
    Return the elements of *array* that do not appear in *subtract*.

    Args:
        array (list): source list
        subtract (list): elements to remove
    Returns:
        list: filtered copy of *array*
    """
    kept = []
    for item in array:
        if item not in subtract:
            kept.append(item)
    return kept
import asyncio
async def future_wrapper(traits_future):
    """
    Wrap a Traits Futures future as a schedulable coroutine.

    Awaiting the returned coroutine yields the Traits future's result once
    its "done" trait fires.
    """
    # Bridge callback: runs when the Traits future completes and forwards
    # its result to the asyncio future so the awaiting coroutine resumes.
    def set_result(event):
        traits_future = event.object
        asyncio_future.set_result(traits_future.result)
    # Once we can assume a minimum Python version of 3.7, this should
    # be changed to use get_running_event_loop instead of get_event_loop.
    asyncio_future = asyncio.get_event_loop().create_future()
    # Observe the "done" trait; set_result fires when the future finishes.
    traits_future.observe(set_result, "done")
    return await asyncio_future
from typing import Tuple
def validate_number_of_replace_rule(strings_before_change: list, strings_after_change: list) -> Tuple[bool, str]:
    """
    Check that the replacement rules are well-formed: the number of
    pre-replacement strings must equal the number of post-replacement strings.

    Parameters
    ----------
    strings_before_change: list
    strings_after_change: list

    Returns
    -------
    tuple[bool, str]: (is_valid, error_message) — message is empty when valid
    """
    if len(strings_before_change) != len(strings_after_change):
        return False, "【エラー】置換前の文字列と置換後の文字列の数が合いません。"
    return True, ""
def boll(kline):
    """Compute BOLL (Bollinger Band) indicator columns on a K-line frame.

    :param kline: pd.DataFrame
        K-line data, columns = ["symbol", "dt", "open", "close", "high", "low", "vol"]
    :return: pd.DataFrame
        the same frame with BOLL columns added in place
    """
    close = kline['close']
    # NOTE(review): the mid band uses a 26-bar window while the std uses a
    # 20-bar window — unusual for Bollinger bands, preserved as-is.
    kline['boll-mid'] = close.rolling(26).mean()
    kline['boll-tmp2'] = close.rolling(20).std()
    kline['boll-top'] = kline['boll-mid'] + 2 * kline['boll-tmp2']
    kline['boll-bottom'] = kline['boll-mid'] - 2 * kline['boll-tmp2']
    # Round every band to 2 decimals (after the bands are computed from
    # the unrounded values).
    for column in ('boll-mid', 'boll-tmp2', 'boll-top', 'boll-bottom'):
        kline[column] = kline[column].apply(round, args=(2,))
    return kline
import time
def show_time(f):
    """
    Decorator that measures and prints the execution time (in seconds) of
    the wrapped function.

    :param f: function to time
    :return: wrapper that prints the elapsed time and returns f's result
    """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        # bug fix: the original ignored f's arguments and return value.
        result = f(*args, **kwargs)
        end_time = time.time()
        print(end_time - start_time)
        return result
    return wrapper
def el(name, content):
    """Write an XML element.

    *name* may include attributes (e.g. 'a href="x"'); the closing tag uses
    only the tag name (text before the first space).
    """
    tag = name.partition(" ")[0]
    return "<{}>{}</{}>".format(name, content, tag)
def lbs_to_kg(lbs):
    """
    Convert pounds to kilograms
    """
    return lbs / 2.204623
import math
def add_radians(a, b):
    """Add two radian angles, wrapping the result into [-pi, pi).

    Parameters:
        :param float a: Angle a
        :param float b: Angle b
    Returns: The resulting angle in radians, sign-adjusted to be closest to 0
        :rtype: float
    """
    two_pi = 2 * math.pi
    shifted = (a + b + math.pi) % two_pi
    return shifted - math.pi
import requests
def api_response(args, api_uri, method, extend, header):
    """
    Perform a Cisco FMC API request and return the response.

    :param args:
        parsed args carrying the FMC address (args.addr) plus credentials
    :param api_uri:
        path of the Cisco API call
    :param method:
        HTTP method
    :param extend:
        when truthy, request the expanded object list (?expanded=true)
    :param header:
        dictionary of request headers
    :return:
        the requests.Response object
    """
    url = "https://{}{}".format(args.addr, api_uri)
    if extend:
        url += "?expanded=true"
    # NOTE: TLS verification is disabled (verify=False), as in the original.
    response = requests.request(method, url, verify=False, headers=header)
    return response
def isFuse(obj):
    """
    Return True if obj is a Part.MultiFuse, else False.

    (The original returned None in the negative case; callers now always
    get a bool.)
    """
    return bool(obj.isDerivedFrom('Part::MultiFuse'))
def functionOnPython(message):
    """Print *message* to stdout.

    Arguments:
        message {String} -- Message that you want to print
    Returns:
        None (the return value of print)
    """
    return print(message)
def str2int(string):
    """Convert input string into an integer; return 0 when conversion fails."""
    try:
        return int(string)
    except ValueError:
        return 0
def tanh_backward(dout, cache):
    """
    Backward pass of tanh: dx = (1 - tanh(x)^2) * dout.

    :param dout: upstream gradient
    :param cache: tanh(x) saved from the forward pass
    :return: gradient with respect to x
    """
    tanh_x = cache
    dx = dout * (1 - tanh_x ** 2)
    return dx
import torch
def fft_rec_loss2(tgt, src, mask):
    """Calculate FFT loss between pair of images. The loss is masked by `mask`. FFT loss is calculated on GPU and, thus, very fast. We calculate phase and amplitude L1 losses.
    Args:
        tgt (torch.Tensor): Target image. Shape [B,C,H,W]
        src (torch.Tensor): Source image. Shape [B,C,H,W]
        mask (torch.Tensor): Boolean mask for valid points
    Returns:
        torch.Tensor: Scalar sum of phase and amplitude losses.
    """
    # NOTE(review): despite the docstring, `mask` is never applied below —
    # all masking code is commented out. Confirm whether this is intended.
    # Apply 2D FFT on the last two dimensions, e.g., [H,W] channels.
    fft_tgt = torch.fft.rfftn(
        tgt, s=tgt.shape[-2:], dim=[-2, -1], norm="forward")  # [B,C,H,W,2]
    fft_src = torch.fft.rfftn(
        src, s=tgt.shape[-2:], dim=[-2, -1], norm="forward")  # [B,C,H,W,2]
    # fft_diff = torch.fft.rfftn(tgt-src, s=tgt.shape[-2:], dim=[-2,-1], norm="ortho") # [B,C,H,W,2]
    # fft_diff = fft_tgt - fft_src
    # fft_diff = torch.view_as_real(fft_diff)
    # mag_diff = fft_diff[...,0].abs().sum() #20*torch.log10(fft_diff[...,0]) # mag2db
    # pha_diff = fft_diff[...,1].abs().sum()
    # Convolution over pixels is FFT on frequencies.
    # We may find a more clever way.
    # fft_conv = fft_tgt*fft_src
    # inv_fft_conv = torch.fft.irfftn(fft_conv, s=tgt.shape[-2:], dim=[-2,-1], norm="forward") # [B,C,H,W,2]
    # mask_diff = fft_diff #fft_tgt-fft_src
    # print(mask_diff.shape)
    # print(mask.shape)
    # mask_diff = mask_diff * mask
    # l = 20*torch.log10(fft_diff.abs()) # mag2db 20*log10(abs(complex))
    # Derivative for angle is not implemented yet.
    # pha_diff = torch.abs(fft_tgt.angle() - fft_src.angle())
    # Amplitude L1 difference between the target and source spectra.
    mag_diff = torch.abs(fft_tgt.abs() - fft_src.abs())
    fft_tgt = torch.view_as_real(fft_tgt)
    fft_src = torch.view_as_real(fft_src)
    # Phase of each complex bin via atan2(imag, real); atan2 is
    # differentiable, unlike .angle() above.
    pha_tgt = torch.atan2(fft_tgt[..., 1], fft_tgt[..., 0])
    pha_src = torch.atan2(fft_src[..., 1], fft_src[..., 0])
    # mag_tgt = torch.sqrt(fft_tgt[...,1]**2 + fft_tgt[...,0]**2)
    # mag_src = torch.sqrt(fft_src[...,1]**2 + fft_src[...,0]**2)
    pha_diff = torch.abs(pha_tgt-pha_src)
    # mag_diff = torch.abs(mag_tgt - mag_src)
    # print(pha_diff.sum())
    # print(mag_diff.sum())
    # Amplitude term is down-weighted by 1e-4 relative to the phase term.
    l = 1e-4*mag_diff.sum() + pha_diff.sum()
    return l
def all_messages():
    """
    Return all UI messages, keyed by message id (Indonesian locale).

    Returns:
        dict: message id (str) -> message text; many entries contain
        positional format placeholders ({0}, {1}, ...) filled in by callers.
    """
    return \
        {
            "0": "Mesin Nettacker mulai ...\n\n",
            "1": "python nettacker.py [options]",
            "2": "Tampilkan Nettacker Help Menu",
            "3": "Harap baca lisensi dan perjanjian https://github.com/viraintel/OWASP-Nettacker\n",
            "4": "Mesin",
            "5": "Pilihan masukan mesin",
            "6": "pilih bahasa {0}",
            "7": "scan semua IP di kisaran",
            "8": "cari dan pindai subdomain",
            "9": "nomor benang untuk koneksi ke host",
            "10": "nomor benang untuk memindai host",
            "11": "simpan semua log dalam file (results.txt, results.html, results.json)",
            "12": "Target",
            "13": "Targetkan pilihan masukan",
            "14": "daftar target, terpisah dengan \",\"",
            "15": "baca target dari file",
            "16": "Pindai pilihan metode",
            "17": "pilih metode pemindaian {0}",
            "18": "pilih metode pemindaian untuk mengecualikan {0}",
            "19": "daftar username (s), terpisah dengan \",\"",
            "20": "baca username dari file",
            "21": "daftar kata sandi, terpisah dengan \",\"",
            "22": "baca kata sandi dari file",
            "23": "daftar port, terpisah dengan \",\"",
            "24": "baca kata sandi dari file",
            "25": "waktu untuk tidur di antara setiap permintaan",
            "26": "Tidak dapat menentukan target",
            "27": "Tidak dapat menentukan target (s), tidak dapat membuka file: {0}",
            "28": "lebih baik menggunakan nomor benang lebih rendah dari 100, BTW kita lanjutkan ...",
            "29": "set timeout ke {0} detik, itu terlalu besar, bukan? dengan cara kita melanjutkan ...",
            "30": "modul pemindaian ini [{0}] tidak ditemukan!",
            "31": "modul pemindaian ini [{0}] tidak ditemukan!",
            "32": "Anda tidak dapat mengecualikan semua metode pemindaian",
            "33": "Anda tidak dapat mengecualikan semua metode pemindaian",
            "34": "modul {0} yang Anda pilih untuk dikecualikan tidak ditemukan!",
            "35": "masukkan metode input, contoh: \"ftp_brute_users =test,admin&ftp_brute_passwds="
                  "read_from_file: /tmp/pass.txt&ftp_brute_port=21\"",
            "36": "tidak bisa membaca file {0}",
            "37": "Tidak dapat menentukan nama pengguna (s), tidak dapat membuka file: {0}",
            "38": "",
            "39": "Tidak dapat menentukan kata kunci, tidak dapat membuka file: {0}",
            "40": "file \"{0}\" tidak dapat ditulis!",
            "41": "silahkan pilih metode scan anda!",
            "42": "menghapus file temp!",
            "43": "menyortir hasil!",
            "44": "selesai!",
            "45": "mulai menyerang {0}, {1} dari {2}",
            "46": "modul ini \"{0}\" tidak tersedia",
            "47": "Sayangnya versi software ini hanya bisa dijalankan di linux / osx / windows.",
            "48": "Versi Python Anda tidak didukung!",
            "49": "Lewati target duplikat (beberapa subdomain / domain mungkin memiliki IP dan Rentang yang sama)",
            "50": "target sasaran yang tidak diketahui [{0}]",
            "51": "memeriksa {0} rentang ...",
            "52": "memeriksa {0} ...",
            "53": "TUAN RUMAH",
            "54": "NAMA PENGGUNA",
            "55": "KATA SANDI",
            "56": "PELABUHAN",
            "57": "MENGETIK",
            "58": "DESKRIPSI",
            "59": "Tingkat mode verbose (0-5) (default 0)",
            "60": "tampilkan versi software",
            "61": "memeriksa pembaruan",
            "62": "",
            "63": "",
            "64": "Coba lagi saat timeout koneksi (default 3)",
            "65": "ftp ke {0}: {1} batas waktu, lewati {2}: {3}",
            "66": "LOGGED IN SUKSES!",
            "67": "LOGGED IN SUKSES, PERMISSION DITOLAK UNTUK DAFTAR PERINTAH!",
            "68": "koneksi ftp ke {0}: {1} gagal, melewatkan seluruh langkah [proses {2} dari {3}]!"
                  " pergi ke langkah selanjutnya",
            "69": "target masukan untuk modul {0} harus berupa DOMAIN, HTTP, atau SINGLE_IPv4, melewatkan {1}",
            "70": "user: {0} pass: {1} host: {2} port: {3} ditemukan!",
            "71": "(NO PERMISSION FOR List FILES)",
            "72": "mencoba {0} dari {1} dalam proses {2} dari {3} {4}: {5}",
            "73": "koneksi smtp ke {0}: {1} batas waktu, lewati {2}: {3}",
            "74": "koneksi smtp ke {0}: {1} gagal, melewatkan seluruh langkah [proses {2} dari {3}]! "
                  "pergi ke langkah selanjutnya",
            "75": "target masukan untuk modul {0} harus berupa HTTP, melewatkan {1}",
            "76": "ssh ke {0}: {1} batas waktu, lewati {2}: {3}",
            "77": "koneksi ssh ke {0}: {1} gagal, melewatkan seluruh langkah [proses {2} dari {3}]! "
                  "pergi ke langkah selanjutnya",
            "78": "koneksi ssh ke% s:% s gagal, melewatkan seluruh langkah [proses% s dari% s]! pergi "
                  "ke langkah selanjutnya",
            "79": "PORT TERBUKA",
            "80": "host: {0} port: {1} ditemukan!",
            "81": "target {0} dikirimkan!",
            "82": "tidak bisa membuka file daftar proxy: {0}",
            "83": "tidak dapat menemukan file daftar proxy: {0}",
            "84": "Anda menjalankan versi Nettacker OWASP {0} {1} {2} {6} dengan nama kode {3} {4} {5}",
            "85": "fitur ini belum tersedia! silahkan jalankan \"git clone https://github.com/viraintel"
                  "/OWASP-Nettacker.git\" atau \"pip install -U OWASP-Nettacker\" untuk mendapatkan"
                  " versi terakhir.",
            "86": "Buat grafik semua aktivitas dan informasi, Anda harus menggunakan output HTML. "
                  "grafik yang tersedia: {0}",
            "87": "untuk menggunakan fitur grafik, filename keluaran Anda harus diakhiri dengan "
                  "\".html\" atau \".htm\"!",
            "88": "membangun grafik ...",
            "89": "selesai membangun grafik!",
            "90": "Grafik Pengujian Penetrasi",
            "91": "Grafik ini dibuat oleh OWASP Nettacker. Grafik berisi semua aktivitas modul, "
                  "peta jaringan dan informasi sensitif, Tolong jangan bagikan file ini dengan "
                  "siapa pun jika tidak dapat diandalkan.",
            "92": "Laporan Nettacker OWASP",
            "93": "Detail Perangkat Lunak: Versi Nettacker OWASP {0} [{1}] di {2}",
            "94": "tidak ada port terbuka ditemukan!",
            "95": "tidak ada user / password ditemukan!",
            "96": "{0} modul dimuat ...",
            "97": "modul grafik ini tidak ditemukan: {0}",
            "98": "modul grafik ini \"{0}\" tidak tersedia",
            "99": "ping sebelum memindai host",
            "100": "melewatkan seluruh target {0} dan metode pemindaian {1} karena --ping-before-scan benar "
                   "dan tidak merespons!",
            "101": "Anda tidak menggunakan versi terakhir Nettacker OWASP, tolong perbarui.",
            "102": "tidak bisa mengecek update, silahkan cek koneksi internet anda.",
            "103": "Anda menggunakan versi terakhir dari Nettacker OWASP ...",
            "104": "daftar direktori ditemukan di {0}",
            "105": "silahkan masukkan port melalui -g atau --methods-args switch alih-alih url",
            "106": "koneksi http {0} batas waktu!",
            "107": "",
            "108": "tidak ada direktori atau file yang ditemukan untuk {0} di port {1}",
            "109": "tidak dapat membuka {0}",
            "110": "dir_scan_http_method value must be GET or HEAD, set default to GET.",
            "111": "daftar semua metode args",
            "112": "tidak bisa mendapatkan {0} modul args",
            "113": "",
            "114": "",
            "115": "",
            "116": "",
            "117": ""
        }
def extract_gamma(data):
    """
    Extract gamma-ray energies with intensities of at least 1%.

    *data* is a flat list of alternating [energy, intensity%] strings.
    Annotations like a trailing '(calc)', surrounding parentheses, and limit
    markers ('<', '>') are cleaned before float conversion; pairs whose
    intensity is below 1% are dropped.

    Returns:
        tuple(list, list): (gamma_energies, gamma_intensities), where the
        intensities are fractions (percent * 0.01).
    """
    for i in range(len(data)):
        value = data[i]
        if '(calc)' in value:
            value = value[:-6]  # strip trailing "(calc)" annotation
        # bug fix: the original tested `'(' and ')' in value`, which only
        # checked for ')' and blindly stripped the first/last character.
        if value.startswith('(') and value.endswith(')'):
            value = value[1:-1]
        # Limit markers and blanks are treated as zero.
        if '>' in value or '<' in value or value == '':
            value = '0'
        data[i] = float(value)
        # Zero out pairs whose intensity (odd index) is below 1%.
        if data[i] < 1.0 and i % 2 != 0:
            data[i] = 0
            data[i - 1] = 0
    while 0 in data:
        data.remove(0)
    gamma_energies = data[0::2]
    gamma_intensities = [percent * 0.01 for percent in data[1::2]]
    return (gamma_energies, gamma_intensities)
def area_to_capacity(statistical_roof_model_area_based, power_density_flat, power_density_tilted):
    """Maps area shares to capacity shares of the statistical roof model.

    The statistical roof model defines roof categories (e.g. south-facing
    with tilt 10°) and their shares in a population of roofs. This function
    maps area shares to shares of installable pv capacity, allowing flat
    roofs to have a different power density than tilted ones.

    Parameters:
        * statistical_roof_model_area_based: Series whose first index level
          distinguishes "flat" from tilted categories; values are shares of
          total roof area
        * power_density_flat: power density of flat pv installations
        * power_density_tilted: power density of tilted pv installations
          (same unit as power_density_flat)

    Returns:
        * Series of the same shape whose values are shares of total
          installable capacity
    """
    weighted = statistical_roof_model_area_based.copy()
    is_flat = weighted.index.get_level_values(0) == "flat"
    weighted[is_flat] = weighted[is_flat] * power_density_flat
    weighted[~is_flat] = weighted[~is_flat] * power_density_tilted
    # Normalise so capacity shares sum to 1.
    return weighted / weighted.sum()
import argparse
def parse_options(args):
    """Parses the options passed to this script.

    Args:
        args: list of options passed on command line

    Returns:
        An argparse.Namespace object containing the passed options and
        their values.
    """
    parser = argparse.ArgumentParser(
        description='Compares the results from two mlab-ns instances.')
    parser.add_argument(
        '--instance_one_url',
        dest='instance_one_url',
        default='http://mlab-ns.appspot.com',
        help='Base URL for the first mlab-ns instance to query.')
    parser.add_argument(
        '--instance_two_url',
        dest='instance_two_url',
        default='http://locate-dot-mlab-staging.appspot.com',
        help='Base URL for the second mlab-ns instance to query.')
    parser.add_argument('--tool_id',
                        dest='tool_id',
                        default='ndt',
                        help='The tool_id to query e.g., ndt.')
    parser.add_argument('--samples',
                        dest='samples',
                        type=int,  # bug fix: CLI values stayed strings
                        default=60,
                        help='Number of times to sample mlab-ns data.')
    parser.add_argument('--interval',
                        dest='interval',
                        type=float,
                        default=60,
                        help='Seconds to wait between samples.')
    parser.add_argument(
        '--all_fields',
        dest='all_fields',
        action="store_true",
        help='Compare all fields for equality, not just "fqdn".')
    parser.add_argument(
        '--infer_queueing',
        dest='infer_queueing',
        action="store_true",
        help='Attempts to determine if queueing accounts for differences.')
    parser.add_argument('--verbose',
                        dest='verbose',
                        action="store_true",
                        help='Whether to print extra information.')
    parser.add_argument('--ignore_pattern',
                        dest='ignore_patterns',
                        nargs='*',
                        default=[],
                        help='Regex pattern to ignore, matched against "fqdn".')
    args = parser.parse_args(args)
    return args
import codecs
def getFileLineNums(filename):
    """
    Count the number of lines in a UTF-8 file (one word per line).

    :param filename: path of the file to count
    :return: number of lines
    """
    count = 0
    # Context manager closes the handle even on error (original leaked it).
    with codecs.open(filename, 'r', "utf-8") as f:
        for _ in f:
            count += 1
    return count
import io
def gen_repr(cls, template, *args, **kwargs):
    """Generates a string for :func:`repr`.

    Special kwargs:
        full: use the class's real module path instead of the 'etc' alias.
        dense: omit the space-separated detail section.
        options: iterable of (attr, value) pairs, appended when value is
            not None.
    Remaining args/kwargs are interpolated into *template*.
    """
    buf = io.StringIO()
    buf.write(u'<')
    # bug fix: __module__/__name__ are str on Python 3; the original
    # called .decode() on them, which raises AttributeError.
    buf.write(cls.__module__ if kwargs.pop('full', False) else u'etc')
    buf.write(u'.')
    buf.write(cls.__name__)
    if not kwargs.pop('dense', False):
        buf.write(u' ')
        buf.write(template.format(*args, **kwargs))
    options = kwargs.pop('options', [])
    for attr, value in options:
        if value is not None:
            buf.write(u' %s=%s' % (attr, value))
    buf.write(u'>')
    return buf.getvalue()
import torch
def kron(A, B):
    """
    Kronecker product of A and B.

    Supports leading batch dimension(s); A and B must share them.
    """
    a_shape, b_shape = A.shape, B.shape
    assert a_shape[:-2] == b_shape[:-2]
    # Outer product over the last two axes via broadcasting; each element
    # of A scales a full copy of B.
    blocks = torch.matmul(
        A[..., :, None, :, None], B[..., None, :, None, :]
    )
    out_rows = a_shape[-2] * b_shape[-2]
    out_cols = a_shape[-1] * b_shape[-1]
    return blocks.reshape(a_shape[:-2] + (out_rows, out_cols))
import os
def dir_exists(dpath):
    """
    @param[in] dpath Path to the folder whose existence you want to check.
    @returns True if the folder exists, otherwise False.
    """
    # os.path.isdir already returns a bool; no ternary needed.
    return os.path.isdir(dpath)
import pwd
import os
def prepare_paths(user):
    """
    Ensure *user*'s ~/.ssh/authorized_keys exists with correct ownership and
    permissions (0700 directory, 0600 file). Assumes SSH is configured to
    read keys from '~/.ssh/authorized_keys'.

    :param user: system user name
    :return: path to the authorized_keys file, or None if the user is unknown
    """
    try:
        user_lookup = pwd.getpwnam(user)
    except KeyError:
        print('User {} not found.'.format(user))
        return
    # bug fix: the original passed a stray second argument to format().
    ssh_user_path = '{}/.ssh'.format(user_lookup.pw_dir)
    authorized_keys_file = '{}/authorized_keys'.format(ssh_user_path)
    # Create ~/.ssh if it doesn't exist.
    if not os.path.exists(ssh_user_path):
        os.mkdir(ssh_user_path)
    # Create an empty authorized_keys file if missing.
    if not os.path.exists(authorized_keys_file):
        os.mknod(authorized_keys_file)
    # Tighten permissions and ownership; sshd refuses keys that are
    # group/world accessible.
    os.chmod(ssh_user_path, 0o700)
    os.chown(ssh_user_path, user_lookup.pw_uid, user_lookup.pw_gid)
    os.chmod(authorized_keys_file, 0o600)
    os.chown(authorized_keys_file, user_lookup.pw_uid, user_lookup.pw_gid)
    return authorized_keys_file
def get_all_model_fields(connector, model_name):
    """Utility function returning the full set of db field names for a model,
    INCLUDING field names derived from the connector's UNPACKING list in the
    manifest."""
    model = connector.MODELS[model_name]
    unpacking = connector.UNPACKING
    fields = set(spec['dbname'] for spec in model['fields'].values())
    # Add the db names of model fields the manifest actually unpacks.
    for fieldname, unpack in unpacking.items():
        if fieldname in model['fields'] and unpack:
            fields.add(unpack['dbname'])
    return fields
def j_map_to_dict(m):
    """Converts a java map to a python dictionary (falsy input -> None)."""
    if not m:
        return None
    return {entry.getKey(): entry.getValue() for entry in m.entrySet().toArray()}
def _create_union_types_specification(schema_graph, graphql_types, hidden_classes, base_name):
"""Return a function that gives the types in the union type rooted at base_name."""
# When edges point to vertices of type base_name, and base_name is both non-abstract and
# has subclasses, we need to represent the edge endpoint type with a union type based on
# base_name and its subclasses. This function calculates what types that union should include.
def types_spec():
"""Return a list of GraphQL types that this class' corresponding union type includes."""
return [
graphql_types[x]
for x in sorted(list(schema_graph.get_subclass_set(base_name)))
if x not in hidden_classes
]
return types_spec | 3b72a6865982638a280d0678f7092f564180ccb9 | 46,319 |
def inv_dict(mydict):
    """
    Invert a mapping: key -> val becomes val -> key.
    """
    return {value: key for key, value in mydict.items()}
def _ends_in_by(word):
"""
Returns True if word ends in .by, else False
Args:
word (str): Filename to check
Returns:
boolean: Whether 'word' ends with 'by' or not
"""
return word[-3:] == ".by" | d6a080f8d3dcd5cab6ad6134df3dd27b3c2ceeea | 46,321 |
def is_subnet_of(a, b):
    """
    Check if network-b is a subnet of network-a.

    :param a: containing network (ipaddress.IPv4Network / IPv6Network)
    :param b: candidate subnet
    :return: True iff every address of b lies within a
    """
    # bug fix: the original required identical network addresses and compared
    # prefix lengths the wrong way round, so genuine subnets with a different
    # base address (e.g. 10.1.0.0/16 inside 10.0.0.0/8) were rejected.
    return (a.network_address <= b.network_address
            and b.broadcast_address <= a.broadcast_address)
def str_join(lst, sep=' '):
    """join(list, [sep]) -> string

    Join the items as strings, like string.join from Python 2."""
    return sep.join(map(str, lst))
def get_slots(slot_line, utterance, slot_dict):
    """
    Formats slot labels for an utterance. Ensures the multiword
    slot labels are grouped together. For example the words
    'birthday party' should be grouped together under the
    same event_name label like event_name(birthday party)
    instead of event_name(birthday), event_name(party).

    :param slot_line: sequence of slot-label indices, one per word
    :param utterance: whitespace-separated utterance string
    :param slot_dict: mapping from label index (int) to label string
    :return: "label(words), label(words), ..." or "None" if no slots
    """
    # Get slots and their labels
    utterance_words = utterance.split()
    slots_and_labels = []
    prev_slot_label = 'O'
    prev_word_idx = 0
    current_word = ""
    # NOTE(review): when lengths differ the first label is dropped —
    # presumably a leading BOS/CLS label; confirm against the caller.
    if len(utterance_words) != len(slot_line):
        slot_line = slot_line[1:]
    for word_idx, slot_label_idx in enumerate(slot_line):
        word = utterance_words[word_idx]
        slot_label = slot_dict[int(slot_label_idx)].strip()
        # Only care about words with labels
        if slot_label != 'O':
            # Keep multiword answers together: same label on consecutive
            # words extends the current span.
            if prev_slot_label == slot_label and prev_word_idx == word_idx - 1:
                current_word += " " + word
            # Previous answer has ended and a new one is starting
            else:
                if current_word != "":
                    slots_and_labels.append(f"{prev_slot_label}({current_word})")
                current_word = word
            prev_word_idx = word_idx
            prev_slot_label = slot_label.strip()
    # Add last labeled word to list of slots and labels if the utterance is over
    if current_word != "" and prev_slot_label != 'O':
        slots_and_labels.append(f"{prev_slot_label}({current_word})")
    # Format slot labels
    if not slots_and_labels:
        slot_labels = "None"
    else:
        slot_labels = ", ".join(slots_and_labels)
    return slot_labels
def get_retrain_options(defaults=None):
    """Retrain-related options.

    :param defaults: optional mapping of default values per option
    :return: dict mapping CLI flag -> argparse-style keyword arguments
    """
    if defaults is None:
        defaults = {}
    options = {
        # Resource ID
        '--id': {
            'dest': 'resource_id',
            'default': defaults.get('resource_id', None),
            'help': ("ID for the resource to be retrained.")},
        # path to the data file to be added
        '--add': {
            'dest': 'add',
            'default': defaults.get('add', None),
            'help': ("Path to the data file to be added.")},
        # maximum number of datasets to be used when retraining
        # (-1 sentinel means "not set": just add to the last dataset used)
        '--window-size': {
            'type': int,
            'dest': 'window_size',
            'default': defaults.get('window_size', -1),
            'help': ("Maximum number of datasets to be used in retraining."
                     " When not set, the new dataset will be added to the"
                     " last one used.")}
    }
    return options
import math
def normal_approximation_to_binomial(n, p):
    """Return (mu, sigma) of the normal approximation to Binomial(n, p)."""
    mu = n * p
    sigma = math.sqrt(mu * (1 - p))
    return mu, sigma
def get_current_period(root_arr: list):
    """
    Return the next simulation period: the highest 'period' attribute among
    the given xml roots, incremented by 1 (uploading the result of period x
    means the user wants to simulate x+1). An empty list yields 1.

    Parameters
    -----------
    `root_arr`: list
        XML root objects as returned by `parse_all_xml()`.
    """
    if not root_arr:
        return 1
    return max(int(root.get('period')) + 1 for root in root_arr)
def get_labels(labels_path):
    """
    Extract class names from a labels file.

    Each line has the form "<index> <label>"; the text after the first
    space is kept with trailing whitespace stripped.

    :param labels_path: str, path to file with labels
    :return: list of label strings
    """
    labels = []
    # Context manager closes the file even on error (original leaked it).
    with open(labels_path, 'r') as labelsfile:
        for line in labelsfile:
            labels.append(line.split(' ', 1)[1].rstrip())
    return labels
def genty_dataprovider(builder_function):
    """Decorator marking that the test gets parameters from *builder_function*.

    :param builder_function:
        A callable that returns parameters that will be passed to the
        decorated method. A tuple/list is passed as *args; a
        :class:`GentyArgs` supplies *args and **kwargs; any other return
        value is passed as a single parameter.
    :type builder_function:
        `callable`
    """
    datasets = getattr(builder_function, 'genty_datasets', {None: ()})

    def wrap(test_method):
        # Record the provider on the method itself; this data is consumed
        # later by the @genty class decorator.
        providers = getattr(test_method, 'genty_dataproviders', None)
        if providers is None:
            providers = test_method.genty_dataproviders = []
        providers.append((builder_function, datasets))
        return test_method
    return wrap
def validate_cache_seconds(string):
    """A number of seconds to cache a Resource in RAM (defaults to 1.0)."""
    cleaned = (string or '').strip()
    return float(cleaned) if cleaned else 1.0
def split(target_list, num_anchor_list):
    """For each (targets, num_anchors) pair: split targets along dim 0."""
    result = []
    for label_targets, num_anchors in zip(target_list, num_anchor_list):
        result.append(label_targets.split(num_anchors, 0))
    return result
def calcular_recaudo_horas_extra(horas_1,horas_2,horas_3,horas_4):
    """Return the total pay for four overtime-hour counts.

    Each hour is worth 100000 plus a 25% surcharge, i.e. 125000 per hour.

    :param horas_1: number of hours in block one
    :param horas_2: number of hours in block two
    :param horas_3: number of hours in block three
    :param horas_4: number of hours in block four
    :return: float total, or an error string when any count is <= 0

    >>> calcular_recaudo_horas_extra(3,4,5,6)
    2250000.0
    >>> calcular_recaudo_horas_extra(4,5,4,1)
    1750000.0
    """
    # Preserve the original contract: invalid input yields an error string.
    if horas_1 <= 0 or horas_2 <= 0 or horas_3 <= 0 or horas_4 <= 0:
        return 'El numero de horas no debe ser menor o igual a cero.'
    # (h*100000)*0.25 + h*100000 == h*125000 for each block, so the
    # original four-term expression collapses to a single multiplication.
    return float((horas_1 + horas_2 + horas_3 + horas_4) * 125000.0)
def new_board():
    """Returns an empty board"""
    # 6x6 grid of single-space cells; each row is a distinct list so that
    # mutating one row never affects another.
    return [[" " for _ in range(6)] for _ in range(6)]
def epilog(text):
    """Adds an 'epilog' property to a CMD function.

    It will be shown in the epilog. Usually useful for examples.
    """
    def decorator(fn):
        fn.epilog = text
        return fn
    return decorator
from pathlib import Path
def doRelocateFile(src, dst, overwrite=False):
    """
    Relocate file `src` to the path `dst`.

    If dirname(dst) does not exist yet, it is mkdir'ed silently.
    If a file already exists at `dst`, raise ``FileExistsError`` unless
    ``overwrite=True``.

    Parameter
    ---------
    src : path-like : source file
    dst : path-like : destination file
    overwrite(optional) : logical : overwrite file in destination, or not.

    Return
    ------
    path-like : pathname of relocated file.
    """
    source = Path(src)
    target = Path(dst)
    # Refuse to clobber an existing destination unless asked to.
    if target.is_file() and not overwrite:
        raise FileExistsError(f'destination is existing file: {target}')
    print(source, '->', target)
    target.parent.mkdir(parents=True, exist_ok=True)
    source.replace(target)
    return target
async def health():
    """Check integrity of the server."""
    # Static liveness message; the templated string must stay exactly
    # as written so cookiecutter can substitute the library name.
    message = "The {{ cookiecutter.library_name }} API server is healthy."
    return message
def preprocess_call_name(name):
    """ map an event name to a preprocess call name """
    return f'preprocess_{name}'
def generate_markdown_from_cli_args(cli_dict_for_command):
    """
    Turn the dict into a simple text representation of the cli args and options.

    :param cli_dict_for_command: dict with optional keys ``"usage"``,
        ``"epilog"`` and ``"args"`` (a list whose items expose the flag
        text at index 0)
    :return str: markdown-ish text, always prefixed with a blank line
    """
    # The original carried large commented-out blocks (including an eval()
    # experiment); they are removed here -- behavior is unchanged.
    text = ""
    if "usage" in cli_dict_for_command:
        text += "\nusage: " + str(cli_dict_for_command["usage"])
    if "epilog" in cli_dict_for_command:
        text += "\n" + cli_dict_for_command["epilog"]
    if "args" in cli_dict_for_command:
        text += "\n\n#### Command Line Options\n"
        for arg in cli_dict_for_command["args"]:
            text += f"\n* {arg[0]}"
    return "\n\n" + text
def get_catalog_path(layer):
    """Get the catalog path for the designated layer if possible. Ensures we can pass map layers to the subprocess.

    If it's already a string, assume it's a catalog path and return it as is.

    Args:
        layer (layer object or string): Layer from which to retrieve the catalog path.

    Returns:
        string: Catalog path to the data
    """
    # Layers expose .dataSource; plain strings (and anything else) are
    # returned unchanged.
    return getattr(layer, "dataSource", layer)
def get_instance_fields(instance):
    """Return the ALGOLIA_INDEX_FIELDS attribute managed by django-algolia.

    Works on both classes and instances; anything without the attribute
    yields an empty list.

    >>> class ManagedClass(object): ALGOLIA_INDEX_FIELDS = ['some', 'fields']
    >>> get_instance_fields(ManagedClass)
    ['some', 'fields']
    >>> get_instance_fields(ManagedClass())
    ['some', 'fields']
    >>> get_instance_fields(object())
    []
    """
    return getattr(instance, 'ALGOLIA_INDEX_FIELDS', [])
import math
def round_repeats(depth_coefficient, repeats):
    """ Round number of repeats based on depth coefficient.

    Args:
        depth_coefficient: Coefficient to scale number of repeats.
        repeats: Number to repeat mb_conv_block.

    From tensorflow implementation:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet
    /efficientnet_model.py
    """
    # Always round *up* so scaled models never lose a block.
    scaled = depth_coefficient * repeats
    return int(math.ceil(scaled))
from datetime import datetime
def timestamp(dt:int, fmt:str):
"""Helper function to convert timestamp to string
Arguments:
dt {int} -- timestamp
fmt {str} -- datetime format
Returns:
str -- datetime string
"""
return datetime.fromtimestamp(int(dt)).strftime(fmt) | 49b54e934a90eaac415738bc609cd129fbfbc002 | 46,349 |
def calcLossExceedance(data, p, forplot = True):
    """
    Calculate the loss exceedance curve for ``data`` at thresholds ``p``.

    For every threshold ``pi`` in ``p`` this computes the fraction of
    values in ``data`` strictly greater than ``pi``.

    :param data: 1-D numpy array of losses (a single column of data)
    :param p: iterable of thresholds (a linspace or discrete values)
    :param forplot: when True, scale probabilities to percent (0-100)
        for clean plotting; otherwise leave them as fractions (< 1)
    :return: list of exceedance probabilities, one per threshold
    """
    # The original carried several commented-out alternative
    # implementations; they are removed here -- behavior is unchanged.
    total = float(len(data))
    # Percent scale for plotting, plain probability otherwise.
    scale = 100.0 if forplot else 1.0
    return [((data > pi).sum() / total) * scale for pi in p]
def is_retriable_exception(e):
    """
    Is the exception a retriable exception?

    :type e: :py:class:`botocore.exceptions.ClientError`
    :param e: The ClientError caught.

    :rtype: bool
    :return: True if the exception is a retriable exception. False otherwise.
    """
    status = e.response['ResponseMetadata']['HTTPStatusCode']
    error_code = e.response['Error']['Code']
    # Server-side failures (500/503) and transient socket/HTTP errors are
    # worth retrying.
    return status in (500, 503) or error_code in (
        'NoHttpResponseException', 'SocketTimeoutException')
import torch
def build_loss(property_map, loss_tradeoff):
    """
    Build the loss function.

    Args:
        property_map (dict): mapping between the model properties and the
            dataset properties (a None target skips that property)
        loss_tradeoff (dict): contains tradeoff factors for properties,
            if needed

    Returns:
        loss function taking (batch, result) dicts and returning a scalar
    """
    def loss_fn(batch, result):
        total = 0.
        for prop, target in property_map.items():
            if target is None:
                continue
            # Mean squared error between dataset target and prediction.
            err_sq = torch.mean((batch[target] - result[prop]) ** 2)
            if prop in loss_tradeoff:
                err_sq = err_sq * loss_tradeoff[prop]
            total = total + err_sq
        return total
    return loss_fn
def get_objects(graph, predicate, subject=None):
    """Return a set of all the objects that match a predicate (and subject).

    :graph: The policy graph.
    :predicate: The predicate of the rules to match.
    :subject: The subject of the rules to match (defaults to any).
    :return: A set of all the objects that match the parameters in the graph.
    """
    # triples() matches with None as a wildcard; keep only the object slot.
    return {obj for (_subj, _pred, obj) in graph.triples((subject, predicate, None))}
import hashlib
def convert_hash(v):
    """Convert a string to its MD5 hex digest.

    The original implementation OR'ed each digest byte with 0x100 and
    sliced the hex text (``hex((i & 0xff) | 0x100)[3:5]``), which is
    exactly a zero-padded two-digit hex rendering of every byte -- i.e.
    the standard ``hexdigest()``.

    Parameters
    ----------
    v : str
        Value to be converted.

    Returns
    -------
    str
        32-character lowercase hex MD5 of ``v`` (UTF-8 encoded).
    """
    return hashlib.md5(v.encode('utf8')).hexdigest()
import re
def music_title(text):
    """Converts a string into a "title"

    Handles uppercasing only the first letter of words.
    Keywords such as EP and LP as well as Roman numerals are all-caps.

    Args:
        text: The string to 'titlize'
    Returns:
        The transformed text
    """
    # Roman numerals I..X, plus XI..XX built by prefixing "X".
    ROMAN_NUMS = ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"]
    ROMAN_NUMS.extend(["X" + n for n in ROMAN_NUMS])
    UPPER_KEYWORDS = ["EP", "LP"]
    words = text.split()
    for i, word in enumerate(words):
        if word.upper() in UPPER_KEYWORDS or word.upper() in ROMAN_NUMS:
            word = word.upper()
        elif word.find('.') != -1 or word.find('(') != -1: # parens or abreviations
            # str.title() also uppercases after '.'/'(' (e.g. "r.e.m." -> "R.E.M.").
            word = word.title()
        else:
            word = word.capitalize()
        words[i] = word
    # Replace underscores with colons because Windows doens't allow colon characters in name
    text = " ".join(words).replace("_", ":")
    # Search for text inside parens and recursively call music_title
    # NOTE(review): the groups are greedy, so nested parens collapse to
    # the outermost pair -- confirm intended.
    g = re.search('(.*)\((.*)\)(.*)', text, re.IGNORECASE)
    if g:
        text = "{0}({1}){2}".format(g.group(1), music_title(g.group(2)), g.group(3))
    return text | 9697fa47e9ba7ce7168a4f21757a2d97f15ed9de | 46,358 |
def twoNumberSum(array : list, targetSum : int) -> list:
    """Finds the two numbers in the array needed to get targetSum

    This solution has O(n) time complexity | O(n) space complexity

    Args:
        array: A list containing all the candidate numbers
        targetSum: The target number we want to get by adding two numbers from the array

    Returns:
        A list [b, a] where a appears before b in the array and
        a + b == targetSum, or an empty list when no such pair exists
    """
    # Map "value still needed" -> "value already seen".  A dict lookup is
    # O(1); the original `e in diff` list scan made the function O(n^2)
    # despite its docstring claiming O(n).
    needed = {}
    for e in array:
        if e in needed:
            return [e, needed[e]]
        needed[targetSum - e] = e
    return []
def to_page_count(n_entries, entries_per_page):
    """
    Calculate the number of pages needed to hold ``n_entries``.

    :param n_entries: total number of entries (>= 0)
    :param entries_per_page: entries shown per page (> 0)
    :return: number of pages, as an int
    """
    if n_entries == 0:
        return 0
    # Ceiling division: (n - 1) // per_page + 1 puts entries 1..per_page on
    # page 1, per_page+1..2*per_page on page 2, and so on.  Subtracting one
    # first keeps exact multiples (e.g. 10 entries / 10 per page) on a
    # single page.  The original used `/`, which is *float* division in
    # Python 3 and returned e.g. 2.0 instead of 2.
    return ((n_entries - 1) // entries_per_page) + 1
def get_json_field(json_data, field_names):
    """This function retrieves a value for a specific field from the JSON data for a user.

    .. versionchanged:: 3.1.0
       Refactored to walk paths of any depth instead of hard-coding the
       two-, three- and four-level lookups (len-1 paths previously fell
       through and always returned None).

    :param json_data: The JSON data from which the field value must be retrieved
    :type json_data: dict
    :param field_names: The field name along with any parent field paths
    :type field_names: str, tuple, list
    :returns: The value for the specific field in its original format
    :raises: :py:exc:`AttributeError` when an intermediate field is missing
    """
    if isinstance(field_names, str):
        return json_data.get(field_names)
    if isinstance(field_names, (tuple, list)):
        if not field_names:
            return None
        value = json_data
        # Walk the path one level at a time; mirrors the original chained
        # .get() calls (and, like them, raises AttributeError when an
        # intermediate level resolves to None).
        for field in field_names:
            value = value.get(field)
        return value
    return None
def alternatingCharacters(s1):
    """
    Args:
        s1 (str): first string
    Returns:
        int: how many elements need to be deleted"""
    # Each adjacent equal pair costs exactly one deletion.
    return sum(a == b for a, b in zip(s1, s1[1:]))
def freqs2probs(freqs):
    """Converts the given frequencies (list of numeric values) into probabilities.

    This just normalizes them to have sum = 1"""
    values = list(freqs)
    total = float(sum(values))
    return [v / total for v in values]
def _expand_as_id(as_id_tail):
    """ Helper to avoid repeating the ffaa:0:-part of the ASes a gazillion times."""
    # Lowercase hex, same as the original '%x' formatting.
    return 'ffaa:0:{:x}'.format(as_id_tail)
import re
def get_info(var, entry):
    """Return a value for a user selected field in a line from a vcf (provided as a list split by whitespace)

    :param var: one VCF record, already split on whitespace
    :param entry: field name to look up (e.g. ``GT``, ``AO``, ``DP``)
    :return: the field's value as a string, or ``None`` when not found
    """
    ret_val = None
    try:
        #Loop through vcf fields backwards
        for field in var[::-1]:
            #First try fields seperated with an ':' e.g. GT:AO 0/1:23
            # NOTE(review): "['\t', ':']" is a character *class* (matches
            # any of ' \t,:'), not the literal alternatives -- confirm
            # this is the intended anchoring.
            found = re.search("['\t', ':']{}['\t', ':']".format(entry), field)
            if found:
                field_split = field.split(':')
                entry_index = field_split.index('{}'.format(entry))
                # Value lives in the *next* whitespace field, at the same
                # ':'-position as the key in the FORMAT column.
                field_index = var.index(field)
                ret_val = var[field_index+1].split(':')[entry_index]
                break
            #Second try fields with an '=' e.g. AO=23;RO=45
            found = re.search("['\t', ';']{}=".format(entry), field)
            if found:
                ret_val = re.split("['\t', ';']{}=".format(entry), field)[1].split(';')[0]
                break
    except:
        # NOTE(review): bare except silently maps any parsing error to the
        # "not found" result (None); narrow this if errors should surface.
        pass
    return ret_val | 4d8e785a8e3576f81bdfbd436af4a7f8239ca1cc | 46,370 |
import itertools
import ast
def parse_template(instr, value_getter, found=True):
    """
    MPC-like parsing of `instr` using `value_getter` callable to retrieve the
    text representation of placeholders.

    :param instr: template string (consumed as a character iterator)
    :param value_getter: callable mapping a placeholder key to its text
        value; a falsy result marks the placeholder as "not found"
    :param found: running success flag, threaded through recursive calls
    :return: tuple of (rendered text, found flag)
    """
    instr = iter(instr)
    ret = []
    for char in instr:
        if char in '%{':
            # %key% or {key} placeholder: read up to the closing delimiter.
            endchar = '%' if char == '%' else '}'
            key = ''.join(itertools.takewhile(lambda e: e != endchar, instr))
            value = value_getter(key)
            if value:
                found = True
                ret.append(value)
            else:
                found = False
        elif char == '#':
            # '#' escapes the next character (a trailing '#' is literal).
            ret.append(next(instr, '#'))
        elif char == '\\':
            # Backslash escapes: C-style sequences are decoded, anything
            # else is taken literally.
            ln = next(instr, '\\')
            if ln in 'abtnvfr':
                ret.append(ast.literal_eval('"\\{}"'.format(ln)))
            else:
                ret.append(ln)
        elif char == '[':
            # '[...]' group: parsed recursively; the recursion returns at
            # the matching ']'.
            subret, found = parse_template(instr, value_getter, found)
            subret = ''.join(subret)
            ret.append(subret)
        elif char == ']':
            # End of the current group: succeed with the collected text or
            # fail with an empty result.
            if found:
                ret = ''.join(ret)
                return ret, True
            else:
                return '', False
        elif char == '|':
            # '|' alternative: keep the left side if it succeeded,
            # otherwise fall back to the right side.
            subret, subfound = parse_template(instr, value_getter, found)
            if found:
                pass
            elif subfound:
                ret.append(''.join(subret))
                found = True
            else:
                return '', False
        elif char == '&':
            # '&' conjunction: both sides must have succeeded.
            subret, subfound = parse_template(instr, value_getter, found)
            if found and subfound:
                subret = ''.join(subret)
                ret.append(subret)
            else:
                return '', False
        else:
            # Ordinary character: copied through verbatim.
            ret.append(char)
    ret = ''.join(ret)
    return ret, found | 138a4e0dcd5b0bab700f4deab02e41d079005d5e | 46,371 |
def trim_from_start(s, substring):
    """Trims a substring from the target string (if it exists) returning the trimmed string.

    Otherwise returns original target string."""
    return s[len(substring):] if s.startswith(substring) else s
import os
def get_filename_for_channel(directoryPath, sourceRoot, channel_type):
    """ combine the directory path and filename root to guess the filename for a source image """
    # e.g. "pkfg22_4K_" + "Roughness.jpg"
    filename = sourceRoot + channel_type
    return os.path.join(directoryPath, filename)
def nz(i:int, y=0):
    """ Same as the nz() function of Pinescript, for ints:
    Returns y if i is None, or 0 if y is None too """
    if i is not None:
        return i
    # i is None: fall back to y, treating a None fallback as 0.
    return 0 if y is None else y
def subject(headers):
    """
    Searches for the key 'Subject' in email headers
    then returns the value of this key (the email
    subject title).
    """
    # First matching header wins; None when no Subject header exists
    # (matching the original implicit-return behaviour).
    return next((h['value'] for h in headers if h['name'] == 'Subject'), None)
def decimal_to_binary(num=2):
    """
    Input: num, an int
    Returns binary representation of num as a string
    (negative numbers get a leading '-')
    """
    # Work on the magnitude; remember the sign for the final result.
    is_neg = num < 0
    num = abs(num)
    if num == 0:
        # The original compared `num == '0'` (int vs str), which was never
        # true, so an input of 0 incorrectly produced the empty string.
        return '0'
    result = ''
    while num > 0:
        result = str(num % 2) + result
        num //= 2
    return '-' + result if is_neg else result
import random
def generate_cheat_list(field, n):
    """
    Create cheat list.

    Returns list with specified number of tuples of coordinates, which
    contain ship based on specified field.

    ARGUMENTS:
    field - field from which to create cheat_list
    n - number of points to return, min 1, max - number of ships in field
    """
    cheat_list = []
    # Map shipID -> list of (x, y) cells occupied by that ship.
    ship_dict = {}
    # step through whole field
    for x in range(10):
        for y in range(10):
            # if spot is ship, add point to ship_dict
            if field[x][y]["content"] == "ship":
                if field[x][y]["shipID"] in ship_dict.keys():
                    ship_dict[field[x][y]["shipID"]].append((x, y))
                else:
                    ship_dict[field[x][y]["shipID"]] = [(x, y)]
    # pick random coord from every ship_dict item
    # NOTE(review): each chosen ship is pop()ed so no ship is revealed
    # twice; n greater than the number of ships raises from the empty
    # random.choice -- confirm callers respect the documented max.
    for i in range(n):
        temp = ship_dict.pop(random.choice(list(ship_dict.keys())))
        if len(temp) == 1:
            cheat_list.append(temp[0])
        else:
            cheat_list.append(temp[random.randint(0, len(temp)-1)])
    return cheat_list | 76c4889babd079e03482025245b8c7aa75e87e3b | 46,381 |
def Prediction_Classification_Model(Classifier, Testing=None):
    """ Run the prediction step of a fitted Scikit-Learn-style classification
    model, for use in the parallelized Ensemble Stacking Meta Estimator.

    INPUT: classification model and testing examples' features (default None) """
    return Classifier.predict(Testing)
def join_str_list(str_list):
    """Join a list of strings, handling spaces appropriately"""
    parts = []
    for token in str_list:
        # WordPiece-style continuation tokens ("##xyz") glue onto the
        # previous token; everything else is preceded by a space.
        parts.append(token[2:] if token.startswith("##") else " " + token)
    return "".join(parts)
def __count_ge(array, th):
    """
    numba implementation of count_ge: count elements of `array`
    that are >= `th`.
    """
    # Kept as an explicit flat-index loop (numba-friendly) rather than a
    # vectorized reduction.
    total = 0
    for idx in range(array.size):
        if array.flat[idx] >= th:
            total += 1
    return total
import argparse
def get_parser():
    """
    Initialize command-line parser with default and optional arguments.

    :return: parser
    """
    parser = argparse.ArgumentParser()
    # Paths and configuration files.
    parser.add_argument('-p', '--project',
                        help='project main directory')
    parser.add_argument('-m', '--mutations-config',
                        default='./statamutations.xml',
                        help='mutations configuration file')
    parser.add_argument('-r', '--report-directory',
                        default='./target/statam-report',
                        help='report output directory (generated report)')
    parser.add_argument('-t', '--tests-directory',
                        default='./target/surefire-reports',
                        help='tests directory (output when tests are executed)')
    parser.add_argument('-g', '--original',
                        default='_original_',
                        help='original (not mutated) tests directory')
    # Behaviour toggles (all off by default).
    parser.add_argument('-k', '--keep-temp',
                        action='store_true',
                        help='enable/disable temporary file cleaning')
    parser.add_argument('-o', '--open-browser',
                        action='store_true',
                        help='open the report file in the default browser after generation')
    parser.add_argument('--disable-spoon',
                        action='store_true',
                        help='disable Spoon (only the report will be computed)')
    parser.add_argument('--disable-report',
                        action='store_true',
                        help='disable report generation (only Spoon will be applied)')
    return parser
import torch
def get_mention_token_dist_tensors(m1, m2):
    """ Returns distance in tokens between two mentions """
    # Order the mentions so `first` starts no later than `second`.
    if m1[0] < m2[0]:
        first, second = m1, m2
    else:
        first, second = m2, m1
    gap = second[0] - (first[1] - 1)
    if gap < 0:
        # Overlapping mentions: distance is defined as zero.
        return torch.tensor(0, dtype=torch.long, device=m1.device)
    return gap
def _lenlastline(s):
    """Get the length of the last line. More intelligent than
    len(s.splitlines()[-1]).
    """
    # A trailing newline (or empty input) means the last line is empty.
    if s and not s.endswith(('\n', '\r')):
        return len(s.splitlines()[-1])
    return 0
def dict2string(d, url=''):
    """
    Build a query string from ``d`` appended to ``url``.

    Keys are sorted alphabetically; entries whose value is falsy are
    skipped (matching the original behaviour).

    :param d: mapping of parameter names to values
    :param url: prefix, e.g. ``'http://host/path?'``
    :return: ``url`` followed by ``key=value`` pairs joined with ``&``
    """
    # Join the pairs instead of appending "&" after each one: the old
    # trailing-"&" strip (`[:-1]`) chopped the last character of `url`
    # itself whenever no parameter was emitted.
    pairs = ['{}={}'.format(key, d[key]) for key in sorted(d) if d.get(key)]
    return url + '&'.join(pairs)
def compile_krass_conditional(krass_conditional):
    """
    Compile Krass conditional statements to Python conditional statements.

    :param krass_conditional: str, a conditional expression in Krass syntax
    :return: str, the expression rewritten with Python keywords
    """
    # change true to True, && to and etc.
    # NOTE(review): these are plain substring replacements, so they also
    # fire inside identifiers and string literals (e.g. "untrue"), and the
    # final "!" rule rewrites "!=" to " not =" -- confirm Krass source can
    # never contain those forms.
    changes = [
        ("true", "True"),
        ("false", "False"),
        ("&&", " and "),
        ("||", " or "), # keep an eye on this one, for regex or non
        ("!", " not ")
    ]
    for change in changes:
        krass_conditional = krass_conditional.replace(change[0], change[1])
    return krass_conditional | b29c01f4e62cfe4b1ffcbde37e3d0599db1565d3 | 46,395 |
def get_exception_kwargs(e):
    """ Extracts extra info (attributes) from an exception object. """
    # Skip private attributes plus the standard exception machinery.
    skip = ('args', 'message')
    return {
        name: value
        for name, value in vars(e).items()
        if not name.startswith('_') and name not in skip
    }
import torch
def ranking_loss(z_A, z_B, relation):
    """
    Relative-depth ranking loss.

    When relation is +1 ("closer", wants z_A < z_B) or -1 ("further",
    wants z_A > z_B) the logistic term log(1 + exp(relation*(z_A - z_B)))
    is used; when relation is 0 (no ordering) this falls back to a plain
    squared error between z_A and z_B.
    """
    diff = z_A - z_B
    weight = torch.abs(relation)
    rank_term = torch.log(1 + torch.exp(relation * diff))
    l2_term = diff ** 2
    # weight selects between the two terms (it is 0 or 1 for valid input).
    return weight * rank_term + (1 - weight) * l2_term
def is_valid_name(name: str) -> bool:
    """Returns True if a given string represents a valid name (e.g., for a
    dataset). Valid names contain only letters, digits, hyphen, underscore,
    or blank. A valid name has to contain at least one digit or letter.
    """
    alnum_count = 0
    for ch in name:
        if ch.isalnum():
            alnum_count += 1
        elif ch not in ('_', '-', ' '):
            # Any other character invalidates the whole name.
            return False
    return alnum_count > 0
def scaled_l2(X, C, S):
    """
    scaled_l2 distance

    Args:
        X (b*n*d): original feature input
        C (k*d): code words, with k codes, each with d dimension
        S (k): scale cofficient
    Return:
        D (b*n*k): relative distance to each code
    Note:
        expands ||x - c||^2 as x^2 + c^2 - 2xc instead of taking an
        elementwise difference, which is reported to be ~2x faster on GPU
    """
    assert X.shape[-1] == C.shape[-1], "input, codeword feature dim mismatch"
    assert S.numel() == C.shape[0], "scale, codeword num mismatch"
    batch, num, dim = X.shape
    flat = X.view(-1, dim)                        # [bn, d]
    codes_t = C.t()                               # [d, k]
    x_sq = flat.pow(2.0).sum(-1, keepdim=True)    # [bn, 1]
    c_sq = codes_t.pow(2.0).sum(0, keepdim=True)  # [1, k]
    dist = x_sq + c_sq - 2.0 * flat.mm(codes_t)   # [bn, k]
    return (S * dist).view(batch, num, -1)        # [b, n, k]
def generate(text_model, size, bound):
    """Makes 140 character tweets"""
    # One sentence per requested tweet; the model is consulted every time
    # so randomized models produce distinct sentences.
    return [text_model.make_short_sentence(size) for _ in range(bound)]
def _rec(sol, cnts):
    """
    Recursive call for creating sandwich numbers.

    :param sol: A string containing the currently constructed number.
    :param cnts: A `dict` associating digits (as strings) with the number of
        times each of them is yet to be used.
    :return: Either a string sandwich number or `None` if one does not exist.
    """
    # Check if we've used all the digits.
    if all(cnt == 0 for cnt in cnts.values()):
        return sol
    # Which digit(s) *must* be added here.
    # (a digit d at index i needs its partner at index i + d + 1, i.e.
    # with exactly d other digits sandwiched between them)
    must = [
        d
        for i, d in enumerate(sol)
        if i + int(d) + 1 == len(sol) and cnts[d] > 0
    ]
    if len(must) > 1:
        # We can't put more than one digit anywhere, so this is a dead end.
        return None
    elif len(must) == 1:
        # Exactly one candidate, which is awesome!
        # No trial and error, just use that one.
        items = ((must[0], cnts[must[0]]),)
    else:
        # We'll need to try all of those we didn't use up completely.
        items = tuple((d, cnt) for d, cnt in cnts.items() if cnt > 0)
    # Let's append one digit.
    for d, cnt in items:
        if d in sol:
            # If we already used `d`, it has to conform with the sandwich
            # requirement (i.e., we cannot add it if it wouldn't wrap exactly d
            # other digits).
            try:
                if sol[-(int(d) + 1)] != d:
                    continue
            except IndexError:
                continue
        # We're gonna use this one, so reduce its count...
        cnts[d] -= 1
        # ...and call the recursion to fill up the next one.
        res = _rec(sol + d, cnts)
        # If we got a solution, that's it!
        if res is not None:
            return res
        # If not, we just got our digit back (backtrack).
        cnts[d] += 1
    # Getting here means we didn't get a solution, so we return `None`.
    return None | bd03b89ce3518a741907bac607f77149fda1eccb | 46,403 |
from io import StringIO
def decompress(compressed):
    # compressed is a list of codewords
    """Decompress a list of LZW output codes back into a string.

    ``compressed`` is consumed destructively (its first code is popped),
    matching the original implementation.
    """
    dict_size = 256
    # Code table starts with all single-byte codes.
    table = {code: chr(code) for code in range(dict_size)}
    # use StringIO, otherwise this becomes O(N^2)
    # due to string concatenation in a loop
    out = StringIO()
    previous = chr(compressed.pop(0))
    out.write(previous)
    for code in compressed:
        if code in table:
            entry = table[code]
        elif code == dict_size:
            # Special LZW case: the code refers to the entry being built.
            entry = previous + previous[0]
        else:
            raise ValueError('Bad compressed k: %s' % code)
        out.write(entry)
        # Register previous + first char of entry as the next table entry.
        table[dict_size] = previous + entry[0]
        dict_size += 1
        previous = entry
    return out.getvalue()
from pathlib import Path
import os
def prep_dir(clean=False):
    """Prepare temporary test directory"""
    tmpdir = Path("./temp").resolve().absolute()
    tmpdir.mkdir(parents=True, exist_ok=True)
    # The four artifacts a previous test run may have left behind.
    artifacts = [
        tmpdir / "main.yaml",
        tmpdir / "endpoints.yaml",
        tmpdir / "chains.json",
        tmpdir / "directory.json",
    ]
    if clean:
        for artifact in artifacts:
            if artifact.exists():
                os.remove(artifact)
    return tmpdir
from pathlib import Path
from typing import List
def glob_database_files(source_database: Path) -> List[Path]:
    """
    List any of the temporary database files (and the database itself)
    """
    result: List[Path] = [source_database]
    pattern = source_database.name + "-*"
    for candidate in source_database.parent.glob(pattern):
        # Skip -shm: shared-memory files should be recreated from scratch,
        # so it is safer not to copy them.
        # https://www.sqlite.org/tempfiles.html#shared_memory_files
        if not candidate.name.endswith("-shm"):
            result.append(candidate)
    return result
def method(cls):
    """Decorator to add the function as a method to a class.

    Args:
        cls (type): Class to add the function as a method to.
    """
    def attach(fn):
        # Register under the function's own name, then hand it back so it
        # also stays usable at its definition site.
        setattr(cls, fn.__name__, fn)
        return fn
    return attach
import platform
import subprocess
def has_ssh() -> bool:
    """
    Check that the user has ssh access to github.mmm.com

    First it will verify if ssh is installed in $PATH
    then check if we can authenticate to github.mmm.com
    over ssh. Returns false if either of these are untrue
    """
    result = None
    # `where` on Windows, `which` elsewhere, to locate the ssh binary.
    if 'windows' in platform.platform().lower():
        ssh_test = subprocess.run(['where', 'ssh'])
    else:
        ssh_test = subprocess.run(['which', 'ssh'])
    if ssh_test.returncode == 0:
        # NOTE(review): '&>' and '/dev/null' are passed as *arguments* to
        # ssh here (no shell is involved), not as output redirection --
        # confirm this is intended.
        result = subprocess.Popen(
            ['ssh', '-Tq', 'git@github.mmm.com', '&>', '/dev/null'])
        result.communicate()
    # ssh exits with 255 when the connection/authentication fails.
    if not result or result.returncode == 255:
        return False
    return True | d2a2445e304f574433bf3f4a34062a8723a98b6b | 46,410 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.