content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def catalog_deletion_payload(catalog_list):
    """Build the request payload for a catalog-deletion call.

    :param catalog_list: list of catalog identifiers to delete
    :return: dict payload with the ids under the ``CatalogIds`` key
    """
    payload = {"CatalogIds": catalog_list}
    return payload
import re
def is_snake_case(test_string):
    """Check whether *test_string* looks like a snake_case identifier."""
    pattern = "(^[a-z])([a-z0-9]+_?[a-z0-9]?)+([a-z0-9]$)"
    return re.search(pattern, test_string) is not None
def first_trade_date_in_month(df):
    """Return the set of first trading days of each month.

    :param df: DataFrame indexed by trading-day timestamps, in date order
    :return: set of index timestamps that open a new (year, month) pair
    """
    first_days = set()
    last_seen = (0, 0)
    for ts, _row in df.iterrows():
        current = (ts.year, ts.month)
        if current != last_seen:
            first_days.add(ts)
            last_seen = current
    return first_days
def ADD(*expressions):
    """Build the MongoDB ``$add`` aggregation operator.

    Adds numbers together, or adds milliseconds to a date when one of
    the arguments is a date.
    See https://docs.mongodb.com/manual/reference/operator/aggregation/add/
    for more details.

    :param expressions: the numbers, or fields holding numbers
    :return: aggregation operator dict
    """
    return {'$add': [*expressions]}
def getAllContribsOutputStrFromSpectraOutput( spectraOutput, energyFmt=None, intensityFmt=None ):
	""" Gets a str to write all contributions to spectraOutput (e.g. fragA-S-3p contribution)

	Args:
		spectraOutput: (GenSpectraOutput object) This contains all information for a generated spectrum
		energyFmt: (Str, optional) The format string for the energies. Default = "{:.9g}"
		intensityFmt: (Str, optional) The format string for the intensities. Default = "{:.9g}"

	Returns
		outStr: (Str) String containing data on the contributions to the spectrum

	"""
	energyFmt = "{:.9g}" if energyFmt is None else energyFmt
	intensityFmt = "{:.9g}" if intensityFmt is None else intensityFmt
	labelList = spectraOutput.label
	dataList = spectraOutput.spectralContributions
	#Get the labels; each label becomes a "frag-ele-ao" token (e.g. fragA-S-3p)
	labelStrs = ", ".join( ["{}-{}-{}".format(x.fragKey, x.eleKey, x.aoKey) for x in labelList] )
	labelStrs = "#labels = " + labelStrs
	outStrList = [labelStrs]
	outStrList.append( "#Energy, Intensities" )
	# One energy column followed by one intensity column per contribution
	dataStrFmt = energyFmt + ", " + ", ".join( [intensityFmt for x in range(len(dataList))] )
	# Walk the (energy, intensity) pairs of the FIRST contribution; assumes all
	# contributions share the same energy grid and ordering - TODO confirm
	for idx,x in enumerate(dataList[0]):
		energy = x[0]
		currData = [ a[idx][1] for a in dataList ]
		outStrList.append( dataStrFmt.format( energy, *currData) )
	return "\n".join(outStrList)
def adjust_confidence(score):
    """Return *score*, substituting full confidence (1.0) when it was not returned."""
    return 1.0 if score is None else score
def hash_string(key, bucket_size=1000):
    """Hash a string into one of *bucket_size* buckets.

    The bucket is ``sum(ord(c) for c in key) % bucket_size``, returned as
    a string (matching the original contract).

    Parameters
    ----------
    key: str
        Input string to be hashed
    bucket_size: int
        Size of the hash table.
    """
    code_point_total = 0
    for ch in key:
        code_point_total += ord(ch)
    return str(code_point_total % bucket_size)
import torch
def camera_matrix(pinholes, eps=1e-6):
    """ Returns the intrinsic matrix as a tensor.

    Args:
        pinholes (torch.Tensor): tensor holding the fx, cx, fy, cy camera
            parameters at indices 0..3 (the code reads ``.device``/``.dtype``,
            so a plain list will not work despite the original docstring).
        eps (float, optional): A small number added to every entry for
            computational stability. Defaults to 1e-6.
    Returns:
        torch.Tensor: Intrinsic matrix as a [4,4] Tensor.
    """
    k = torch.eye(4, device=pinholes.device, dtype=pinholes.dtype) + eps
    # k = k.view(1, 4, 4).repeat(pinholes.shape[0], 1, 1)  # Nx4x4
    # fill output with pinhole values
    k[..., 0, 0] = pinholes[0]  # fx
    k[..., 0, 2] = pinholes[1]  # cx
    k[..., 1, 1] = pinholes[2]  # fy
    k[..., 1, 2] = pinholes[3]  # cy
    return k
def biopdbresid_to_pdbresseq(biopdb_residueid, ignore_insertion_codes=False):
    """Convert a Bio.PDB residue id tuple to a PDB residue sequence string.

    Parameters:
        biopdb_residueid - (hetatm, resseqnum, icode) tuple from Residue.get_id()
        ignore_insertion_codes - If True, drop the insertion code; a hack to
            make it work with PMML (only), which does not report insertion
            codes unlike DSSP and STRIDE.

    Return value:
        string residue PDB sequence number e.g. '60' or '60A'.
    """
    _hetatm, seq_num, insertion_code = biopdb_residueid
    result = str(seq_num)
    if not ignore_insertion_codes and insertion_code != ' ':
        result += insertion_code
    return result
import six
def check_utf8(string):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any null character.

    :param string: string to be validated (text or bytes; falsy input
        is rejected immediately)
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    if not string:
        return False
    try:
        # Normalise to both an encoded (bytes) and decoded (text) form so the
        # same checks apply whether we were handed text or bytes.
        if isinstance(string, six.text_type):
            encoded = string.encode('utf-8')
            decoded = string
        else:
            encoded = string
            decoded = string.decode('UTF-8')
        # Round-trip check: a byte string that does not re-encode to itself
        # was not canonical UTF-8 to begin with.
        if decoded.encode('UTF-8') != encoded:
            return False
        # A UTF-8 string with surrogates in it is invalid.
        #
        # Note: this check is only useful on Python 2. On Python 3, a
        # bytestring with a UTF-8-encoded surrogate codepoint is (correctly)
        # treated as invalid, so the decode() call above will fail.
        #
        # Note 2: this check requires us to use a wide build of Python 2. On
        # narrow builds of Python 2, potato = u"\U0001F954" will have length
        # 2, potato[0] == u"\ud83e" (surrogate), and potato[1] == u"\udda0"
        # (also a surrogate), so even if it is correctly UTF-8 encoded as
        # b'\xf0\x9f\xa6\xa0', it will not pass this check. Fortunately,
        # most Linux distributions build Python 2 wide, and Python 3.3+
        # removed the wide/narrow distinction entirely.
        if any(0xD800 <= ord(codepoint) <= 0xDFFF
               for codepoint in decoded):
            return False
        return b'\x00' not in encoded
    # If string is unicode, decode() will raise UnicodeEncodeError
    # So, we should catch both UnicodeDecodeError & UnicodeEncodeError
    except UnicodeError:
        return False
def is_ignored(mod_or_pkg, ignored_package):
    """Test whether a module/package should be excluded from the generated
    API reference document.

    :param mod_or_pkg: module or package (must expose ``.fullname``)
    :type mod_or_pkg: typing.Union[Module, Package]
    :param ignored_package: iterable of package full names; entries may
        carry a trailing ``.py`` which is stripped before matching
    :return: True when the full name starts with any ignored pattern
    """
    patterns = []
    for full_name in ignored_package:
        if full_name.endswith(".py"):
            full_name = full_name[:-3]
        patterns.append(full_name)
    return any(mod_or_pkg.fullname.startswith(p) for p in patterns)
import argparse
import os
def parse_arguments():
    """Build the CLI parser for the yara scanner.

    Requires a yara rule folder; the scan directory is optional and
    defaults to the current directory. The verbose option prints invalid
    rules that are not scanned with.
    """
    parser = argparse.ArgumentParser(usage="Scan Files in a Directory with Yara Rules")
    parser.add_argument("-v", "--verbosity", action="store_true", dest="verbose",
                        help="Print verbose information")
    parser.add_argument("-y", "--yara_dir", action="store",
                        help="Path to Yara rules directory")
    parser.add_argument("-s", "--scan_dir", action="store", default=os.getcwd(),
                        help="Path to the directory of files to scan (optional otherwise current dir is scanned)")
    parser.add_argument("-f", "--scan_file", action="store",
                        help="Path to file scan")
    return parser
import math
def binomial(n, k):
    """Calculates the binomial coefficient C(n, k) via the gamma function."""
    numerator = math.gamma(n + 1)
    denominator = math.gamma(k + 1) * math.gamma(n - k + 1)
    return numerator / denominator
def is_kind_of_class(obj, a_class):
    """Return True if *obj* is an instance of *a_class* or of a subclass
    of it; otherwise False.

    :param obj: object to test
    :param a_class: class to test against
    :return: bool
    """
    # isinstance already returns a bool and covers the inheritance case,
    # so the original if/else wrapper was redundant.
    return isinstance(obj, a_class)
import csv
def load_dictionary(filename):
    """Load a two-column CSV file into a dict.

    Keys are the lower-cased first column; values are the second column.
    """
    mapping = {}
    with open(filename, newline="") as handle:
        for row in csv.reader(handle):
            mapping[row[0].lower()] = row[1]
    return mapping
def default_setter(attribute=None):
    """Return a placeholder for a missing parser setter method.

    Used when, for example, write support exists for a file type but read
    support does not. The returned callable raises NotImplementedError
    when invoked; its docstring records which setter is missing.
    """
    message = "%s setter is not defined." % attribute

    def none_importer(_x, _y, **_z):
        raise NotImplementedError(message)

    none_importer.__doc__ = message
    return none_importer
import os
def apply_file_collation(args, fname, apply_keeproot=False):
    """Apply collation path to a remote filename

    Parameters:
        args - arguments namespace (reads args.keeprootdir and args.collate)
        fname - file name
        apply_keeproot - apply keep rootdir transformation
    Returns:
        remote filename
    Raises:
        No special exception handling
    """
    # Strip leading/trailing path separators so the name is relative.
    remotefname = fname.strip(os.path.sep)
    # Unless the caller asked to keep the root dir, drop the first path
    # component from the remote name.
    if apply_keeproot and not args.keeprootdir:
        rtmp = remotefname.split(os.path.sep)
        if len(rtmp) > 1:
            remotefname = os.path.sep.join(rtmp[1:])
    # Collation flattens the path to its basename, optionally re-rooted
    # under the collate directory ('.' means no prefix).
    if args.collate is not None:
        remotefname = remotefname.split(
            os.path.sep)[-1]
        if args.collate != '.':
            remotefname = os.path.sep.join(
                (args.collate, remotefname))
    return remotefname
def call_if_callable(v):
    """Preprocess a value: call it if it is callable (for late binding),
    otherwise return it unchanged."""
    if callable(v):
        return v()
    return v
import random
def random_swap(im):
    """Randomly permute the colour channels of an image with probability 0.5.

    Args:
        im(numpy.array): one H,W,3 image
    """
    channel_orders = ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0))
    if random.random() < 0.5:
        order = channel_orders[random.randrange(0, len(channel_orders))]
        im = im[:, :, order]
    return im
def is_number(n):
    """Return True if the first character of the string parses as a number.

    Only the first character is checked, to cover cases like "3-4" or "3/4".
    """
    if not n:
        return False
    try:
        float(n[0])
    except ValueError:
        return False
    return True
import os
def get_file(folder='./', is_folder=True, suffix="*", lists=None, append=False):
    """Recursively list folders and files under *folder*.

    (Docstring translated from Chinese.)

    :param folder: root directory to walk
    :param is_folder: include folder entries in the result
    :param suffix: only return files with this extension ('*' for all)
    :param lists: existing result list to extend (used by the recursion)
    :param append: when False, start a fresh result list
    :return: list of {"type", "path", "name"} dicts
    """
    # Bug fix: the original used a mutable default argument (lists=[]),
    # so results leaked between top-level calls made with append=True.
    if lists is None or not append:
        lists = []
    for entry in os.listdir(folder):
        path = folder + "/" + entry
        if not os.path.isfile(path):
            if is_folder:
                lists.append({"type": "folder", "path": path, "name": entry})
            get_file(path, is_folder, suffix, lists, append=True)
        elif suffix == '*' or entry.endswith('.' + str(suffix)):
            lists.append({"type": "file", "path": path, "name": entry})
    return lists
def stream_is_client_initiated(stream_id: int) -> bool:
    """
    Returns True if the stream is client initiated.

    QUIC encodes the initiator in the lowest bit of the stream ID:
    0 means client-initiated, 1 means server-initiated.
    """
    return stream_id % 2 == 0
def squared_difference_between(a, b):
    """Return the squared straight-line distance between two points.

    Args:
        a, b: sequences of two coordinates (x, y)
    """
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy
import pickle
def lire(pkl_file_name="Quandl_data.pkl"):
    """Read a pickle of {description: [(x, y), ...]} and split coordinates.

    lire (Fr. "read") loads the pkl file and does the processing.

    INPUTS
        pkl_file_name : str, path to the pickle file
    OUTPUTS
        dict mapping each description to a pair of lists (xs, ys)
    """
    with open(pkl_file_name, 'rb') as data_file:
        data = pickle.load(data_file)
    # Processing part: split the (x, y) pairs into parallel lists
    processed_data = {}
    for desc, points in data.items():
        xs = [pt[0] for pt in points]
        ys = [pt[1] for pt in points]
        processed_data[desc] = (xs, ys)
    return processed_data
import random
def pick_random_move(state):
    """Pick a random legal move on the board.

    Cells holding None in *state* are open. Returns the index into the
    state array of a randomly chosen open cell.

    :param state: sequence of board cells (None marks an open cell)
    :return: index of the chosen move
    """
    # Idiom fix: enumerate + comprehension instead of a manual index loop,
    # and random.choice instead of hand-rolled randint indexing.
    possible_moves = [i for i, cell in enumerate(state) if cell is None]
    return random.choice(possible_moves)
def string2bool(input_string):
    """Interpret a string as a boolean.

    Accepts 'true', 'yes' and '1' in any letter case; everything else is
    False. (The original accepted only a few fixed spellings, so e.g.
    'Yes' was True but 'yes' and 'TRUE' were not - this generalizes the
    accepted set while remaining backward compatible.)

    :param input_string: string to interpret
    :return: bool
    """
    return input_string.lower() in ('true', 'yes', '1')
def remove_doubles(a_list):
    """Drop duplicate elements while keeping first-occurrence order.

    Deliberately naive (O(n^2) membership tests), which also makes it work
    for unhashable elements such as lists.
    """
    deduped = []
    for item in a_list:
        if item in deduped:
            continue
        deduped.append(item)
    return deduped
import inspect
def get_class(method):
    """Get the class of the input unbound method.

    Args:
        method (object): an unbound method or function.
    Returns:
        A class of the input method. It will be `None` if the input method is
        a function.
    """
    if inspect.ismethod(method):
        # Bound method: walk the MRO of the receiver's class and return the
        # class whose own __dict__ actually defines this method.
        for cls in inspect.getmro(method.__self__.__class__):
            if cls.__dict__.get(method.__name__) is method:
                return cls
        method = method.__func__  # fallback to __qualname__ parsing
    if inspect.isfunction(method):
        # Plain function: recover the defining class (if any) by stripping the
        # attribute name from the dotted __qualname__ and looking the remainder
        # up on the function's module.
        cls = getattr(
            inspect.getmodule(method),
            method.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
        if isinstance(cls, type):
            return cls
    # Last resort: some builtin/descriptor methods carry __objclass__.
    return getattr(method, '__objclass__',
                   None)
import os
import sys
def get_command():
    """
    Return the CLI name that invoked virlutils: 'cml' or 'virl'.

    Inspects sys.argv[0] rather than the package name so the value is not
    set to setup.py during test.
    """
    if os.path.basename(sys.argv[0]) == "cml":
        return "cml"
    return "virl"
def odd_or_even(number):
    """Classify a number as 'Even' or 'Odd'."""
    # Even when the division remainder is zero.
    return 'Even' if number % 2 == 0 else 'Odd'
def step(x):
    """
    Step function: 1 for x > 0, otherwise 0.

    Multiplication (rather than int()) keeps this working elementwise on
    numpy arrays as well as on scalars.
    """
    return (x > 0) * 1
def getDistance(interval_a, interval_b):
    """Returns the distance (gap) between two intervals; negative if they overlap."""
    gap_ab = interval_a[0] - interval_b[1]
    gap_ba = interval_b[0] - interval_a[1]
    return max(gap_ab, gap_ba)
from typing import Optional
def _asset_title_fields(asset_name: str) -> Optional[str]:
"""
Add title of the asset object
"""
if asset_name.startswith("thumbnail"):
return "Thumbnail image"
else:
return None | e78e700fed4a1f7c6170c0378a0a0c6cde7a03ad | 51,065 |
import logging
def _patched_makeRecord(self,
                        name,
                        level,
                        fn,
                        lno,
                        msg,
                        args,
                        exc_info,
                        func=None,
                        extra=None,
                        sinfo=None):
    """Monkey-patched version of logging.Logger.makeRecord

    We have to patch default loggers so they use the proper frame for
    line numbers and function names (otherwise everything shows up as
    e.g. cli_logger:info() instead of as where it was called from).

    In Python 3.8 we could just use stacklevel=2, but we have to support
    Python 3.6 and 3.7 as well.

    The solution is this Python magic superhack.

    The default makeRecord will deliberately check that we don't override
    any existing property on the LogRecord using `extra`,
    so we remove that check.

    This patched version is otherwise identical to the one in the standard
    library.
    """
    rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func,
                           sinfo)
    if extra is not None:
        # Unlike the stdlib version, do not reject keys that clash with
        # existing LogRecord attributes - that is the point of the patch.
        rv.__dict__.update(extra)
    return rv
def get_img_fingerprints(gray_dct_ul64_list, gray_dct_ul64_avg):
    """
    Build a 64-bit image fingerprint string.

    Walks the top-left 8x8 block of the greyscale DCT and emits '1' where
    a pixel exceeds the average, '0' otherwise.
    (Docstring translated from Chinese.)

    :param gray_dct_ul64_list: 8x8 matrix of the top-left DCT pixels
    :param gray_dct_ul64_avg: one-element sequence holding the pixel average
    :return: 64-character fingerprint string of '0'/'1'
    """
    threshold = gray_dct_ul64_avg[0]
    bits = []
    for row in range(8):
        for col in range(8):
            bits.append('1' if gray_dct_ul64_list[row][col] > threshold else '0')
    return ''.join(bits)
import torch
def constraint_loss(logits, cs_onehot, cs_ids):
    """
    constraint loss with mask

    logits: token scores; log-softmaxed over the last (vocab) dimension
    cs_onehot: one-hot encoding of the constraint tokens over the vocab
        - NOTE(review): shapes below imply [batch_size, num_cs, vocab_size];
          confirm against callers
    cs_ids: [batch_size, num_cs]
    """
    log_ps = logits.log_softmax(-1).unsqueeze(2) # shape: [batch_size, length, 1, vocab_size]
    # Best (max over sequence positions) log-prob assigned to each constraint token.
    constraint_max_log_ps_ = (log_ps * cs_onehot.unsqueeze(1)).max(1)[0].sum(-1) # shape: [batch_size, num_cs]
    log_ps_max_ids = log_ps[:, :, 0, :].argmax(-1) # shape: [batch_size, length]
    cs_ids_repeat = cs_ids.unsqueeze(2).repeat([1, 1, log_ps_max_ids.shape[1]]) # shape: [batch_size, num_cs, length]
    # mask is 1 only for constraints that are NOT already the argmax token at
    # any position, so the loss only pushes constraints the model misses.
    mask = (log_ps_max_ids.unsqueeze(1) == cs_ids_repeat).type(torch.FloatTensor).sum(-1) # shape: [batch_size, num_cs]
    mask = (mask < 1).type(torch.FloatTensor)
    mask = mask.to(constraint_max_log_ps_.device)
    loss = - (constraint_max_log_ps_ * mask).sum()
    # Average over the number of masked (missed) constraints; 0 if none missed.
    if mask.sum() != 0:
        loss = loss / mask.sum()
    else:
        loss = 0
    return loss
def sum_values(measure_value):
    """Collapse each value of the dict to the sum of its entries.

    Args:
        measure_value: dict whose keys are date/time stamps and whose
            values are iterables of crossing counts

    Returns:
        the same dict, mutated in place so each value is a single sum
    """
    for timestamp in measure_value:
        measure_value[timestamp] = sum(measure_value[timestamp])
    return measure_value
def string_fx(f, x):
    """
    Format the application of f to x.

    Parameters
    ----------
    f : function
        f(x)
    x : float
        value

    Returns
    -------
    str
        string format: f(x) = v
    """
    return "f({}) = {}".format(x, f(x))
def determine_framework(indicator):
    """
    Return the framework name ('CCGOIS' or 'NHSOF'), or None if neither
    marker appears in the 'Indicator specification' source URL.
    """
    spec_urls = [src['url'] for src in indicator['sources']
                 if src['description'] == 'Indicator specification']
    first_url = spec_urls[0]
    name = None
    if 'Clinical Commissioning Group Indicators' in first_url:
        name = 'CCGOIS'
    if 'Outcomes Framework' in first_url:
        name = 'NHSOF'
    return name
import re
def re_compile(value):
    """
    Compile a regex pattern, raising TypeError on an invalid pattern.

    argparse expects type callables to raise TypeError, while re.compile
    raises re.error; this shorthand converts the exception.
    """
    try:
        return re.compile(value)
    except re.error as exc:
        raise TypeError(exc)
def harmonic(n):
    """
    Compute the n-th Harmonic number

    Compute Hn where Hn = 1 + 1/2 + 1/3 + ... + 1/n

    Parameters
    ----------
    n : int
        n-th number

    Returns
    -------
    hn : float
        harmonic number (0.0 for n <= 0)
    """
    # Bug fix: the original iterated range(1, n), omitting the final 1/n
    # term and therefore returning H(n-1) instead of H(n).
    hn = 0.0
    for i in range(1, n + 1):
        hn += 1 / i
    return hn
import os
def _token_cache_path():
"""Return the path to a credentials file."""
xdg_cache_dir = os.environ.get("XDG_CACHE_DIR")
if not xdg_cache_dir:
xdg_cache_dir = os.path.expanduser("~/.cache")
return os.path.join(xdg_cache_dir, "faculty", "token-cache.json") | fed8395d0568580f8b2248268a1037ab105def13 | 51,078 |
def specific_help(functions, func):
    """
    Return the ?help <command> message for a command.

    -----
    :param <functions>: <class 'dict'> ; dictionary of available botjack commands
    :param <func>: <class 'str'> ; command requested by user
    :return: the command's help text (looked up with or without a '!'
        prefix), or a fallback message when unknown
    """
    if func in functions:
        return functions[func]
    if "!" + func in functions:
        return functions["!" + func]
    return "Beep, boop! I'm not a smart pony!"
import glob
import os
def _njobs_in_dir(block_dir):
"""
Internal method to count the number of jobs inside a block
Args:
block_dir: (str) the block directory we want to count the jobs in
Return:
(int)
"""
return len(glob.glob("%s/launcher_*" % os.path.abspath(block_dir))) | a7fc8526ac4b1dc365964e0c3259f9ac05776edb | 51,080 |
import random
def calculate_gauge():
    """Create a fixed-range [0, 100] statistic for the gauge."""
    return random.randint(0, 100)
import os
def get_asset_path(name):
    """Return the filename resolved inside the test assets 'expect' folder
    (relative to this module's directory)."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(base_dir, "test_assets", "expect", name)
def _browser_list_string(browser_names):
"""Takes a list of browser names and returns a string representation in the
format '{browser0,browser1,browser2}'
:param browser_names: List of browser names
:return: String representation of the browser name list
"""
return '{{{}}}'.format(','.join(browser_names)) | 0a5c3a9516c4a72db7cb947f12a47ac70394c65f | 51,083 |
from numpy import exp
def exponential_1(x, x0, A, tau, offset):
    """
    Single-exponential function: A * exp(-(x - x0) / tau) + offset.
    """
    return A * exp(-(x - x0) / tau) + offset
def fatorial(num, show=False):
    """
    -> Compute the factorial of a number.

    (Docstring translated from Portuguese.)
    :param num: the number whose factorial is computed.
    :param show: (optional) print the calculation steps
    :return: the factorial of num.
    """
    result = 1
    for factor in range(num, 0, -1):
        if show:
            print(factor, end='')
            # Separate the factors with ' x ' and finish the line with ' = '.
            print(' x ' if factor > 1 else ' = ', end='')
        result *= factor
    return result
import os
def findInSearchPath(envname, pathlist, basename):
    """
    Look for a file: if ${envname} is in the environment, return its value,
    else search each directory in the path list for a file or directory
    named basename.

    :param envname: environment variable consulted first
    :param pathlist: list of directories, or a ':'-separated path string
    :param basename: file/directory name to look for
    :raises ValueError: if nothing is found
    """
    # Bug fix: the original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit and genuine programming errors. The
    # lookups below are explicit instead.
    if envname in os.environ:
        return os.environ[envname]
    if isinstance(pathlist, str):
        pathlist = pathlist.split(":")
    for directory in pathlist:
        candidate = os.path.join(directory, basename)
        try:
            os.stat(candidate)
        except OSError:
            continue
        return candidate
    raise ValueError("Could not find %s in paths : %s" % (basename, pathlist))
import codecs
def deserializer_with_decoder_constructor(deserialization_func, decoder_type='utf-8', decoder_error_mode='replace'):
    """
    Wrap a deserialization function with string decoding. This matters for
    JSON, which expects to operate on strings (potentially unicode), NOT
    byte streams; a decoding step is needed in between.

    :param deserialization_func: The base deserialization function.
    :param decoder_type: The decoder type. Default: 'utf-8'
    :param decoder_error_mode: The decode error mode. Default: 'replace'.
    :return: The deserializer function wrapped with the specified decoder.
    :rtype: bytes | bytearray | str -> T
    """
    decode = codecs.getdecoder(decoder_type)

    def deserialize(payload):
        text, _consumed = decode(payload, decoder_error_mode)
        return deserialization_func(text)

    return deserialize
def check_meta(row, meta_val, meta_type):
    """
    Check if the row's current role/type equals or contains the target.

    Both *meta_val* and ``row[meta_type]`` may be a single string or a
    list of strings; single strings are treated as one-element lists.

    :return: True when at least one target value is present in the row
    """
    targets = [meta_val] if isinstance(meta_val, str) else meta_val
    current = row[meta_type]
    if isinstance(current, str):
        current = [current]
    return any(t in current for t in targets)
def substract(x, subset):
    """Remove elements of subset whose token id sequence (yseq) exists in x.

    Args:
        x (list): set of hypotheses
        subset (list): subset of hypotheses to remove (matched on .yseq)
    Returns:
        final (list): hypotheses of x not present in subset
    """
    kept = []
    for hyp in x:
        matched = any(hyp.yseq == other.yseq for other in subset)
        if not matched:
            kept.append(hyp)
    return kept
import os
def JudgeFileExist(FilePath):
    """
    Return True if the given path exists on disk, else False.

    (Docstring translated from Chinese: "given a file path, judge
    whether that file exists".)
    """
    # os.path.exists already returns a bool; the `== True` comparison and
    # the if/else wrapper in the original were redundant.
    return os.path.exists(FilePath)
import os
def getOS():
    """Find and return the OS name reported by os.uname().

    Returns:
        string -- OS Name (e.g. 'Linux', 'Darwin')
    """
    return os.uname().sysname
def combine_intervals(I1, I2):
    """Return a 2-tuple of timestamps (I1[0], I2[1])."""
    start = I1[0]
    end = I2[1]
    return (start, end)
def select_last_year(df_features):
    """
    Select the rows belonging to the most recent year.

    In order to predict into the future, we always need the input from
    the last observed year.

    :param df_features: DataFrame indexed by (site_id, species, year)
    :return: DataFrame restricted to the latest year, same index layout
    """
    df_last = df_features.reset_index()
    years = df_last['year'].apply(int)
    # Bug fix: the original compared the year column against str(max_year),
    # which silently selected nothing when the year level held ints.
    df_last = df_last[years == years.max()]
    df_last.set_index(['site_id', 'species', 'year'], inplace=True)
    return df_last
def _default_key(obj):
"""Default key function."""
return obj | a5ccb0bb9f0072734f73da8a205fadc549694104 | 51,096 |
def _is_index_labelled(graph):
"""graph is index-labels [0, len(graph) - 1]"""
return all(v in graph for v in range(len(graph))) | 5627a3342dba9d2fd4974d63f9238b24f4a3057d | 51,097 |
import sys
import functools
def ensure_python_version(function):
    """Decorator: raise SystemError at call time unless running on
    Python 3.{5, 6, 7, 8}.

    :param function: the callable to guard
    :raises SystemError: when invoked on an unsupported interpreter
    """
    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__, ...), which the original decorator lost.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        python_version = sys.version_info
        if not (python_version.major == 3 and python_version.minor in (5, 6, 7, 8)):
            raise SystemError("Python version should be 3.{5, 6, 7, 8}")
        return function(*args, **kwargs)
    return wrapper
def a_or_an(s: str) -> str:
    """Return 'a [str]' or 'an [str]' as appropriate.

    Robustness fix: the original raised IndexError on an empty string;
    an empty string now gets the consonant form ("a ").
    """
    return f"an {s}" if s and s[0] in "aeiouAEIOU" else f"a {s}"
def _DetectLto(lines):
  """Scans LLD linker map file and returns whether LTO was used."""
  # It's assumed that the first line in |lines| was consumed to determine that
  # LLD was used. Seek 'thinlto-cache' prefix within an "indicator section" as
  # indicator for LTO.
  found_indicator_section = False
  # Potential names of "main section". Only one gets used.
  indicator_section_set = set(['.rodata', '.ARM.exidx'])
  start_pos = -1
  for line in lines:
    # Shortcut to avoid regex: The first line seen (second line in file) should
    # start a section, and start with '.', e.g.:
    # 194 194 13 1 .interp
    # Assign |start_pos| as position of '.', and trim everything before!
    if start_pos < 0:
      start_pos = line.index('.')
    if len(line) < start_pos:
      continue
    line = line[start_pos:]
    tok = line.lstrip()  # Allow whitespace at right.
    indent_size = len(line) - len(tok)
    if indent_size == 0:  # Section change.
      if found_indicator_section:  # Exit if just visited "main section".
        break
      if tok.strip() in indicator_section_set:
        found_indicator_section = True
    elif indent_size == 8:
      # Entries nested 8 columns deep belong to the current section; an LTO
      # build shows a 'thinlto-cache' entry inside the indicator section.
      if found_indicator_section:
        if tok.startswith('thinlto-cache'):
          return True
  return False
def isEasyGeneratorPossible(s: tuple):
    """
    Return True if it is possible to easily generate a generator for the
    group described by *s*.

    :param s: tuple whose first element is the prime p (second element
        is ignored here)
    """
    # The easy-generator condition, unchanged from the original:
    # p = 2 (mod 3) and p = 1 or 11 (mod 12). The lambda indirection and
    # the if/else around a boolean were redundant.
    p, _ = s
    return p % 3 == 2 and (p % 12 == 1 or p % 12 == 11)
import time
def measure_time(f_name):
    """Decorator factory that prints the wall-clock run time of the
    decorated function.

    :param f_name: label used in the printed report; falls back to the
        decorated function's __name__ when None
    """
    def wrapper(f):
        label = f_name if f_name is not None else f.__name__

        def wrapped_f(*args, **kwargs):
            start = time.time()
            result = f(*args, **kwargs)
            elapsed = time.time() - start
            print('%11s: %0.3f sec' % (label, elapsed))
            return result
        return wrapped_f
    return wrapper
def to_modify_quota(input_quota, array_quota, array_include_overhead):
    """
    Decide whether the quota on the array needs to be modified.

    :param input_quota: Threshold limits dictionary passed by the user.
    :param array_quota: Threshold limits dictionary got from the Isilon Array.
    :param array_include_overhead: Whether Quota Include Overheads or not.
    :return: True if the quota is to be modified, else False.
    """
    overhead_requested = input_quota['include_overheads']
    if overhead_requested is not None and overhead_requested != array_include_overhead:
        return True
    return any(
        limit in array_quota
        and input_quota[limit] is not None
        and input_quota[limit] != array_quota[limit]
        for limit in input_quota
    )
def getZoneLocFromGrid(gridCol, gridRow):
    """
    Create a string location (eg 'A10') from zero based grid refs
    (col=0, row=9 -> 'A10'): column 0 maps to 'A', row 0 maps to '1'.
    """
    column_letter = chr(ord('A') + gridCol)
    row_number = gridRow + 1
    return "{}{}".format(column_letter, row_number)
def second_differences(signal):
    """The mean of the absolute values of the second differences of the
    raw signal: average of |signal[i+2] - signal[i]| over all valid i."""
    total = 0.0
    count = len(signal) - 2
    for i in range(count):
        total += abs(signal[i + 2] - signal[i])
    return total / count
def _formatted(dataset, format='dict', orient_results='columns'):
"""
Internal helper that returns dataset into dict structure
"""
# convertion to pandas
pdf = dataset.toPandas()
# returning results
if format == 'dict':
# orient results column format, internal dict format equivalence
_orient_results = orient_results
if orient_results == 'columns':
_orient_results = 'dict'
return pdf.to_dict(orient=_orient_results)
else:
raise Exception('Internal Error: Unknow format {0}.'.format(format)) | 28cd1058575341aabb271550dbdb8c360a91a104 | 51,109 |
import math
def get_divisors(x, filter=None):
    """ Return the divisors of the integer x, in ascending order.

    :param x: positive integer to factor
    :param filter: optional predicate on a candidate divisor; a divisor d
        is kept only when ``filter(d)`` is falsy, i.e. the predicate marks
        divisors to EXCLUDE - NOTE(review): the docstring wording
        ("filter out the illegal one") is ambiguous; confirm the intended
        polarity against callers. The parameter also shadows the builtin
        ``filter``.
    """
    divisors = []
    large_divisors = []
    # Walk candidates up to sqrt(x); each hit i also yields the paired
    # divisor x // i, collected separately so the output stays sorted.
    for i in range(1, int(math.sqrt(x) + 1)):
        if x % i == 0:
            if (filter and not filter(i)) or not filter:
                divisors.append(int(i))
            if i * i != x:
                if (filter and not filter(int(x / i))) or not filter:
                    large_divisors.append(int(x / i))
    # Paired divisors were gathered in descending order; reverse before
    # appending so the combined list is ascending.
    for d in reversed(large_divisors):
        divisors.append(d)
    return divisors
import argparse
def parse_args():
    """Parse and return command line arguments using argparse.

    Expects a single positional argument: the config file for the tests.
    """
    parser = argparse.ArgumentParser(description='System test runner.')
    parser.add_argument('config_file',
                        help='config file specifiying the system tests.')
    args = parser.parse_args()
    return args
def binary_tree_max(tree: dict):
    """
    Find the largest 'value' in a nested {'value': n, 'children': []} tree.

    (Docstring translated from Portuguese; despite the name, any number
    of children per node is handled.)
    """
    if tree.get('children', 0) == []:
        return tree['value']
    # Recurse into every nested child dict and take the best subtree value.
    child_maxima = [binary_tree_max(child) for child in tree['children']]
    best_child = max(child_maxima)
    return tree['value'] if tree['value'] >= best_child else best_child
def in_range(x, a, b):
    """
    Tests if a value lies between a and b inclusive; a can be greater
    than or less than b.
    """
    return min(a, b) <= x <= max(a, b)
import os
def get_node_id(filename: str = '/var/lib/cloud/data/instance-id') -> str:
    """Read the cloud-init node id stored on disk.

    Cloud-init stores the node id on disk, so it is possible to get host
    information this way. Ideally this would use the cloud_init library,
    but that is python2-only, so this is a bit of a hack.

    Args:
        filename: should only be changed when testing the function
    Returns:
        node id (string)
    Raises:
        RuntimeError: if the instance file is not found
    """
    if not os.path.isfile(filename):
        raise RuntimeError("instance file ({}) does not exists".format(filename))
    with open(filename, 'r') as handle:
        return handle.readline().rstrip("\n")
def _read_servers(filename):
"""Reads file with the server names.
Returns a list of servers.
"""
with open(filename) as file_obj:
return [server.strip() for server in file_obj] | 180ee3e1530afc76c84db44028f0c443dd324be4 | 51,120 |
def argument_checker(
    Csin,
    Csout,
    Crin,
    Crout,
    Sin,
    Rin,
    precision_value,
):
    """Check that the arguments are of correct form.

    Each of the four port numbers (Csin, Csout, Crin, Crout) must lie
    strictly between 1024 and 64000, Sin and Rin must differ, and the
    precision value must lie in the half-open interval [0, 1).

    Prints an ERROR message and returns False on the first violation;
    returns True when everything validates.
    """
    # One shared range check so every failure message names the offending
    # parameter consistently (the original printed 'Cin' for Csin).
    ports = (('Csin', Csin), ('Csout', Csout), ('Crin', Crin), ('Crout', Crout))
    for name, value in ports:
        if not (1024 < value < 64000):
            print('ERROR: {} of wrong size'.format(name))
            return False
    if Sin == Rin:
        print('ERROR: Port numbers are the same')
        return False
    if not (0 <= precision_value < 1):
        print('ERROR: precision value of wrong size')
        return False
    return True
def CalculateThePrice(TheUnitPrice, num, freight):
    """Compute the total price.

    :param TheUnitPrice: price per item
    :param num: number of items
    :param freight: shipping cost
    :return: total (unit price * quantity + freight) as a string
        with two decimal places
    """
    total = TheUnitPrice * num + freight
    # Two-decimal formatting, equivalent to '%.2f' % total.
    return format(total, '.2f')
import math
def log_Eval_point(state, data):
    """Natural logarithm of external validity.

    ``data`` is a multiset of points (callable on a point, with a sample
    space exposing ``sp.iter_all()``), used here as point predicates:
    sums ``data(p) * ln(state(p))`` over every point p in the space.
    """
    return sum(data(*point) * math.log(state(*point))
               for point in data.sp.iter_all())
def count_start(tokenizer):
    """Decorator that makes *tokenizer* yield ``(token, start)`` pairs.

    The decorated tokenizer must be called with the context/sentence string
    plus an int ``base`` giving the absolute start position of that context;
    each yielded start is ``base`` plus the token's offset in the context.

    Example: wrapping ``lambda s: s.split(' ')`` and calling the result with
    ``('The quick ...', 0)`` yields ``('The', 0)``, ``('quick', 4)``, ...
    """
    def wrapper(context, base):
        search_from = 0
        for token in list(tokenizer(context)):
            # Resume the search after the previous match so repeated
            # tokens get their own, increasing, positions.
            position = context.index(token, search_from)
            search_from = position + len(token)
            yield (token, base + position)
    return wrapper
import os
import json
def load_fixture(path):
    """Load a JSON fixture given its path relative to the fixtures directory.

    The '.json' extension is appended when missing; fixtures live in a
    'fixtures' directory next to this module.
    """
    if not path.endswith('.json'):
        path += '.json'
    # Resolve relative to this module's own 'fixtures' directory.
    full_path = os.path.join(os.path.dirname(__file__), 'fixtures', path)
    with open(full_path) as handle:
        return json.load(handle)
from typing import Mapping
def update_config(config, overwrite_config):
    """Recursively update dictionary ``config`` in place with ``overwrite_config``.

    Mapping values are merged recursively; any other value replaces the
    existing entry outright.  See
    http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    for details.

    Args:
        config (dict): dictionary to update
        overwrite_config (dict): dictionary whose items will overwrite those in config
    """
    for key, value in overwrite_config.items():
        if isinstance(value, Mapping):
            # Merge into the existing sub-mapping (or a fresh dict).
            nested = config.get(key, {})
            update_config(nested, value)
            config[key] = nested
        else:
            config[key] = value
def ListJoiner(list):
    """Join each inner sequence of strings with spaces.

    Takes a nested list and returns a flat list of space-joined strings.

    NOTE(review): the parameter name shadows the builtin ``list``; it is
    kept unchanged here for interface compatibility with existing callers.
    """
    return [" ".join(entry) for entry in list]
def data2_eq(data2):
    """Return a predicate matching objects whose ``data2`` equals the given value."""
    def matches(message):
        return message.data2 == data2
    return matches
def tuplify(obj) -> tuple:
    """Convert ``obj`` to a tuple intelligently.

    Strings and non-iterables become one-element tuples; any other
    iterable is expanded element-wise.

    Examples
    --------
    Normal usage::

        tuplify('str')   # ('str',)
        tuplify([1, 2])  # (1, 2)
        tuplify(len)     # (<built-in function len>,)
    """
    # Strings are iterable but should stay whole, so skip expansion.
    if not isinstance(obj, str):
        try:
            return tuple(obj)
        except TypeError:
            pass
    return (obj,)
import fnmatch
def filefilter(filename):
    """Return True when *filename* is a JPEG image (.jpg or .jpeg, any case).

    The original fnmatch-based version matched '*.JPG', '*.jpg' and
    '*.jpeg' but not '*.JPEG', and fnmatch's case handling is
    platform-dependent; this check is uniformly case-insensitive.
    """
    return filename.lower().endswith(('.jpg', '.jpeg'))
def do_sciplots(request):
    """ A pytest fixture to decide whether to create plots (or not) when testing the
    scientific stability of ampycloud.
    Adapted from the similar function in dvas, which itself was adapted from the response of ipetrik
    on `StackOverflow <https://stackoverflow.com/questions/40880259>`__
    To use this, simply call it as an argument in any of the test function, e.g.:
    def test_some_func(a, b, do_sciplots):
        ...
        if do_sciplots:
            diagnostic(...)
    """
    # Returns the value of the custom --DO_SCIPLOTS command-line option.
    # NOTE(review): the option itself is presumably registered elsewhere via
    # pytest_addoption, and the @pytest.fixture decorator applied at the call
    # site/conftest — confirm against the enclosing test setup.
    return request.config.getoption("--DO_SCIPLOTS")
def get_chipseq_atacseq_qc_summary(quality_metric, qc_type):
    """
    Chipseq and Atacseq QCs both have common QC Summary metrics. This method
    calculates the metrics within quality_metric_summary calculated property

    :param quality_metric: dict-like QC item holding raw metric sections
        (e.g. 'overlap_reproducibility_qc', 'flagstat_qc', 'pbc_qc', ...)
    :param qc_type: QC item type name; 'QualityMetricAtacseq' enables the
        ATAC-seq-only mitochondrial-rate entry
    :return: list of summary entries (dicts with "title"/"value"/"numberType"
        and optional "tooltip"), or None when no known section is present
    """
    # Round to two decimal places.
    def round2(numVal):
        return round(numVal * 100) / 100
    qc_summary = []
    if 'overlap_reproducibility_qc' in quality_metric:
        # Peak-level QC: prefer the IDR reproducibility numbers when present,
        # otherwise fall back to the overlap-based ones.
        if 'idr_reproducibility_qc' in quality_metric:
            qc_method = 'idr'
        else:
            qc_method = 'overlap'
        # opt_set selects which peak set the FRiP value below is read from.
        opt_set = quality_metric.get(
            qc_method + "_reproducibility_qc")["opt_set"]
        qc_summary.append({"title": "Optimal Peaks",
                           "value": str(quality_metric.get(qc_method + "_reproducibility_qc")["N_opt"]),
                           "numberType": "integer"})
        qc_summary.append({"title": "Rescue Ratio",
                           "tooltip": "Ratio of number of peaks (Nt) relative to peak calling based" +
                           " on psuedoreplicates (Np) [max(Np,Nt) / min (Np,Nt)]",
                           "value": str(round2(quality_metric.get(qc_method + "_reproducibility_qc")["rescue_ratio"])),
                           "numberType": "float"})
        qc_summary.append({"title": "Self Consistency Ratio",
                           "tooltip": "Ratio of number of peaks in two replicates [max(N1,N2) / min (N1,N2)]",
                           "value": str(round2(quality_metric.get(qc_method + "_reproducibility_qc")["self_consistency_ratio"])),
                           "numberType": "float"})
        qc_summary.append({"title": "Fraction of Reads in Peaks",
                           "value": str(round2(quality_metric.get(qc_method + "_frip_qc")[opt_set]["FRiP"])),
                           "numberType": "float"})
    elif 'flagstat_qc' in quality_metric or 'ctl_flagstat_qc' in quality_metric:
        # Alignment-level QC; control samples use the 'ctl_'-prefixed sections.
        pref = ''
        if 'ctl_flagstat_qc' in quality_metric:
            pref = 'ctl_'
        # mitochondrial rate (only for ATAC-seq)
        if qc_type == 'QualityMetricAtacseq':
            total = quality_metric.get(
                pref + "dup_qc")[0]["paired_reads"] + quality_metric.get(pref + "dup_qc")[0]["unpaired_reads"]
            nonmito = quality_metric.get(
                pref + "pbc_qc")[0]["total_read_pairs"]
            mito_rate = round2((1 - (float(nonmito) / float(total))) * 100)
            qc_summary.append({"title": "Percent mitochondrial reads",
                               "value": str(mito_rate),
                               "numberType": "percent"})
        qc_summary.append({"title": "Nonredundant Read Fraction (NRF)",
                           "value": str(round2(quality_metric.get(pref + "pbc_qc")[0]["NRF"])),
                           "tooltip": "distinct non-mito read pairs / total non-mito read pairs",
                           "numberType": "float"})
        qc_summary.append({"title": "PCR Bottleneck Coefficient (PBC)",
                           "value": str(round2(quality_metric.get(pref + "pbc_qc")[0]["PBC1"])),
                           "tooltip": "one-read non-mito read pairs / distinct non-mito read pairs",
                           "numberType": "float"})
        # Paired-end runs report "read1"; a falsy value means single-end,
        # in which case the overall "total" count is used instead.
        final_reads = quality_metric.get(
            pref + "nodup_flagstat_qc")[0]["read1"]  # PE
        if not final_reads:
            final_reads = quality_metric.get(
                pref + "nodup_flagstat_qc")[0]["total"]  # SE
        qc_summary.append({"title": "Filtered & Deduped Reads",
                           "value": str(final_reads),
                           "numberType": "integer"})
    return qc_summary if qc_summary else None
def info(text):
    """Wrap *text* in ANSI bright-green escape codes for pretty terminal output."""
    return "\033[92m{}\033[m".format(text)
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas.

    delta_mean = mean - mean(all errors at or below epsilon)
    delta_std  = std  - std(all errors at or below epsilon)

    Returns (0, 0) when no error falls at or below epsilon.
    """
    below_epsilon = errors[errors <= epsilon]
    if len(below_epsilon) == 0:
        return 0, 0
    return mean - below_epsilon.mean(), std - below_epsilon.std()
def cli(active, id):
    """Console script for simple_clinic.

    Assigns *id* onto the active object and returns exit code 0.
    """
    setattr(active, 'id', id)
    return 0
import os
def fread(*paths, mode=''):
    """Read and return the entire file found by joining *paths*.

    Args:
        paths:
            path components of the file to read (joined with os.path.join).
        mode(str):
            If ``mode='b'`` the content is returned as ``bytes``;
            if ``mode=''`` (default) it is returned as a decoded ``str``.
    Returns:
        file content in string or bytes.
    """
    target = os.path.join(*paths)
    with open(target, 'r' + mode) as handle:
        return handle.read()
def hbnb():
    """Handler for the /hbnb route: returns the response string 'HBNB'."""
    return "HBNB"
def parse_mbld(s: str) -> float:
    """Turn the WCA multi-blind result format into a number.

    The first two characters encode 99 minus the solved margin; the next
    five encode the elapsed time in seconds.  Finishing under the hour adds
    a fractional bonus proportional to the minutes left.
    """
    solved_margin = 99 - int(s[0:2])
    minutes = int(s[2:7]) / 60
    time_bonus = max((60 - minutes) / 60, 0)
    return solved_margin + time_bonus
def mult_over_list(l):
"""
Doc string.
"""
product = 1
for e in l:
product *= int(e)
return product | 91dd798f564ae14afe83af8146e841f7c963cbc0 | 51,145 |
def draw_individual(canv,x,y,rad,out_color,in_color):
    """DO NOT MODIFY THIS FUNCTION"""
    # Draws a circle of radius `rad` centred at (x, y) on the canvas `canv`
    # (presumably a tkinter Canvas — confirm at the call site), with the given
    # outline and fill colours, and returns the created item's id.
    return canv.create_oval((x-rad),(y-rad),(x+rad),(y+rad),width=1, outline=out_color, fill=in_color)
def bytes_to_u16(data):
    """Combine the first two bytes of *data* (big-endian order) into an
    unsigned 16-bit integer."""
    return (data[0] << 8) | data[1]
def _map_args_test(x: float, y: float=2, z: float=3) -> float:
"""A test function for the map_args function. Returns the sum of x, y, z"""
return x + y + z | 20e63e40c0c98151ef6199776c1c6296ce5b39b8 | 51,148 |
def generate_key(token):
"""
根据设备指纹生成自定义加密密钥
:return:
"""
return "rsp67ou9" + '-'.join(token.split('-')[1:])[2:18] | ca507ec759f0eea33160a773a12be78eff53c82c | 51,150 |
def get_sentence_token_information(sentences):
"""Return parsed as nested sentence-token-information list"""
parsed_text = []
for sent in sentences:
parsed_sentence = []
# split sentence into tokens
tokens = sent.split('\n')
for token in tokens:
# split token string into token information
parsed_token = [t for t in token.split('\t')]
parsed_sentence.append(parsed_token)
parsed_text.append(parsed_sentence)
return parsed_text | 4177aad55ae93419b2216051b4f3e07ba246c592 | 51,151 |
import re
def is_ner(tag):
"""
Check the 10th column of the first token to determine if the file contains
NER tags
"""
tag_match = re.match('([A-Z_]+)-([A-Z_]+)', tag)
if tag_match:
return True
elif tag == "O":
return True
else:
return False | 92d8628a44de9d4d3f90d8b1e19926036214bcd4 | 51,153 |
import os
def is_jpg_complete(img_path):
    """
    Checks whether the JPEG image is complete, i.e. the file ends with the
    two-byte End Of Image (EOI) marker 0xFF 0xD9.

    (The original docstring referenced PNG chunk structure, but the code
    checks the JPEG EOI marker.)

    :param img_path: the absolute path to the JPEG image
    :type img_path: str
    :return: True if complete
    :rtype: bool
    """
    try:
        flen = os.path.getsize(img_path)
        if flen <= 2:
            # Too short to even hold the marker.
            return False
        with open(img_path, "rb") as f:
            f.seek(flen - 2, 0)
            marker = f.read(2)
        return marker[0] == 0xFF and marker[1] == 0xD9
    except OSError:
        # Missing file, permission error, or a race with a concurrent
        # delete/truncate; narrowed from the original bare `except:` which
        # also swallowed KeyboardInterrupt and programming errors.
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.