content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import json
def get_top_python_packages(top_n=100):
    """
    Generate list of most downloaded python packages.

    Reads the local JSON snapshot "top_python_pkg_downloads.json"
    (data from https://hugovk.github.io/top-pypi-packages/).

    Args:
        top_n: the number of most downloaded packages to return
    Returns:
        (list) Names of most downloaded packages
    """
    # JSON file containing top 4000 packages
    top_python_pkgs = "top_python_pkg_downloads.json"
    with open(top_python_pkgs, "r") as j:
        contents = json.load(j)
    # Slice replaces the manual counter/break loop of the original.
    return [pkg["project"] for pkg in contents["rows"][:top_n]]
|
d19136a5ee21fe65c33ef4a5336f46863bf2c6b0
| 28,459
|
import argparse
def parse_args():
    """Parse command-line arguments for Sefa.

    Returns:
        argparse.Namespace with a single ``config`` attribute.
    """
    arg_parser = argparse.ArgumentParser(description='Conduct Sefa.')
    arg_parser.add_argument(
        '--config', type=str, help='Path to the Sefa configuration.')
    return arg_parser.parse_args()
|
886a328c511dfeeecbee2e461742f6991e7ff63e
| 28,460
|
def get_arrhythmia_type(fields):
    """Return the one-letter arrhythmia code for a sample.

    Arguments
    ---------
    fields: fields of sample read from wfdb.rdsamp
    Returns
    -------
    Type of arrhythmia:
        'a': asystole
        'b': bradycardia
        't': tachycardia
        'f': ventricular fibrillation
        'v': ventricular tachycardia
    """
    code_by_name = {
        'Asystole': 'a',
        'Bradycardia': 'b',
        'Tachycardia': 't',
        'Ventricular_Tachycardia': 'v',
        'Ventricular_Flutter_Fib': 'f',
    }
    # The first comment line of the record carries the arrhythmia label.
    return code_by_name[fields['comments'][0]]
|
4772acc3612492a4f41acac0619435774f11cdff
| 28,463
|
def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3):
    """
    Generic chaining function for the third derivative of a composition.
    .. math::
        \\frac{d^{3}(f . g)}{dx^{3}} = \\frac{d^{3}f}{dg^{3}}(\\frac{dg}{dx})^{3} + 3\\frac{d^{2}f}{dg^{2}}\\frac{dg}{dx}\\frac{d^{2}g}{dx^{2}} + \\frac{df}{dg}\\frac{d^{3}g}{dx^{3}}
    """
    term_one = d3f_dg3 * (dg_dx ** 3)
    term_two = 3 * d2f_dg2 * dg_dx * d2g_dx2
    term_three = df_dg * d3g_dx3
    return term_one + term_two + term_three
|
90c7563821a30fe0ff0b7e2431122d4754d52210
| 28,464
|
def calc_qty_from_value(row):
    """
    Estimates product quantity based on product value and food group.

    Args:
        row: see extrapolate_qty_from_productvalue() function
            (presumably a pandas Series with CP_*/P_*/Qty_* fields — verify against caller)
    Returns:
        The row restricted to ['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method'],
        with Qty_val expressed in grams when it could be computed.
    """
    value = abs(row.CP_MontantTotal)
    food_group = row.P_food_group
    Q_unit = row.CP_QuantiteUnite
    Q_val = abs(row.CP_QuantiteTotale)
    #print(Q_unit)
    # Check if CP has already a weight. If so, use it.
    if Q_unit in ['Kg','kg','Kilo','Litre','Litres']:
        # Convert kg (litres treated as ~1 kg/L — TODO confirm) to grams.
        row.Qty_val = Q_val * 1000
        row.Qty_unit = 'g'
        row.Qty_approx = bool(False)
        row.Qty_method = 3.0
        row.Qty_std = bool(True)
        #print(row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']])
        return row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']]
    # Calculate weight from food group and product value
    if float(value)==0:
        # Zero monetary value: nothing to extrapolate from; return row as-is.
        #print(row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']])
        return row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']]
    # Per-food-group factor (apparently euros per kilo).
    # NOTE(review): Qty_val = factor / value * 1000 makes quantity shrink as
    # value grows, which looks inverted for a price-per-kg factor — confirm
    # the intended formula with the data owner.
    dict_euro_kilo = {
        None:3,
        'nan':3,
        'Produits gras sucrés salés':3,
        'Produits laitiers (hors fromage)':3,
        'Féculents raffinés':2,
        'Fruits':1.5,
        'Exclus':3,
        'Plats préparés':3,
        'Légumes':1.5,
        'Viande, oeufs':3,
        'Fromage':3,
        'Matières grasses ajoutées':3,
        'Féculents non raffinés':1.5,
        'Poisson':3}
    if food_group in (dict_euro_kilo.keys()):
        row.Qty_val = dict_euro_kilo[food_group] / float(value) * 1000
        row.Qty_unit = 'g'
        row.Qty_approx = bool(False)
        row.Qty_method = 3.0
        row.Qty_std = bool(True)
    else:
        # Unknown food group: fall back to the generic factor of 3.
        row.Qty_val = 3 / float(value) * 1000
        row.Qty_unit = 'g'
        row.Qty_approx = bool(False)
        row.Qty_method = 3.0
        row.Qty_std = bool(True)
    #print(row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']])
    return row[['P_Id','Qty_val','Qty_unit','Qty_std','Qty_approx','Qty_method']]
|
4eb70438e6e794ce3e44f15fa28a5f399782ccea
| 28,467
|
def nextIteration(pagerankvector: list, outlinkvector: list, adjMatrix: list, dampingfactor: float) -> list:
    """Perform one iteration of the PageRank algorithm.

    adjMatrix[row][col] is nonzero iff node ``row`` links to node ``col``;
    outlinkvector[row] holds the out-degree of node ``row``.
    Returns the updated page-rank vector.
    """
    node_count = len(adjMatrix)
    ranks = [1 - dampingfactor] * node_count
    for dst in range(node_count):
        for src in range(node_count):
            weight = adjMatrix[src][dst]
            if weight > 0:
                # Each linking node shares its damped rank over its out-links.
                ranks[dst] += weight * dampingfactor * (pagerankvector[src] / outlinkvector[src])
    return ranks
|
acaa445bf120d71dbd9261a93667d76147f94803
| 28,468
|
import warnings
def ditto(request):
    """
    Deprecated.

    Emits a DeprecationWarning and contributes nothing to the template context.
    """
    message = "The ditto context_processor is no longer used."
    warnings.warn(message, DeprecationWarning)
    return {}
|
7250488bf5d94aa14dec7ba64974642542149e59
| 28,469
|
import subprocess
def getsnapshots():
    """Read all current local Time Machine snapshots.

    returns:
        List of snapshot date lines; the first line emitted by
        ``tmutil`` is a header and is dropped.
    """
    proc = subprocess.Popen(['tmutil', 'listlocalsnapshotdates'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    raw_output, _ = proc.communicate()
    lines = raw_output.splitlines()
    return lines[1:]
|
94ccc0af80c47528ba1ccf919038a804df464d91
| 28,470
|
def createProperMovieDictionary(raw_dict):
    """
    Takes the dictionary provided by the request (full of strings),
    converts the values that need other types (int, float) and escapes
    all single-quote characters in strings.

    :param raw_dict: the raw dictionary with the movie information provided by the api
    :return proper_dict: a dictionary with the proper types to input in the database
        containing id, title, year, length, rating and the cast
        (a dictionary of actors in movies)
    """
    def as_int(text):
        # Non-numeric year falls back to 0.
        try:
            return int(text)
        except ValueError:
            return 0

    def as_float(text):
        # Non-numeric rating falls back to 0.0.
        try:
            return float(text)
        except ValueError:
            return 0.0

    proper_dict = {
        'id': raw_dict['id'],
        'title': raw_dict['title'].replace(u'\xa0', u'').replace("'", "''"),
        'year': as_int(raw_dict['year']),
        'length': raw_dict['length'],
        'rating': as_float(raw_dict['rating']),
        'cast': raw_dict['cast'],
    }
    for member in proper_dict['cast']:
        member['actor'] = member['actor'].replace("'", "''")
        member['character'] = member['character'].replace("'", "''")
    return proper_dict
|
ac30cbff86bfd9659c9363d31acacbd3c76b6f0f
| 28,472
|
import argparse
def _parse_arguments():
"""Parse commandline arguments.
:returns: argparse.Namespace with arguments.
"""
parser = argparse.ArgumentParser(description="Generate allure report.")
parser.add_argument(
"--data-location", dest="data_location", default="data",
help="Location of the allure data",
)
parser.add_argument(
"--report-location", dest="report_location", default="report",
help="Location where report should be stored",
)
parser.add_argument(
"--allure-location", dest="allure_location",
help=" ".join((
"Location where allure commandline can be found.",
"Zip archive of the latest release will be used if empty string is specified",
)),
)
return parser.parse_args()
|
1b8d5d85efd869be7537f8e493dece524577ff04
| 28,473
|
def get_phred_query(sample_id, gt_ll, genotype, prefix=" and ", invert=False):
    """Build a phred-scaled genotype-likelihood filter clause.

    Default is to test < where a low value phred-scale is high
    confidence for that genotype

    >>> get_phred_query(2, 22, "het")
    ' and gt_phred_ll_het[1] < 22'
    >>> get_phred_query(2, 22, "het", prefix="")
    'gt_phred_ll_het[1] < 22'
    >>> get_phred_query(2, 22, "het", prefix="", invert=True)
    'gt_phred_ll_het[1] > 22'
    """
    assert genotype in ("het", "homref", "homalt")
    if not gt_ll:
        return ""
    # A subject object may be passed instead of a bare id.
    if hasattr(sample_id, "sample_id"):
        sample_id = sample_id.sample_id
    comparison = ">" if invert else "<"
    clause = "gt_phred_ll_{genotype}[{sample_id}] {sign} {gt_ll}".format(
        genotype=genotype, sample_id=sample_id - 1,
        sign=comparison, gt_ll=gt_ll)
    return prefix + clause
|
3b913725bafec554c105173fb4ff720324aa8ae7
| 28,475
|
def yfun(p,B,pv0,f):
    """
    Steady state solution for y without CRISPR.
    """
    # Net growth term appears three times in the original expression.
    growth = B*p - p/pv0
    numerator = growth - f * (growth + 1)
    denominator = p * (growth + 1)
    return numerator / denominator
|
191835f60bb755f173dd21c4f116efcc1c2c86bf
| 28,476
|
def _command_to_string(cmd):
"""Convert a list with command raw bytes to string."""
return ' '.join(cmd)
|
e8c39aa287f64099358e420b63b507f6f3f68c7a
| 28,477
|
def get_racecar_camera_topics(racecar_name):
    """Return the camera topics where the racecar publishes the frames.

    Arguments:
        racecar_name (str): The name of the racecar
    """
    main_camera_topic = "/{}/main_camera/zed/rgb/image_rect_color".format(racecar_name)
    sub_camera_topic = "/sub_camera/zed/rgb/image_rect_color"
    return [main_camera_topic, sub_camera_topic]
|
7d535d2e6f9e9d056dec8604dc856901b30b2c20
| 28,478
|
def string_preset(preset):
    """Render a preset mapping as a brace-delimited multi-line string.

    Each attribute is emitted as " key: value," on its own line.
    """
    pieces = ['{\n']
    for attr in list(preset.keys()):
        pieces.append(" {0}: {1},\n".format(attr, preset[attr]))
    pieces.append('}\n')
    return ''.join(pieces)
|
2a654d92e96c02c9ccd4ae81fda1e553b0555f2a
| 28,481
|
import numpy
def _parse_UB_matrix(header_line):
"""Parse G3 header line and return UB matrix
:param str header_line: G3 header line
:return: UB matrix
"""
return numpy.array(list(map(float, header_line.split()))).reshape((1, 3, 3))
|
94528ae811790d6f64e2d8458eff8a7dd117d391
| 28,484
|
def links_as_text(code):
    """Replace every occurrence of '<a href="xxxxxxxx" ... >yyyy</a>' with '(xxxxxxxx : yyyy)'."""
    def link_as_text(code):
        # Rewrite the LAST anchor tag found; return (match index, new text).
        ret = code
        i = code.rfind('<a href="')
        if i>-1:
            j = code.find('"', i+9)   # closing quote of the href URL
            k = code.find('>', j)     # end of the opening <a ...> tag
            l = code.find('<', k)     # start of the closing </a> tag
            # Splice: prefix + (url : link text) + text after '</a>' (4 chars).
            ret = code[:i] + '(' + code[i+9:j] + ' : ' + code[k+1:l] + ')' + code[l+4:]
        return i, ret
    i=1
    ret = code
    # Repeat until rfind reports no anchor left (i == -1).
    while i>-1:
        i, ret = link_as_text(ret)
    return ret
|
30cb5f132dd15283889107728a582a88c832e60b
| 28,486
|
def test_class_objects():
    """Class objects.

    Class objects support two kinds of operations:
    - attribute references
    - instantiation
    """
    # Attribute references use the standard syntax for all attribute
    # references in Python: obj.name. Valid attribute names are all names
    # that were in the class's namespace when the class object was created.
    class ComplexNumber:
        """复数类的例子"""  # kept verbatim: this docstring is compared at runtime below
        real = 0
        imaginary = 0

        def get_real(self):
            """Return the real part of the complex number."""
            return self.real

        def get_imaginary(self):
            """Return the imaginary part of the complex number."""
            return self.imaginary

    assert ComplexNumber.real == 0
    # __doc__ is also a valid attribute, returning the class docstring.
    assert ComplexNumber.__doc__ == '复数类的例子'
    # Class attributes can be assigned to, so ComplexNumber.real is mutable.
    ComplexNumber.real = 10
    assert ComplexNumber.real == 10
    # Class instantiation uses function notation: calling the class object
    # yields a new instance of the class.
    complex_number = ComplexNumber()
    assert complex_number.real == 10
    assert complex_number.get_real() == 10
    # The class attribute is still 10; instances read it through the class.
    ComplexNumber.real = 10
    assert ComplexNumber.real == 10
    # Instantiation ("calling" a class object) creates an empty object. Many
    # classes want instances customised to a specific initial state, so a
    # class may define a special method named __init__(), like this:
    class ComplexNumberWithConstructor:
        """Example class with a constructor."""

        def __init__(self, real_part, imaginary_part):
            self.real = real_part
            self.imaginary = imaginary_part

        def get_real(self):
            """Return the real part of the complex number."""
            return self.real

        def get_imaginary(self):
            """Return the imaginary part of the complex number."""
            return self.imaginary

    complex_number = ComplexNumberWithConstructor(3.0, -4.5)
    # BUG FIX: the original `assert a, b == (...)` parsed as an assert with a
    # message and only checked truthiness of `a`; compare the tuple instead.
    assert (complex_number.real, complex_number.imaginary) == (3.0, -4.5)
|
f0acaf49376b43cc76524a4b07dc643025d97b98
| 28,487
|
from pathlib import Path
def find_modules(x):
    """Get all python files given a path.

    Args:
        x (string): file path
    Returns:
        (list): recursive list of python files including sub-directories
    """
    # Materialise the rglob generator: the documented contract is a list,
    # so callers may index or iterate the result more than once.
    return list(Path(x).rglob('*.py'))
|
1b8b84087c3de476c8e2b96b8f09001755ebabac
| 28,488
|
def medium_file(file_path):
    """Open a medium file: a headerless TSV with 3 columns (str, float, float).

    Each line is "reaction_id<TAB>lower_bound<TAB>upper_bound".
    Return a generator of (str, float, float) tuples.
    This function is used by argparse for type validation.

    (Doc fix: the original docstring claimed 2 columns and (str, float)
    tuples, but the code unpacks and yields three fields.)
    """
    def row_generator(file_path):
        with open(file_path, "r") as fh:
            for line in fh:
                reaction_id, lower_bound, upper_bound = line.rstrip().split("\t")
                yield (reaction_id, float(lower_bound), float(upper_bound))
    # Return the generator (not a list) so argparse validation stays lazy.
    return row_generator(file_path)
|
eba4c372a1c07feab28634d65dcbd148f9dfa995
| 28,489
|
import subprocess
def run_command(cmd):
    """
    Run command from command line.

    Executes `cmd` through the shell and blocks until it finishes.
    Returns the (stdout, stderr) pair from communicate(); both are None
    since no pipes are attached.
    """
    process = subprocess.Popen(cmd, shell=True)
    return process.communicate()
|
4fe6233ba2a5c3e8a6e6f7f170b14f89bcbd6c39
| 28,490
|
def score(board):
    """
    Compute both players' scores from a Reversi board.

    :param board: nested list of integers that represent a Reversi board
    :returns: a tuple (s1, s2) representing the points of player 1 and 2
    :pre-condition: board must be a nested list of numbers (should be 8 * 8)
    :raises: TypeError, if pre-condition is not true
    """
    player_one = sum(row.count(1) for row in board)
    player_two = sum(row.count(2) for row in board)
    return player_one, player_two
|
e8545cd43924849aae39c355eecf23607311593d
| 28,491
|
from typing import Any
import hashlib
def hashname_from_data(data: Any) -> str:
    """
    Return the MD5 hex digest of a value's string representation.

    Not suitable for classic cryptographic hashing, but useful for
    building stable file names.

    Args:
        data: value to hash
    Returns:
        A hex-digest string.
    """
    encoded = str(data).encode("UTF8")
    return str(hashlib.md5(encoded).hexdigest())
|
51db5ec9e7a3b42873a766dc5be834f37bcc9a63
| 28,492
|
def rules_cidrs_and_security_groups(rules):
    """
    Return a dict with keys "cidrs" and "sgids"
    from a list of security group rules.

    :param rules: list of security group rules
    :type rules: list
    :return: Dict with keys "cidrs" and "sgids"
    :rtype: dict
    """
    cidrs = set()
    sgids = set()
    for rule in rules:
        for ip_range in rule["IpRanges"]:
            cidrs.add(ip_range["CidrIp"])
        for group_pair in rule["UserIdGroupPairs"]:
            sgids.add(group_pair["GroupId"])
    return {"cidrs": cidrs, "sgids": sgids}
|
7da621f59543ae929856c78c69d101b234c4540c
| 28,493
|
def get_WFC_G800L_WCS():
    """
    Defines parameters for the ACS/WFC/G800L slitless mode.

    @return: slitless mode parameters (keys: 'grism', 'drizzle',
        'direct' (None here), 'dimension'); the 'grism' and 'drizzle'
        entries are lists of [keyword, value, comment] FITS-header triples
    @rtype: dictionary
    """
    wcs_keys = {}
    # Header values taken from grism image "j8qq12kgq":
    # World Coordinate System and Related Parameters
    wcs_keys['grism'] = [
        ['WCSAXES ',2,'number of World Coordinate System axes'],
        ['CRPIX1',2.048000000000E+03 ,'x-coordinate of reference pixel'],
        ['CRPIX2 ',1.024000000000E+03 ,'y-coordinate of reference pixel'],
        ['CRVAL1',5.317727241178E+01 ,'first axis value at reference pixel'],
        ['CRVAL2',-2.779882660148E+01 ,'second axis value at reference pixel'],
        ['CTYPE1','RA---TAN','the coordinate type for the first axis'],
        ['CTYPE2','DEC--TAN','the coordinate type for the second axis'],
        ['CD1_1',-8.4686E-06 ,'partial of first axis coordinate w.r.t. x'],
        ['CD1_2',-1.16183E-05 ,'partial of first axis coordinate w.r.t. y'],
        ['CD2_1',-1.09568E-05 ,'partial of second axis coordinate w.r.t. x'],
        ['CD2_2',7.76629E-06 ,'partial of second axis coordinate w.r.t. y'],
        ['LTV1',0.0000000E+00 ,'offset in X to subsection start'],
        ['LTV2',0.0000000E+00 ,'offset in Y to subsection start'],
        ['LTM1_1',1.0 ,'reciprocal of sampling rate in X'],
        ['LTM2_2',1.0 ,'reciprocal of sampling rate in Y'],
        ['ORIENTAT',-56.2392 ,'position angle of image y axis (deg. e of n)'],
        ['RA_APER',5.316373797542E+01 ,'RA of aperture reference position'],
        ['DEC_APER',-2.779155504028E+01 ,'Declination of aperture reference position'],
        ['PA_APER',-56.4791 ,'Position Angle of reference aperture center (de'],
        ['VAFACTOR',1.000027253644E+00 ,'velocity aberration plate scale factor'],
        ['EXPNAME','j8qq12kgq' ,'exposure identifier'],
        ]
    # Polynomial distortion coefficients for drizzling (15 per coordinate).
    wcs_keys['drizzle'] = [
        ['DRZCNUM',15,'Number of coefficients per coordinate'],
        ['DRZSCALE',0.05,'Scale for drizzling'],
        ['DRZ2X01', 29.20952271 ,'Drizzle coefficient 01 in X'],
        ['DRZ2X02', 0.98463856,'Drizzle coefficient 02 in X'],
        ['DRZ2X03', 0.047121902,'Drizzle coefficient 03 in X'],
        ['DRZ2X04', 8.2405479e-06,'Drizzle coefficient 04 in X'],
        ['DRZ2X05', -7.1109122e-06,'Drizzle coefficient 05 in X'],
        ['DRZ2X06', 1.7714826e-06,'Drizzle coefficient 06 in X'],
        ['DRZ2X07', -4.6293307e-10,'Drizzle coefficient 07 in X'],
        ['DRZ2X08', -1.243901e-10,'Drizzle coefficient 08 in X'],
        ['DRZ2X09', -5.3285875e-10,'Drizzle coefficient 09 in X'],
        ['DRZ2X10', 5.1490811e-11,'Drizzle coefficient 10 in X'],
        ['DRZ2X11', 1.6734254e-14,'Drizzle coefficient 11 in X'],
        ['DRZ2X12', 3.425828e-14,'Drizzle coefficient 12 in X'],
        ['DRZ2X13', 9.5688062e-14 ,'Drizzle coefficient 13 in X'],
        ['DRZ2X14', -1.6229259e-14,'Drizzle coefficient 14 in X'],
        ['DRZ2X15', 1.2711148e-13,'Drizzle coefficient 15 in X'],
        ['DRZ2Y01', 1047.90670925,'Drizzle coefficient 01 in Y'],
        ['DRZ2Y02', 0.040785422,'Drizzle coefficient 02 in Y'],
        ['DRZ2Y03', 0.97161774,'Drizzle coefficient 03 in Y'],
        ['DRZ2Y04', -2.5332551e-06,'Drizzle coefficient 04 in Y'],
        ['DRZ2Y05', 5.9183197e-06,'Drizzle coefficient 05 in Y'],
        ['DRZ2Y06', -9.4306843e-06,'Drizzle coefficient 06 in Y'],
        ['DRZ2Y07', 7.3674246e-11,'Drizzle coefficient 07 in Y'],
        ['DRZ2Y08', -4.3916951e-10,'Drizzle coefficient 08 in Y'],
        ['DRZ2Y09', -5.371583e-11,'Drizzle coefficient 09 in Y'],
        ['DRZ2Y10', -3.8747876e-10,'Drizzle coefficient 10 in Y'],
        ['DRZ2Y11', -1.4892746e-14,'Drizzle coefficient 11 in Y'],
        ['DRZ2Y12', -3.1028203e-14,'Drizzle coefficient 12 in Y'],
        ['DRZ2Y13', -1.024679e-13,'Drizzle coefficient 13 in Y'],
        ['DRZ2Y14', 2.9690206e-14,'Drizzle coefficient 14 in Y'],
        ['DRZ2Y15', -1.4559746e-13,'Drizzle coefficient 15 in Y'],
        ]
    # No direct-image header for this mode; detector is 4096x2048 pixels.
    wcs_keys['direct'] = None
    wcs_keys['dimension'] = [4096, 2048]
    return wcs_keys
|
3413c0318eaf7379eb681173fe2a3b2a7003112e
| 28,494
|
def dimer_true(dataframe, col_num, dimer_list):
    """
    Boolean mask telling which primers from the original df form dimers
    (those rows can then be dropped).

    Args:
        dataframe (pd.DataFrame): the primer dataframe
        col_num (int): the column number to check for primer match
        dimer_list (list): dimer records; each record holds the two primer
            sequences of a pair at indices 1 and 2
    Returns:
        out_series (pd.Series): boolean masked series, True if primer is dimer, else False
    """
    # BUG FIX: the original built the membership list with `seq[1] or seq[2]`,
    # which evaluates to seq[1] alone (unless it is falsy), so the second
    # primer of every dimer pair was silently ignored. Collect BOTH.
    dimer_seqs = {seq[1] for seq in dimer_list} | {seq[2] for seq in dimer_list}
    out_series = dataframe.iloc[:, col_num].isin(dimer_seqs)
    return out_series
|
138bcbaf01ed93bfb195e89900b37ab682bd6dea
| 28,496
|
def child_dir_name(parent, child):
    """Return the child directory name truncated under the parent."""
    if parent == '' or child == '':
        return child
    plen = len(parent)
    # Not a proper child: hand back the child unchanged.
    if not child.startswith(parent):
        return child
    # Nothing (or only a separator) beyond the parent: trivially '.'.
    if len(child) < plen + 2:
        return '.'
    # Drop the parent prefix plus the separator that follows it.
    return child[plen + 1:]
|
85b003ee346580dda2647a64b31a93f7ada25d25
| 28,497
|
def seq_from_truth_table(table, order):
    """
    Computes the binary sequence string from a given truth table.
    Currently, this function assumes that the truth table describes
    a de Bruijn sequence.

    Parameters
    ----------
    table : dict
        A mapping from binary strings of length `n` to 0 or 1.
    order : integer
        The order of the boolean function.

    Returns
    -------
    seq : string
        The binary sequence represented by the truth table.
    """
    # Seed with the all-zero state of length `order`.
    seq = '0' * order
    # A de Bruijn sequence of order n has length 2**n.
    while len(seq) < 2**order:
        # Next bit = f(last order-1 bits) XOR the bit `order` positions back
        # (the feedback form of a shift-register update).
        seq += str(table[seq[-(order-1):]] ^ int(seq[-order]))
    return seq
|
aa56551194637827a7dfb5a50e073e71d3589846
| 28,498
|
from typing import Dict
from typing import Any
from typing import Union
def get_cluster_mode(
        cycle_config: Dict[str, Any],
) -> Union[None, str]:
    """
    Decode the cluster mode from the cycle configuration. Currently only a
    `slurm`, `local`, or None mode are available.

    Parameters
    ----------
    cycle_config : Dict[str, Any]
        The cluster mode is searched within this cycle configuration.

    Returns
    -------
    mode : str or None
        The specified cluster mode. If the mode is None, then no mode was found.
    """
    try:
        slurm_enabled = cycle_config['CLUSTER']['slurm']
    except KeyError:
        # Missing CLUSTER section or slurm key means no mode is configured.
        return None
    return 'slurm' if slurm_enabled else 'local'
|
6ac157e812dfbe3627d1f3bf363b774f083801f8
| 28,499
|
def nmove(m: int, n: int) -> float:
    """Minimum number of moves for Towers-of-Hanoi-style puzzles.

    :param m: number of disks
    :param n: number of rods
    :return: minimum number of moves (inf when impossible)
    """
    n = min(m + 1, n)
    if n == 2:
        # Two rods can only ever move a single disk.
        return 1 if m == 1 else float("inf")
    if n == 3:
        # Classic Hanoi closed form.
        return 2 ** m - 1
    if n == m + 1:
        # A spare rod per disk: park each disk once and bring it back.
        return 2 * m - 1
    # Frame-Stewart style: try every split of the stack, take the cheapest.
    best = float("inf")
    for split in range(1, m):
        candidate = nmove(split, n) * 2 + nmove(m - split, n - 1)
        if candidate < best:
            best = candidate
    return best
|
7a3206eeba5c4036a8636b2344a9a1b904d7a6aa
| 28,500
|
def _select_last_iteration_only(jobs):
"""
Looks through a list of jobs and removes duplicates (only keeps last iteration of a particular job)
"""
jobs_by_name = {}
for job in jobs:
if job.name in jobs_by_name:
jobs_by_name[job.name].append(job)
else:
jobs_by_name[job.name] = [job]
selected_jobs = []
for name, named_jobs in jobs_by_name.items():
named_jobs = sorted(named_jobs, key=lambda j: j.id, reverse=True)
selected_jobs.append(named_jobs[0])
return selected_jobs
|
cd00f51ec3be8349538daf7470552b735eda2902
| 28,501
|
def get_the_attr_from_objList(obj_list, attr_by, attr_by_value, target_attr, attr_exclude=None, attr_exclude_value=None):
    """
    Find, in a list of objects, the first one whose `attr_by` equals
    `attr_by_value` and return that object's `target_attr`; objects whose
    `attr_exclude` matches any of the comma-separated `attr_exclude_value`
    entries are skipped.

    :param obj_list: list of objects, e.g. [{}, {}, {}]
    :param attr_by: attribute name used to match an object
    :param attr_by_value: value that `attr_by` must equal
    :param target_attr: name of the attribute whose value is returned
    :param attr_exclude: attribute name used to exclude objects
    :param attr_exclude_value: value(s) to exclude, comma separated
    :return: the target value, or an error string when nothing matches
    """
    excluded_values = []
    if attr_exclude and attr_exclude_value:
        excluded_values = [item.strip() for item in attr_exclude_value.split(',')]
    for obj in obj_list:
        # Skip objects whose exclusion attribute matches an excluded value.
        if excluded_values and obj[attr_exclude] in excluded_values:
            continue
        # First object whose match attribute hits wins.
        if obj[attr_by] == attr_by_value:
            try:
                return obj[target_attr]
            except KeyError:
                return 'no match target_attr'
    return 'no match data'
|
b9890a0dccd9ea192074fc005d1f192fd8631700
| 28,502
|
def extract_hashtags(hashtags_string):
    """
    Split the string of hashtags into a list of hashtags.

    :param hashtags_string: hashtags as a single string
    :return: list of hashtags
    """
    return hashtags_string.split()
|
9ead711230f55b8a2ba3ab29a3c3c09ffb86184c
| 28,507
|
def sampleFunction2(x2: int, y2: float) -> float:
    """
    Multiply int and float sample.

    :param x2: x value
    :type x2: int
    :param y2: y value
    :type y2: float
    :return: result
    :return type: float
    """
    product = x2 * y2
    return product
|
554707befa81313f7ca084afbb39b5a75f44040e
| 28,509
|
def midi2freq(midi_number):
    """
    Given a MIDI pitch number, returns its frequency in Hz.
    Source from lazy_midi.
    """
    MIDI_A4 = 69   # MIDI pitch number of concert A
    FREQ_A4 = 440. # Hz
    semitones_from_a4 = midi_number - MIDI_A4
    return FREQ_A4 * 2 ** (semitones_from_a4 * (1. / 12.))
|
995453032f5280e5b8a28a2265ef2c4de8f83b8e
| 28,510
|
def data(package):
    """Return the PackageData for the given package."""
    return getattr(package, '__rdata__')
|
20000314ab22a603aad619f139fcda65f5f5a15f
| 28,511
|
import json
def format_str(str_value, is_json):
    """
    Return the value wrapped in break lines; when is_json is True the value
    is first pretty-printed as sorted, indented JSON.

    :param str_value: plain text or json value
    :param is_json: Boolean
    :return: str
    """
    if is_json:
        str_value = json.dumps(str_value, indent=4, sort_keys=True)
    return '\n {} \n'.format(str_value)
|
3840a355b44404a0c0022f0297453023a2eb698a
| 28,513
|
def conv_unit(x, unit1, unit2):
    """Utility function for pressure unit conversion.

    Converts `x` from `unit1` to `unit2` via atmospheres.
    Raises ValueError for unknown units.
    """
    units = {"atm": 1,
             "kPa": 101.3,
             "in Hg": 29.92,
             "mm Hg": 760,
             "torr": 760,
             "psi": 14.69,
             "Pa": 1.013e5
             }
    for unit in (unit1, unit2):
        if unit not in units:
            available = "".join([i+", " for i, j in units.items()])[:-2]
            raise ValueError("'{}' not defined in unit conversion dictionary. Available units are: {}".format(unit, available))
    # Normalise to atmospheres, then scale to the target unit.
    return x / units[unit1] * units[unit2]
|
ac7c8230336a44736b1a1a0a45125f59de58bf69
| 28,514
|
def default_empty(default):
    """Check an input against its falsy value or return a default.

    Returns a closure that substitutes `default` for falsy inputs.
    """
    def get_value(test_value):
        return test_value if test_value else default
    return get_value
|
b3ff34d8fb5d46a62ad11a439dad7d03fb4425f2
| 28,515
|
def _find_error_code(e):
"""Gets the approriate error code for an exception e, see
http://tldp.org/LDP/abs/html/exitcodes.html for exit codes.
"""
if isinstance(e, PermissionError):
code = 126
elif isinstance(e, FileNotFoundError):
code = 127
else:
code = 1
return code
|
a9e14ec1cf01f0eba65b231aa7a40afaf4712535
| 28,516
|
def take_guess(force_upper: bool = True) -> str:
    """Take input from user and handle mistakes.

    Re-prompts until the guess is exactly 5 characters long; optionally
    uppercases the result before returning it.
    """
    guess = input()
    while len(guess) != 5:
        print("Your guess is the incorrect length. It must be 5 characters. "
              "Try again:")
        guess = input()
    return guess.upper() if force_upper else guess
|
76139aa17c1cbba0b2ad83add7040721156fab45
| 28,519
|
def hgcSiSensorCCE(sensor,version):
    """
    this method returns different parameterizations of the charge collection efficiency (CCE)
    for different sensor types (sensor) and measurement versions (version)

    sensor = 120,200,300  (sensor thickness — presumably microns; confirm)
    version = TDR_{600V,800V} - TDR based measurements at different voltages
              TTU_{600V,800V} - Texas Tech based measurements at different voltages
              CERN21_{600V,800V}_{annealing} - CERN2021 based measurements at different voltages and annealing times

    Each entry is a 3-element list of fit parameters; their exact meaning is
    defined by the consumer of this table (not visible here).
    if the pair (sensor,version) is unknown a ValueError exception is raised
    """
    # NOTE(review): the docstring advertises TTU_* versions, but no TTU branch
    # exists below — such requests fall through to the ValueError.
    if version=='TDR_600V':
        if sensor==120 : return [3.5e+15,10.31,-0.2635]
        elif sensor==200: return [9e+14,8.99,-0.241]
        elif sensor==300: return [3e+14,10.16,-0.2823]
    elif version=='TDR_800V':
        if sensor==120 : return [3.5e+15,10.39,-0.2638]
        elif sensor==200: return [1.5e+15,10.41,-0.2779]
        elif sensor==300: return [5e+14,12.59,-0.3501]
    elif version=='CERN21_600V_10m':
        if sensor==120 : return [1.35e+15,9.591,-0.2452]
        elif sensor==200: return [9e+14,11.95,-0.3186]
        elif sensor==300: return [5.85e+14,9.701,-0.2668]
    elif version=='CERN21_600V_30m':
        if sensor==120 : return [1.35e+15,8.362,-0.2105]
        elif sensor==200: return [9e+14,15.48,-0.4191 ]
        elif sensor==300: return [5.85e+14,9.89,-0.2699]
    elif version=='CERN21_600V_90m':
        if sensor==120 : return [1.35e+15,7.769,-0.1954]
        elif sensor==200: return [9e+14,8.983,-0.2354]
        elif sensor==300: return [5.85e+14,8.79,-0.2377]
    elif version=='CERN21_600V_120m':
        if sensor==120 : return [1.35e+15,7.119,-0.1775]
        elif sensor==200: return [9e+14,8.647,-0.2257 ]
        elif sensor==300: return [5.85e+14,9.369,-0.2544]
    elif version=='CERN21_800V_10m':
        if sensor==120 : return [1.35e+15,8.148,-0.2031]
        elif sensor==200: return [9e+14,7.32,-0.1833]
        elif sensor==300: return [5.85e+14,11.45,-0.3131]
    elif version=='CERN21_800V_30m':
        if sensor==120 : return [1.35e+15,7.097,-0.1731]
        elif sensor==200: return [9e+14,13.68,-0.3653]
        elif sensor==300: return [5.85e+14, 10,-0.269]
    elif version=='CERN21_800V_90m':
        if sensor==120 : return [1.35e+15,6.387,-0.155]
        elif sensor==200: return [9e+14,7.739,-0.198]
        elif sensor==300: return [5.85e+14,7.701,-0.2023]
    elif version=='CERN21_800V_120m':
        if sensor==120 : return [1.35e+15,5.997,-0.1443]
        elif sensor==200: return [9e+14,7.172,-0.1821]
        elif sensor==300: return [5.85e+14,7.855,-0.2068]
    # Unknown (sensor, version) combination.
    raise ValueError('sensor={} version={} is unknown to retrieve CCE parameterization for HGC Si sensors'.format(sensor,version))
|
16b1d8c80bee9e12f28de801ebc7f7a38b4166b4
| 28,520
|
def make_behaving(ary, dtype=None):
    """ Make sure that `ary` is a "behaving" bohrium array of type `dtype`.

    Requirements for a behaving array:
      * Is a bohrium array
      * Points to the first element in the underlying base array (no offset)
      * Has the same total length as its base

    Parameters
    ----------
    ary : BhArray
        The array to make behaving
    dtype : boolean, optional
        The return array is converted to `dtype` if not None

    Returns
    -------
    A behaving BhArray that might be a copy of `ary`

    Note
    ----
    Use this function to make sure that operands given to `execute()` are
    "behaving", i.e. the kernel can access the arrays without worrying
    about offset and stride.
    """
    # Flatten (with copy) only when the array is not already behaving.
    result = ary if ary.isbehaving() else ary.flatten(always_copy=True)
    if dtype is not None:
        result = result.astype(dtype)
    return result
|
e74c3c276ef2f5eb0ddb3d9b7870af0035cdaab4
| 28,521
|
from pathlib import Path
def file_exists(filename):
    """ Returns true if file exists

    :param filename: The file name to check
    :type filename: str
    :returns: True if file exists
    :rtype: bool
    """
    # is_file() already returns a bool; the if/else ladder was redundant.
    return Path(filename).is_file()
|
5800f9b15aa869993e8bc0decea0ea1bf4a449e6
| 28,523
|
import os
import errno
def is_running_posix(pid):
    """Return True if process of pid is running.

    Args:
        pid(int): pid of process which this function checks
            whether it is running or not.
    Returns:
        bool: True if process of pid is running.
    Raises:
        OSError if something happens in os.kill(pid, 0)
    """
    try:
        os.kill(pid, 0)  # signal 0 performs error checking only, sends nothing
    except OSError as err:
        # ESRCH: no such process. EPERM: exists but we may not signal it;
        # original treats that as "not running" too.
        if err.errno in (errno.ESRCH, errno.EPERM):
            return False
        raise
    return True
|
14952ab7ffafcac9a187a498eddb6b8317cb87aa
| 28,525
|
def trim_streams(streams, start = None, end = None):
    """
    Trims streams to the same overall time span.

    `start`/`end` only seed the detection: a later stream start (or an
    earlier stream end) still tightens the window, as in the original.

    :return: list of trimmed streams
    """
    latest_start = start
    earliest_end = end
    for stream in streams:
        stream_start = min(trace.stats.starttime for trace in stream)
        stream_end = max(trace.stats.endtime for trace in stream)
        # Falsy seed (None) adopts the first stream's bounds outright.
        if not latest_start or stream_start > latest_start:
            latest_start = stream_start
        if not earliest_end or stream_end < earliest_end:
            earliest_end = stream_end
    return [stream.slice(latest_start, earliest_end) for stream in streams]
|
8d0090319b969b96a30656bf54530bef6ab54dcb
| 28,526
|
def center_y(display_height: int) -> int:
    """
    Find the vertical center position of given screen/space/display.

    Parameters:
        display_height (int): The character height of the screen/space/display
    Returns:
        (int): Vertical character number
    """
    # Doc fix: the parameter was previously documented as "display_width".
    return display_height // 2
|
b59a325ef4fb84d306f22c7c5717b1d9a155ed45
| 28,527
|
def parse_syntax(line):
    """
    >>> parse_syntax('syntax: glob')
    'glob'
    >>> parse_syntax('syntax: regexp')
    'regexp'
    >>> parse_syntax('syntax: none')
    Traceback (most recent call last):
    ...
    Exception: Unknown syntax "none"
    """
    # Normalise "syntax: glob" to two whitespace-separated tokens.
    normalized = line.replace(':', ' ')
    _, syntax = normalized.split()
    if syntax not in ('glob', 'regexp'):
        raise Exception('Unknown syntax "%s"' % syntax)
    return syntax
|
8f619b216ede8cc9bdd8198f6433add997c87768
| 28,528
|
def get_hosts_content():
    """ Return the lines of the /etc/hosts file as a list of strings. """
    with open("/etc/hosts", "r") as hosts_file:
        return hosts_file.readlines()
|
9807f5b4cbf23c83c8a3d82768d0086617b8c510
| 28,530
|
def boundCreator(mus, sigmas, c=3):
    """
    Creates a symmetric interval of c standard deviations around the mean.

    Parameters
    ----------
    mus : array of shape [Num flights, time-steps, num of features]
        the mean temporal values to be used.
    sigmas : array of shape [Num flights, time-steps, num of features]
        the temporal standard deviation values to be used.
    c : number, optional
        Half-width in standard deviations. The default is 3.

    Returns
    -------
    upper : array
        upper bound (mus + c*sigmas).
    lower : array
        lower bound (mus - c*sigmas).
    """
    margin = c * sigmas
    return mus + margin, mus - margin
|
5ec6af891a0c39fd6f51e961de13a0db946a20cc
| 28,531
|
def coerce_to_bool(x: str) -> bool:
    """Convert string to bool value.

    Accepts true/yes/on/1 and false/no/off/0 (case-insensitive);
    raises ValueError for anything else.
    """
    lowered = x.lower()
    if lowered in ('true', 'yes', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'off', '0'):
        return False
    raise ValueError()
|
d8da41c00ad94e75fd20f3c4a4aaaf8fb04cbb08
| 28,532
|
from typing import Any
from typing import Optional
from typing import List
import os
def _add_config_from_env(app: Any, config_key: str, env_variable: str,
missing_list: Optional[List[str]] = None,
default_value: Any = None)-> bool:
"""
Function for adding configuration variables to a Flask app from environment
variables.
:param app: Flask app object
:param config_key: the name of the config key in the app: app.config[config_key]
:param env_variable: the name of the environment variable in which the value is stored
:param missing_list: a list of strings to which missing environment variables
are added. Can be omitted.
:param default_value: if value is missing, set config value to this.
:return: True if successful, False if environment variable was undefined
"""
val = os.environ.get(env_variable, None)
if val is not None:
app.config[config_key] = val
return True
elif default_value:
app.config[config_key] = default_value
return True
if missing_list is not None:
missing_list.append(env_variable)
return False
|
e2e41ab4890f7a9fc5d9ed6ba0d15dc448d18196
| 28,533
|
def mock_health_response(status) -> dict:
    """
    Mocks the Elasticsearch cluster health response.

    :param status: value placed in the "status" field
    :return: dict mimicking the /_cluster/health payload
    """
    return {
        "cluster_name": "elasticsearch",
        "status": status,
        "timed_out": False,
        "number_of_nodes": 1,
        "number_of_data_nodes": 1,
        "active_primary_shards": 9,
        "active_shards": 9,
        "relocating_shards": 0,
        "initializing_shards": 0,
        "unassigned_shards": 5,
        "delayed_unassigned_shards": 0,
        "number_of_pending_tasks": 0,
        "number_of_in_flight_fetch": 0,
        "task_max_waiting_in_queue_millis": 0,
        "active_shards_percent_as_number": 64.2857142857,
    }
|
fbe90ac9b3eedb724ee3136a390a542d98e1f228
| 28,534
|
import sys
import subprocess
def subprocess_check_output(cmd, *args, **kwargs):
    """
    Wrapper around subprocess.check_output which is not available under
    Python 2.6: on 2.6 the output is captured manually via Popen, newer
    interpreters delegate straight to subprocess.check_output.
    """
    if sys.version_info >= (2, 7, 0):
        return subprocess.check_output(cmd, *args, **kwargs)
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs
    )
    return proc.communicate()[0]
|
dab7cad1439d7c6ff157d023acde1b982259ff57
| 28,535
|
import numpy
def get_negatives_positives(score_lines):
    """Take the output of load_score and return (negatives, positives).

    This function aims to replace split_four_column and split_five_column but
    takes a different input. It's up to you to use which one.
    """
    # Genuine scores are those where the claimed identity matches the real one.
    is_genuine = score_lines["claimed_id"] == score_lines["real_id"]
    scores = score_lines["score"]
    return (scores[numpy.logical_not(is_genuine)], scores[is_genuine])
|
85b042099214a89db58d8dc4ffcd0b05846153fe
| 28,536
|
def get_ua_platform(user_agent):
    """Classify a parsed user agent as "mobile", "tablet", "pc" or "unknown"."""
    # Same priority order as the original if/elif chain.
    for label, flag in (
        ("mobile", user_agent.is_mobile),
        ("tablet", user_agent.is_tablet),
        ("pc", user_agent.is_pc),
    ):
        if flag:
            return label
    return "unknown"
|
be735fa118fd09a25f3120dda8bdb1c45943b859
| 28,538
|
import numpy
def psfSharpness(psf):
    """
    Calculates how 'sharp' the PSF is as defined here by how large
    the mean frequency component is. The idea is that a better average
    PSF will be less blurred out, so it will have more power in
    the larger frequencies.

    :param psf: 3D array (point spread function)
    :return: mean of the PSD weighted by the product of the three
        per-axis frequency magnitudes
    """
    psd = numpy.abs(numpy.fft.fftn(psf))**2
    k1 = numpy.abs(numpy.fft.fftfreq(psf.shape[0]))
    k2 = numpy.abs(numpy.fft.fftfreq(psf.shape[1]))
    k3 = numpy.abs(numpy.fft.fftfreq(psf.shape[2]))
    # Ignore the highest frequencies as these are mostly pixel noise.
    k1[(k1 > 0.4)] = 0
    k2[(k2 > 0.4)] = 0
    # BUGFIX: was `k2[(k3 > 0.4)] = 0`, which zeroed entries of the wrong
    # axis and left the third axis' high frequencies unfiltered.
    k3[(k3 > 0.4)] = 0
    [m_k1, m_k2, m_k3] = numpy.meshgrid(k1, k2, k3, indexing = 'ij')
    return numpy.mean(psd * m_k1 * m_k2 * m_k3)
|
8b195719b7f28395a107f59ae2cbdfc70a3443b2
| 28,540
|
def average(a, b):
    """Return the arithmetic mean of *a* and *b*.

    (Usable as a node in a Tangled graph, per the original note.)
    """
    total = a + b
    return total / 2
|
08dc564389af2a9eb18c37c2cfd5ca09f43ab130
| 28,541
|
import socket
def dns_exchange(dnsIp, dnsQuery, dnsAnswer):
    """Send a DNS query over UDP (port 53) and wait up to 1s for the answer.

    :param dnsIp: IP address of the DNS server
    :param dnsQuery: query object providing serialize() and header.ident
    :param dnsAnswer: answer object providing unserialize() and .name
    :return: the resolved name, or None if the reply could not be parsed or
        its transaction id did not match the query. Socket errors (including
        timeouts) propagate to the caller, as in the original.
    """
    result = None
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.settimeout(1)
        query = dnsQuery.serialize()
        sock.sendto(query, (dnsIp, 53))
        answer = sock.recv(1024)
        try:
            dnsAnswer.unserialize(answer)
            # Only trust replies whose transaction id matches our query.
            if dnsQuery.header.ident == dnsAnswer.query.header.ident:
                result = dnsAnswer.name
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Malformed replies are still
            # treated as best-effort "no result".
            pass
    finally:
        sock.close()
    return result
|
532b3c1dbc5fe0f2f75ce53ccc5b123fb22d4fdf
| 28,542
|
def format_field(val: int, show: bool) -> str:
    """Right-align *val* in an 8-character field when *show* is set, else ''."""
    if not show:
        return ""
    return f'{val:>8}'
|
b4a0af9eecf1d48c09dacafa9e239e0b6a7f3370
| 28,543
|
def create_deterministic_delayer(delay):
    """
    Create a deterministic delayer that always returns the same delay value
    Args:
        delay (float): Delay value to return; must be non-negative
    Returns:
        function: zero-argument delayer that always returns *delay*
    Raises:
        ValueError: if *delay* is negative
    """
    # Validate explicitly instead of using `assert`, which is stripped
    # when Python runs with -O and would silently accept bad input.
    if delay < 0:
        raise ValueError("Inputted delay must be non-negative!")
    return lambda: delay
|
8d25603dc273b631eef2e270753dbb6ae4c7d959
| 28,545
|
def num_of_words_in_string(strng):
    """
    :param strng: The string of which the words should be counted.
    :returns: The number of whitespace-separated words in the string.
    :type strng: str
    :rtype: int
    """
    # BUGFIX: split() with no separator collapses runs of whitespace and
    # yields [] for empty/blank input; the original split(' ') counted
    # every empty field as a word ("" -> 1, "a  b" -> 3).
    return len(strng.split())
|
a35335fbf456887ff88ff1ee229eeaf1b9f9f14d
| 28,546
|
def check_bounds(position, size):
    """
    Checks whether a coordinate is within the indices of a grid.
    Parameters
    ----------
    position: list
        the coordinate to check for within the grid
    size: int
        the size of the grid to compare the coordinate values to
    Return
    ----------
    boolean:
        True if every component lies in [0, size), False otherwise
    """
    return all(0 <= coord < size for coord in position)
|
f93721e2b852df8e2e52d0e8fb7157f60bda7536
| 28,548
|
def field_validator(value, validators: set) -> bool:
    """
    Execute a set of validator functions (the _is_x) against a value.
    Return True if any of the validators accepts the value.
    """
    # Generator form short-circuits on the first passing validator instead
    # of materializing a throwaway list of True sentinels.
    return any(validator(value) for validator in validators)
|
d8331dac68e85b1725280af9df6b71f8f0c46465
| 28,549
|
import re
def get_sort_version(config, version):
    """
    Returns a form of the version obtained from the execution of
    get_migration_filename_info() call that can be properly sorted.

    (config is unused but kept for interface compatibility.)
    """
    digits_and_dots = re.sub('[^0-9.]+', '', version)
    return tuple(int(part) for part in digits_and_dots.split('.'))
|
19aa947f2389403792838c2586f72692cddca952
| 28,551
|
from typing import Union
import sys
from pathlib import Path
import yaml
def parse_and_validate_conda(conda: Union[str, dict]) -> Union[str, dict]:
    """Parses and validates a user-provided 'conda' option.
    Conda can be one of three cases:
    1) A dictionary describing the env. This is passed through directly.
    2) A string referring to the name of a preinstalled conda env.
    3) A string pointing to a local conda YAML file. This is detected
    by looking for a '.yaml' or '.yml' suffix. In this case, the file
    will be read as YAML and passed through as a dictionary.
    """
    assert conda is not None
    if sys.platform == "win32":
        logger.warning(
            "runtime environment support is experimental on Windows. "
            "If you run into issues please file a report at "
            "https://github.com/ray-project/ray/issues."
        )
    if isinstance(conda, dict):
        return conda
    if isinstance(conda, str):
        yaml_file = Path(conda)
        if yaml_file.suffix not in (".yaml", ".yml"):
            # No YAML suffix: treat as a pre-existing conda environment name.
            return conda
        if not yaml_file.is_file():
            raise ValueError(f"Can't find conda YAML file {yaml_file}.")
        try:
            return yaml.safe_load(yaml_file.read_text())
        except Exception as e:
            raise ValueError(f"Failed to read conda file {yaml_file}: {e}.")
    raise TypeError(
        "runtime_env['conda'] must be of type str or " f"dict, got {type(conda)}."
    )
|
5c3377bbc704c829ed6a3da9c5ffe145dc03ac31
| 28,552
|
def get_fieldnames(content):
    """
    Return the longest Dict Item for csv header writing

    Walks the dicts in *content*, tracking the longest one seen so far
    (ties favor the later item) and, on every iteration, adding any
    not-yet-seen keys of the current champion to the header list.

    NOTE(review): because keys are harvested from the running "longest so
    far" on *every* iteration, the result can contain keys from several
    dicts (each item that was champion at some point), not only the final
    longest one — confirm this is the intended behavior.

    :param content: iterable of dicts (e.g. rows destined for a csv writer)
    :return: list of column names
    """
    item_length = 0
    csv_header = []
    for item in content:
        # '>=' means a later dict of equal length replaces the champion.
        if len(item) >= item_length:
            longest_item = item
            item_length = len(item)
        # Harvest unseen keys from the champion-so-far (runs every iteration).
        for key in longest_item.keys():
            if key not in csv_header:
                csv_header.append(key)
    return csv_header
|
64ece1c8c9fded8fc1d79b98b403ccb5c4caec30
| 28,553
|
def line_filter(regexp, lines):
    """
    Filter each line of input by a regular expression.
    Lines that do not match (at their start, per re.match semantics) are
    dropped from the output.
    """
    return [candidate for candidate in lines if regexp.match(candidate)]
|
8b3ed6abde04a0278fa98d618117bb5ee686baf8
| 28,556
|
def to_native(s):
    """Coerce *s* to str: bytes are decoded as UTF-8, str passes through.

    :type s: Union[str, bytes]
    :rtype: str
    """
    return s.decode('utf-8') if isinstance(s, bytes) else s
|
0db82b972ea7260198e5edd4e147cb2206ec8705
| 28,557
|
def get_nonblocking(queue):
    """Get without blocking from multiprocessing queue.

    :param queue: queue-like object supporting get(block=False)
    :return: the next item, or None if the queue is currently empty
    """
    # Local import because the parameter shadows the stdlib module name.
    import queue as queue_module
    try:
        return queue.get(block=False)
    except queue_module.Empty:
        # BUGFIX: was `except Exception`, which also hid real errors
        # (broken pipes, type errors). Only an empty queue means "no item".
        return None
|
8cb190f8652a8ea54b6f469cac4204b1706b4d6e
| 28,558
|
def move(password, position_x, position_y):
    """Move letter from position_x to position_y.

    The letter is removed from *password* (a list, mutated in place) and
    re-inserted at position_y; the same list object is returned.
    """
    letter = password.pop(position_x)
    password[position_y:position_y] = [letter]
    return password
|
a63124adeb25aeec1e31469d808725f5654b3928
| 28,561
|
def splinter_driver_kwargs():
    """Webdriver kwargs (no overrides by default)."""
    return dict()
|
a4844c63a2723b7b2985723a03a4f1ecab572b1e
| 28,562
|
def predict_raw(model, periods):
    """Return the raw output of a Prophet model.

    Parameters
    ----------
    model : dict
        A trained Prophet model created with init_fit; must hold the fitted
        Prophet instance under 'm' and the logistic-growth cap under 'cap'.
    periods : int
        The number of periods to forecast.

    Returns
    -------
    prophet_output : DataFrame
        The output of m.predict() method of the Prophet class.
    """
    fitted = model['m']
    future = fitted.make_future_dataframe(periods=periods)
    # The cap column is required for the logistic growth case.
    future['cap'] = model['cap']
    return fitted.predict(future)
|
709fbf4757b968d7b6cc882ccea1c6be222d418c
| 28,564
|
def compss_wait_on(obj):
    """
    Dummy compss_wait_on: identity passthrough used when COMPSs is absent.
    :param obj: The object to wait on.
    :return: The same object defined as parameter
    """
    return obj
|
cb433878c460a507d98c1c3a8880626f804b201f
| 28,565
|
from typing import List
def count_trees(tree_map: List[str], dx: int, dy: int) -> int:
    """Count trees ('#') hit while sledding through the map.

    Starting at the top-left, move *dx* columns right for every *dy* rows
    down (rows are sampled via slicing), wrapping horizontally. The starting
    row is never counted.

    Args:
        tree_map (List[str]): a list of '.' and '#' (trees)
        dx (int): horizontal step per sampled row
        dy (int): vertical step
    Returns:
        int: number of trees encountered
    """
    hits = 0
    for step, row in enumerate(tree_map[::dy]):
        if step == 0:
            continue  # the starting square is never checked
        col = (step * dx) % len(row)
        hits += row[col] == '#'
    return hits
|
d6a642761f9ee5043fd771a91d9eb916b752ac55
| 28,566
|
def get_dict_dot(d: dict, key: str, default=None):
    """Get an entry from a dictionary using a dot-notation key,
    e.g. 'this.that.something'.

    Missing keys, non-dict intermediates, None values, or an empty key all
    yield *default*.
    """
    if not (isinstance(d, dict) and key):
        return default
    try:
        segments = key.split(".")
        value = d.get(segments[0], None)
        if value is None:
            return default
        if len(segments) == 1:
            return value
        # Recurse on the remainder of the key past the first dot.
        return get_dict_dot(value, key[len(segments[0]) + 1:], default)
    except KeyError:
        pass
    return default
|
2e179f81ee7d05554150fad2fd8363d44c4b3a88
| 28,568
|
import pandas
def signals_to_positions(signals,
                         init_pos=0,
                         mask=('Buy', 'Sell', 'Short', 'Cover')):
    """
    Translate signal dataframe into positions series (trade prices aren't
    specified.
    WARNING: In production, override default zero value in init_pos with
    extreme caution.

    :param signals: DataFrame indexed by time whose columns include the four
        signal names in *mask*; values are added/subtracted to the running
        position, so they are assumed numeric (0 / falsy = no signal).
    :param init_pos: position carried in before the first row.
    :param mask: column names for (long entry, long exit, short entry,
        short exit), in that order.
    :return: Series of positions, reduced to only the rows where the
        position changed relative to the previous row.
    """
    long_en, long_ex, short_en, short_ex = mask
    pos = init_pos
    ps = pandas.Series(0., index=signals.index)
    for t, sig in signals.iterrows():
        # check exit signals
        if pos != 0: # if in position
            if pos > 0 and sig[long_ex]: # if exit long signal
                pos -= sig[long_ex]
            elif pos < 0 and sig[short_ex]: # if exit short signal
                pos += sig[short_ex]
        # check entry (possibly right after exit)
        if pos == 0:
            if sig[long_en]:
                pos += sig[long_en]
            elif sig[short_en]:
                pos -= sig[short_en]
        ps[t] = pos
    # Keep only change points: drop rows equal to their predecessor.
    return ps[ps != ps.shift()]
|
a6fa124b31f15b88ce8f0a77aa419654df54ba45
| 28,573
|
def get_list_components(namefile):
    """Count distinct components in a medoc list file (COMPO.txt).

    Each line is space-separated with the component name in the third field.

    :param namefile: path to the list file
    :return: number of unique component names (note: despite the name, the
        function returns a count, matching the original interface)
    """
    seen = []
    with open(namefile, 'r') as f:
        for line in f:
            fields = line.replace('\n', '').split(' ')
            compo = fields[2]
            # BUGFIX: the original did `list_comp.append(list_comp)` —
            # appending the list to itself instead of the component name —
            # so the membership test never matched and the result was just
            # the number of lines, not the number of unique components.
            if compo not in seen:
                seen.append(compo)
    return len(seen)
|
7a6b8f8f09c040be80ae5f8696484ab5095af046
| 28,574
|
def edge_list_get_tail_index(edge_list, tail_index):
    """
    Return the first edge in *edge_list* whose tail_index equals the given
    index, or None if no edge matches.
    """
    return next(
        (candidate for candidate in edge_list if candidate.tail_index == tail_index),
        None,
    )
|
1d7f55afec3fb9da269d1c45dd111ce05cb10bd5
| 28,575
|
import math
def _fraction2str(num, decimals=2):
"""
Helper method to pretty print percentages.
Args:
num: A number in the range [0,1].
"""
if isinstance(num, str):
return num
mult = math.pow(10, decimals)
if num < 0.5/mult:
return "0"
ret = str(int(num*mult+.5)/float(mult))
if len(ret) < decimals+2:
ret += "0"
if ret[0] == "0":
return ret[1:]
return ret
|
5b2483fb5c2fe3ec4c543c4d6e1186491bb348dc
| 28,576
|
def all_lines_at_idx(mm, idx_list):
    """ return a list of lines given a list of memory locations
    follow up on all_lines_with_tag
    e.g. all_lines_at_idx(mm, all_lines_with_tag(mm, 'Atom') )
    reads '''
    Atom 0 0 0 0
    Atom 1 1 1 1
    Atom 2 2 2 2
    '''
    Args:
      mm (mmap.mmap): memory map to file
      idx_list (list): a list of memory locations (int)
    Return:
      list: a list of strings, each being the line at idx

    NOTE(review): '\\n' is passed to mm.rfind as a str, which only works on
    Python 2 (Python 3 mmap requires bytes, b'\\n') — confirm the intended
    interpreter before reuse.
    """
    lines = []
    for idx in idx_list:
        mm.seek(idx)
        # row back to beginning of line
        # NOTE(review): rfind's default search range depends on the current
        # file position (set by seek above) in some Python versions —
        # presumably this locates the newline before idx; verify on the
        # target interpreter.
        ibegin = mm.rfind('\n')
        if ibegin == -1:
            ibegin = 0
        # Position at the newline and consume it so the next readline
        # returns the full line containing idx.
        mm.seek(ibegin)
        mm.readline()
        # read desired line
        line = mm.readline()
        lines.append(line)
    return lines
|
2ca06a01773bdf1ab0f94cf5a60405c5f85ca772
| 28,577
|
def get_github_stars(github_page_data):
    """Retrieve number of github stars from github data.

    :param github_page_data: dict with keys 'github_page',
        'github_data_source' ("API" or "webscrape") and 'github_data'
        (parsed JSON dict or scraped page object, respectively)
    :return: the star count, "No github found" when no page is present,
        or "Error" when web scraping fails
    """
    if not github_page_data["github_page"]:
        return "No github found"
    source = github_page_data["github_data_source"]
    data = github_page_data["github_data"]
    if source == "API":
        # API responses are JSON; the count is reported directly.
        return data["stargazers_count"]
    if source == "webscrape":
        try:
            element = data.find("a", {"class": "social-count js-social-count"})
            return element.contents[0].strip()
        except AttributeError:
            return "Error"
    # Unknown data source: same fallback value as the original.
    return "No github found"
|
d6e2109003cac72a1d8953febd4b28409ac2e5b9
| 28,578
|
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs.
"""
return [(attr.name, getattr(obj, attr.name))
for attr in obj.__class__.__attrs_attrs__]
|
6e1ac65ea2b9a136581dfdddf8e789fc8fa9da90
| 28,580
|
def analyze(data_frame, col):
    """
    Group-and-count statistics for a single column.
    :param data_frame: DataFrame
    :param col: column name to group by
    :return: DataFrame with columns [col, 'num', 'percent'], sorted by
        'num' descending; 'percent' is num/total rounded to 2 decimals
    """
    counts = (
        data_frame.groupby([col])[col]
        .count()
        .reset_index(name='num')
        .sort_values(by='num', ascending=False)
    )
    counts['percent'] = round(counts['num'] / counts['num'].sum(), 2)
    return counts
|
75db70376bb8deb563b7d2520fe864f0b798fe09
| 28,581
|
def addElementInTuple(arr, elt):
    """
    Merge *elt* into the dict stored at index 2 of every tuple in *arr*.

    :param list arr: The list to manipulate
        arr format -> [(x,x,{}), (x,x,{}), (x,x,{})]; dicts are mutated
        in place
    :param dict elt: The element to insert
    :return: a new list containing the same (updated) tuples
    """
    updated = []
    for entry in arr:
        entry[2].update(elt)
        updated.append(entry)
    return updated
|
6b9f27dac4f7c4ca46b666315ccacf2341106b2f
| 28,583
|
def safe_col_name(args_pair):
    """Ensure that the column name is safe for SQL (no spaces or punctuation,
    empty names replaced by their column index).
    Typically called with `df.columns = [*map(safe_col_name, enumerate(df.columns.to_list()))]`
    Args:
        args_pair: tuple of arguments from map function in `(idx, col)`
    Returns:
        string: safely formatted string for SQLite
    """
    idx, name = args_pair
    cleaned = name.strip()
    for bad_char in (' ', '.', ','):
        cleaned = cleaned.replace(bad_char, '_')
    return cleaned if cleaned else str(idx)
|
51256b15ee2fcc55cbc77dbdc2aa03408a6f1e26
| 28,585
|
def get_input_method():
    """Collects the input method the user desires.

    Prompts until the user types either CLI or GUI (case-insensitive),
    using the 'loop and a half' pattern so invalid input can be corrected.
    """
    valid = ['CLI', 'GUI']
    while True:
        choice = input('Would you prefer a CLI or GUI?\n>> ').upper()
        if choice in valid:
            print('{0} selected.\n'.format(choice))
            return choice
        print('You didn\'t correctly select :( Please type either CLI or GUI '
              'when prompted.\n')
|
a93989d6141b76e174ecb0231b4eac9297077637
| 28,586
|
import json
import hashlib
def hash(block) -> str:
    """
    Hashes a block
    :param block: <dict> block (see example-block.py)
    :return: <str> hex digest of the block's SHA-256 hash

    NOTE: shadows the built-in hash(); the name is kept for interface
    compatibility with existing callers.
    """
    # Keys are sorted so logically-equal blocks always hash identically.
    # (json.dumps().encode() already yields bytes; the original's extra
    # bytes() wrapper was redundant. Docstring fixed: the return value is
    # a hex string, not bytes.)
    block_encoded = json.dumps(block, sort_keys=True).encode()
    return hashlib.sha256(block_encoded).hexdigest()
|
c580e23d816ac8769b586e40188c5e394b7cd50c
| 28,587
|
def get_datas_from_line(line):
    """Parse a comma-separated line, dropping the first field and scaling
    the remaining numeric fields by 100.

    :param line: comma-separated string; fields after the first are numeric
    :return: list of floats (values * 100)
    """
    fields = line.split(',')
    return [float(field) * 100 for field in fields[1:]]
|
7e0d7cac3bae1f63ce412aeb5b6a60975457af1c
| 28,590
|
def format_sec_fixed(sec):
    """Format seconds as H:MM:SS, with seconds rounded to the nearest integer.

    The total is rounded *before* splitting into fields; the original rounded
    only the seconds field, so e.g. 59.6 rendered as the invalid '0:00:60'
    instead of '0:01:00'.
    """
    total = int(round(sec))
    return '%d:%02d:%02d' % (total // 3600, total % 3600 // 60, total % 60)
|
d6fb346e3a83b451e65277a6301f49b0f91e5efb
| 28,591
|
def temp_column_name(*dataframes):
    """Gets a temp column name that isn't included in columns of any dataframes
    Parameters
    ----------
    dataframes : list of Pandas.DataFrame
        The DataFrames to create a temporary column name for
    Returns
    -------
    str
        String column name that looks like '_temp_x' for some integer x
    """
    i = 0
    while True:
        candidate = "_temp_{}".format(i)
        # FIX: the original bumped i once per *dataframe* containing the
        # candidate, skipping perfectly usable indices when several frames
        # collided; one bump per rejected candidate is sufficient.
        if any(candidate in dataframe.columns for dataframe in dataframes):
            i += 1
        else:
            return candidate
|
307d6d1778ac550c1a0ba468e2fd81206d9c0cb9
| 28,597
|
def get_func_and_script_url_from_initiator(initiator):
    """Remove line number and column number from the initiator.

    Falsy input (None, '') yields ''.
    """
    if not initiator:
        return ""
    without_position = initiator.rsplit(":", 2)[0]
    return without_position.split(" line")[0]
|
9029050bdc567e02dceca9a3967f289fc21324ab
| 28,600
|
def format_array(arr, precision=4):
    """ Create a string representation of a numpy array with less precision
    than the default.
    Parameters
    ----------
    arr : array
        Array to be converted to a string
    precision : int
        Number of significant digits to display for each value.
    Returns
    -------
    str
        Nice string representation of the array ('' when arr is None).
    """
    if arr is None:
        return ""
    template = "{0:.%dg}" % precision
    body = ", ".join(template.format(float(item)) for item in arr)
    return "[" + body + "]"
|
fb97da91c88a769aa95454666f734a6bc68ef4f5
| 28,601
|
def get_default_args():
    """return a dictionary with key as argument name and value as additional arguments"""
    gpt_args = {
        "--num-layers": "2",
        "--hidden-size": "128",
        "--num-attention-heads": "4",
        "--seq-length": "256",
        "--max-position-embeddings": "256",
        "--micro-batch-size": "4",
        "--global-batch-size": "8",
        "--lr-decay-iters": "320000",
        "--lr-decay-style": "cosine",
        "--lr": "0.00015",
        "--min-lr": "1.0e-5",
        "--train-iters": "5000",
        "--tokenizer-type": "PretrainedFromHF",
        "--tokenizer-name-or-path": "gpt2",
        "--data-impl": "mmap",
        "--split": "949,50,1",
        "--distributed-backend": "nccl",
        "--weight-decay": "1e-2",
        "--clip-grad": "1.0",
        "--lr-warmup-fraction": ".01",
        "--fp16": "",
        "--attention-dropout": "0",
        "--hidden-dropout": "0",
    }
    output_args = {
        "--log-interval": "10",
        "--save-interval": "500",
        "--eval-interval": "100",
        "--eval-iters": "10",
        "--checkpoint-activations": "",
    }
    # DATA_ARGS intentionally empty (as in the original).
    return {**gpt_args, **output_args}
|
1a78905824acfbe3ff9c815d752d850dcaf0edb4
| 28,603
|
def add_args(parser):
    """
    parser : argparse.ArgumentParser
    return a parser added with args required by fit
    """
    # (name, type, default, metavar-or-None, help)
    arg_specs = (
        ('--dataset', str, 'lending_club_loan', 'N',
         'dataset used for training'),
        ('--client_number', int, 2, 'NN',
         'number of workers in a distributed cluster'),
        ('--comm_round', int, 100, None,
         'how many round of communications we shoud use'),
        ('--batch_size', int, 256, 'N',
         'input batch size for training (default: 64)'),
        ('--lr', float, 0.01, 'LR',
         'learning rate (default: 0.001)'),
        ('--frequency_of_the_test', int, 30, None,
         'the frequency of the algorithms'),
    )
    for name, arg_type, default, metavar, help_text in arg_specs:
        extra = dict(type=arg_type, default=default, help=help_text)
        if metavar is not None:
            extra['metavar'] = metavar
        parser.add_argument(name, **extra)
    return parser.parse_args()
|
80dc659f99f652afc92eed5816ae5ec263f330f2
| 28,605
|
def _create_merged_rule(rule_1, rule_2):
""" Finds a rule which is the least general rule that is more general than
both argument rules.
"""
neurons = set((l, n) for (l, n, t, b) in rule_1).intersection((l, n) for (l, n, t, b) in rule_1)
new_rule = []
for (l_i, n_i) in neurons:
bigger_t_1 = [t for (l, n, t, b) in rule_1 if (l, n) == (l_i, n_i) and b]
smaller_t_1 = [t for (l, n, t, b) in rule_1 if (l, n) == (l_i, n_i) and not b]
bigger_t_2 = [t for (l, n, t, b) in rule_2 if (l, n) == (l_i, n_i) and b]
smaller_t_2 = [t for (l, n, t, b) in rule_2 if (l, n) == (l_i, n_i) and not b]
if bigger_t_1 and bigger_t_2:
min_t = min(bigger_t_1 + bigger_t_2)
new_rule.append((l_i, n_i, min_t, True))
if smaller_t_1 and smaller_t_2:
max_t = max(smaller_t_1 + smaller_t_2)
new_rule.append((l_i, n_i, max_t, False))
return new_rule
|
d9eac0f9c208a68e6b9c4d888a5fdc51d707d54a
| 28,606
|
def in1d_sorted(ar1, ar2):
    """
    Does the same than np.in1d but uses the fact that ar1 and ar2 are sorted.
    Is therefore much faster.

    Returns a plain [] when either input is empty (as in the original),
    otherwise a boolean array the length of ar1.
    """
    if min(ar1.shape[0], ar2.shape[0]) == 0:  # empty arrays would crash below
        return []
    positions = ar2.searchsorted(ar1)
    # Clamp out-of-range insertion points to a valid index; those entries
    # then compare unequal and come out False.
    positions[positions == len(ar2)] = 0
    return ar2[positions] == ar1
|
768bba5ae0050049bd150ce826ec8d149416607d
| 28,607
|
def black_percentage(rgb):
    """
    rgb: rgb tuple of a pixel, or a single grayscale int
    returns: pixel percentage of black (0-100)
    """
    if isinstance(rgb, int):
        return 100 - rgb
    mean = (rgb[0] + rgb[1] + rgb[2]) / 3
    return 100 - int(mean * 100 / 255)
|
916f2c61e0af68509ba6fa45fe187c5144ba5337
| 28,608
|
def _align(value, alignment):
"""Find the smallest multiple of `alignment` that is at least as large as `value`."""
return ((value - 1) // alignment + 1) * alignment
|
9bbe11caf6221b73778caf9f2583ed3829d22942
| 28,609
|
import csv
def csv_sniff(data, enc):
    """Sniff the CSV dialect of raw bytes and return its delimiter.

    Args:
        data - raw bytes of CSV text, e.g. b"col1,col2,col3"
        enc - python encoding value ('utf_8','latin-1','cp870', etc)
    Returns:
        the delimiter character detected by csv.Sniffer
    """
    decoded = data.decode(enc)
    return csv.Sniffer().sniff(decoded).delimiter
|
93765d83b490f3bc2fb3730c7b7651d602646912
| 28,610
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.