content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def create_dataverse_url(base_url, identifier):
    """Creates URL of Dataverse.
    Example: https://data.aussda.at/dataverse/autnes

    Parameters
    ----------
    base_url : str
        Base URL of Dataverse instance
    identifier : str
        Can either be a dataverse id (long), a dataverse alias (more
        robust), or the special value ``:root``.

    Returns
    -------
    str
        URL of the dataverse

    Raises
    ------
    TypeError
        If ``base_url`` or ``identifier`` is not a string.
    """
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently disable this validation.
    if not isinstance(base_url, str):
        raise TypeError("base_url must be a str")
    if not isinstance(identifier, str):
        raise TypeError("identifier must be a str")
    base_url = base_url.rstrip("/")
    return "{0}/dataverse/{1}".format(base_url, identifier)
|
8dcaacf58c7ca8b601ed2543f8d8de20bbcbc8a2
| 27,906
|
import re
def createMetaFile(bookList, fileCount):
    """
    Create or refresh the meta file.

    meta.txt stores, per book, one line "<name>,<chapter>,<progress>".
    The line of the book currently playing is prefixed with
    ``CURRENTBOOK``; this function strips that marker while remembering
    which line carried it.

    :param bookList: iterable of book names used when creating a fresh file
    :param fileCount: unused; kept for interface compatibility
    :return: index of the line that carried the CURRENTBOOK marker
        (0 if no marker was found or the file was freshly created)
    """
    currentBook = 0
    try:
        # Meta file already exists: rewrite it without the CURRENTBOOK
        # marker, remembering which line (book index) carried it.
        with open('meta.txt', 'r') as f:
            metaContents = f.readlines()
        with open('meta.txt', 'w') as f:
            for i, line in enumerate(metaContents):
                newLine = re.findall('CURRENTBOOK' + '.+', line)
                if newLine:
                    # Drop the 11-char 'CURRENTBOOK' prefix; '.' never
                    # matches '\n', so restore the newline explicitly.
                    f.write(newLine[0][11:] + '\n')
                    currentBook = i
                else:
                    f.write(line)
    except FileNotFoundError:
        # First run: create meta.txt, starting every book at chapter 1
        # with zero fractional progress.  (Was a bare `except`, which
        # hid genuine I/O and parsing errors.)
        chapter = 1
        fractional = 0
        with open('meta.txt', 'w') as f:
            for name in bookList:
                f.write(name + ',')
                f.write(str(chapter) + ',')
                f.write(str(fractional) + '\n')
    return currentBook
|
62bd24a311713164e4f5cec439948faa408f0508
| 27,907
|
def refs_should_be_omitted(ref: str):
    """
    Decide whether a ref must be hidden from the JSON output; anything
    tracking the ``origin`` remote is suppressed.
    @param ref: string containing the ref
    @return: True if this ref should be omitted from the list
    """
    prefix = "origin/"
    return ref[:len(prefix)] == prefix
|
79e923b1be03f0b552c989250bd1cadfc528e25b
| 27,909
|
def isinsetf(s):
    """
    Returns a function which tests whether an element is in a set `s`.
    Examples
    --------
    >>> colors = ['red', 'green', 'blue']
    >>> f = isinsetf(colors)
    >>> map(f, ['yellow', 'green'])
    [False, True]
    """
    # Materialise the set once so every returned predicate call is O(1).
    members = set(s)

    def contains(e):
        return e in members

    return contains
|
492f5381a66ef42670e5dd229c41a5481290114a
| 27,910
|
def ccalc_turbo(x, rgbmax):
    """Map a fraction x to an RGB triple from the 'turbo' colour map.

    Parameters
    ----------
    x : float
        Position along the colour map; assumed in [0, 1] -- values outside
        that range index past the 256-entry tables (TODO confirm callers
        clamp first).
    rgbmax : float
        Per-channel scale factor (e.g. 255 for 8-bit RGB, 1.0 for floats).

    Returns
    -------
    list of float
        [red, green, blue], each channel scaled into [0, rgbmax].
    """
    # 256-entry lookup tables for the red, green and blue channels of the
    # 'turbo' colour map, tabulated over [0, 1].
    r = [0.18995, 0.19483, 0.19956, 0.20415, 0.2086, 0.21291, 0.21708, 0.22111, 0.225, 0.22875, 0.23236, 0.23582, 0.23915, 0.24234, 0.24539, 0.2483, 0.25107, 0.25369, 0.25618, 0.25853, 0.26074, 0.2628, 0.26473, 0.26652, 0.26816, 0.26967, 0.27103, 0.27226, 0.27334, 0.27429, 0.27509, 0.27576, 0.27628, 0.27667, 0.27691, 0.27701, 0.27698, 0.2768, 0.27648, 0.27603, 0.27543, 0.27469, 0.27381, 0.27273, 0.27106, 0.26878, 0.26592, 0.26252, 0.25862, 0.25425, 0.24946, 0.24427, 0.23874, 0.23288, 0.22676, 0.22039, 0.21382, 0.20708, 0.20021, 0.19326, 0.18625, 0.17923, 0.17223, 0.16529, 0.15844, 0.15173, 0.14519, 0.13886, 0.13278, 0.12698, 0.12151, 0.11639, 0.11167, 0.10738, 0.10357, 0.10026, 0.0975, 0.09532, 0.09377, 0.09287, 0.09267, 0.0932, 0.09451, 0.09662, 0.09958, 0.10342, 0.10815, 0.11374, 0.12014, 0.12733, 0.13526, 0.14391, 0.15323, 0.16319, 0.17377, 0.18491, 0.19659, 0.20877, 0.22142, 0.23449, 0.24797, 0.2618, 0.27597, 0.29042, 0.30513, 0.32006, 0.33517, 0.35043, 0.36581, 0.38127, 0.39678, 0.41229, 0.42778, 0.44321, 0.45854, 0.47375, 0.48879, 0.50362, 0.51822, 0.53255, 0.54658, 0.56026, 0.57357, 0.58646, 0.59891, 0.61088, 0.62233, 0.63323, 0.64362, 0.65394, 0.66428, 0.67462, 0.68494, 0.69525, 0.70553, 0.71577, 0.72596, 0.7361, 0.74617, 0.75617, 0.76608, 0.77591, 0.78563, 0.79524, 0.80473, 0.8141, 0.82333, 0.83241, 0.84133, 0.8501, 0.85868, 0.86709, 0.8753, 0.88331, 0.89112, 0.8987, 0.90605, 0.91317, 0.92004, 0.92666, 0.93301, 0.93909, 0.94489, 0.95039, 0.9556, 0.96049, 0.96507, 0.96931, 0.97323, 0.97679, 0.98, 0.98289, 0.98549, 0.98781, 0.98986, 0.99163, 0.99314, 0.99438, 0.99535, 0.99607, 0.99654, 0.99675, 0.99672, 0.99644, 0.99593, 0.99517, 0.99419, 0.99297, 0.99153, 0.98987, 0.98799, 0.9859, 0.9836, 0.98108, 0.97837, 0.97545, 0.97234, 0.96904, 0.96555, 0.96187, 0.95801, 0.95398, 0.94977, 0.94538, 0.94084, 0.93612, 0.93125, 0.92623, 0.92105, 0.91572, 0.91024, 0.90463, 0.89888, 0.89298, 0.88691, 0.88066, 0.87422, 0.8676, 0.86079, 0.8538, 0.84662, 0.83926, 0.83172, 0.82399, 
0.81608, 0.80799, 0.79971, 0.79125, 0.7826, 0.77377, 0.76476, 0.75556, 0.74617, 0.73661, 0.72686, 0.71692, 0.7068, 0.6965, 0.68602, 0.67535, 0.66449, 0.65345, 0.64223, 0.63082, 0.61923, 0.60746, 0.5955, 0.58336, 0.57103, 0.55852, 0.54583, 0.53295, 0.51989, 0.50664, 0.49321, 0.4796]
    g = [0.07176, 0.08339, 0.09498, 0.10652, 0.11802, 0.12947, 0.14087, 0.15223, 0.16354, 0.17481, 0.18603, 0.1972, 0.20833, 0.21941, 0.23044, 0.24143, 0.25237, 0.26327, 0.27412, 0.28492, 0.29568, 0.30639, 0.31706, 0.32768, 0.33825, 0.34878, 0.35926, 0.3697, 0.38008, 0.39043, 0.40072, 0.41097, 0.42118, 0.43134, 0.44145, 0.45152, 0.46153, 0.47151, 0.48144, 0.49132, 0.50115, 0.51094, 0.52069, 0.5304, 0.54015, 0.54995, 0.55979, 0.56967, 0.57958, 0.5895, 0.59943, 0.60937, 0.61931, 0.62923, 0.63913, 0.64901, 0.65886, 0.66866, 0.67842, 0.68812, 0.69775, 0.70732, 0.7168, 0.7262, 0.73551, 0.74472, 0.75381, 0.76279, 0.77165, 0.78037, 0.78896, 0.7974, 0.80569, 0.81381, 0.82177, 0.82955, 0.83714, 0.84455, 0.85175, 0.85875, 0.86554, 0.87211, 0.87844, 0.88454, 0.8904, 0.896, 0.90142, 0.90673, 0.91193, 0.91701, 0.92197, 0.9268, 0.93151, 0.93609, 0.94053, 0.94484, 0.94901, 0.95304, 0.95692, 0.96065, 0.96423, 0.96765, 0.97092, 0.97403, 0.97697, 0.97974, 0.98234, 0.98477, 0.98702, 0.98909, 0.99098, 0.99268, 0.99419, 0.99551, 0.99663, 0.99755, 0.99828, 0.99879, 0.9991, 0.99919, 0.99907, 0.99873, 0.99817, 0.99739, 0.99638, 0.99514, 0.99366, 0.99195, 0.98999, 0.98775, 0.98524, 0.98246, 0.97941, 0.9761, 0.97255, 0.96875, 0.9647, 0.96043, 0.95593, 0.95121, 0.94627, 0.94113, 0.93579, 0.93025, 0.92452, 0.91861, 0.91253, 0.90627, 0.89986, 0.89328, 0.88655, 0.87968, 0.87267, 0.86553, 0.85826, 0.85087, 0.84337, 0.83576, 0.82806, 0.82025, 0.81236, 0.80439, 0.79634, 0.78823, 0.78005, 0.77181, 0.76352, 0.75519, 0.74682, 0.73842, 0.73, 0.7214, 0.7125, 0.7033, 0.69382, 0.68408, 0.67408, 0.66386, 0.65341, 0.64277, 0.63193, 0.62093, 0.60977, 0.59846, 0.58703, 0.57549, 0.56386, 0.55214, 0.54036, 0.52854, 0.51667, 0.50479, 0.49291, 0.48104, 0.4692, 0.4574, 0.44565, 0.43399, 0.42241, 0.41093, 0.39958, 0.38836, 0.37729, 0.36638, 0.35566, 0.34513, 0.33482, 0.32473, 0.31489, 0.3053, 0.29599, 0.28696, 0.27824, 0.26981, 0.26152, 0.25334, 0.24526, 0.2373, 0.22945, 0.2217, 0.21407, 0.20654, 0.19912, 0.19182, 
0.18462, 0.17753, 0.17055, 0.16368, 0.15693, 0.15028, 0.14374, 0.13731, 0.13098, 0.12477, 0.11867, 0.11268, 0.1068, 0.10102, 0.09536, 0.0898, 0.08436, 0.07902, 0.0738, 0.06868, 0.06367, 0.05878, 0.05399, 0.04931, 0.04474, 0.04028, 0.03593, 0.03169, 0.02756, 0.02354, 0.01963, 0.01583]
    b = [0.23217, 0.26149, 0.29024, 0.31844, 0.34607, 0.37314, 0.39964, 0.42558, 0.45096, 0.47578, 0.50004, 0.52373, 0.54686, 0.56942, 0.59142, 0.61286, 0.63374, 0.65406, 0.67381, 0.693, 0.71162, 0.72968, 0.74718, 0.76412, 0.7805, 0.79631, 0.81156, 0.82624, 0.84037, 0.85393, 0.86692, 0.87936, 0.89123, 0.90254, 0.91328, 0.92347, 0.93309, 0.94214, 0.95064, 0.95857, 0.96594, 0.97275, 0.97899, 0.98461, 0.9893, 0.99303, 0.99583, 0.99773, 0.99876, 0.99896, 0.99835, 0.99697, 0.99485, 0.99202, 0.98851, 0.98436, 0.97959, 0.97423, 0.96833, 0.9619, 0.95498, 0.94761, 0.93981, 0.93161, 0.92305, 0.91416, 0.90496, 0.8955, 0.8858, 0.8759, 0.86581, 0.85559, 0.84525, 0.83484, 0.82437, 0.81389, 0.80342, 0.79299, 0.78264, 0.7724, 0.7623, 0.75237, 0.74265, 0.73316, 0.72393, 0.715, 0.70599, 0.69651, 0.6866, 0.67627, 0.66556, 0.65448, 0.64308, 0.63137, 0.61938, 0.60713, 0.59466, 0.58199, 0.56914, 0.55614, 0.54303, 0.52981, 0.51653, 0.50321, 0.48987, 0.47654, 0.46325, 0.45002, 0.43688, 0.42386, 0.41098, 0.39826, 0.38575, 0.37345, 0.3614, 0.34963, 0.33816, 0.32701, 0.31622, 0.30581, 0.29581, 0.28623, 0.27712, 0.26849, 0.26038, 0.2528, 0.24579, 0.23937, 0.23356, 0.22835, 0.2237, 0.2196, 0.21602, 0.21294, 0.21032, 0.20815, 0.2064, 0.20504, 0.20406, 0.20343, 0.20311, 0.2031, 0.20336, 0.20386, 0.20459, 0.20552, 0.20663, 0.20788, 0.20926, 0.21074, 0.2123, 0.21391, 0.21555, 0.21719, 0.2188, 0.22038, 0.22188, 0.22328, 0.22456, 0.2257, 0.22667, 0.22744, 0.228, 0.22831, 0.22836, 0.22811, 0.22754, 0.22663, 0.22536, 0.22369, 0.22161, 0.21918, 0.2165, 0.21358, 0.21043, 0.20706, 0.20348, 0.19971, 0.19577, 0.19165, 0.18738, 0.18297, 0.17842, 0.17376, 0.16899, 0.16412, 0.15918, 0.15417, 0.1491, 0.14398, 0.13883, 0.13367, 0.12849, 0.12332, 0.11817, 0.11305, 0.10797, 0.10294, 0.09798, 0.0931, 0.08831, 0.08362, 0.07905, 0.07461, 0.07031, 0.06616, 0.06218, 0.05837, 0.05475, 0.05134, 0.04814, 0.04516, 0.04243, 0.03993, 0.03753, 0.03521, 0.03297, 0.03082, 0.02875, 0.02677, 0.02487, 0.02305, 0.02131, 0.01966, 
0.01809, 0.0166, 0.0152, 0.01387, 0.01264, 0.01148, 0.01041, 0.00942, 0.00851, 0.00769, 0.00695, 0.00629, 0.00571, 0.00522, 0.00481, 0.00449, 0.00424, 0.00408, 0.00401, 0.00401, 0.0041, 0.00427, 0.00453, 0.00486, 0.00529, 0.00579, 0.00638, 0.00705, 0.0078, 0.00863, 0.00955, 0.01055]
    # Convert the fractional position to the nearest table index (0..255).
    i = int(round(x * 255, 0))
    return [r[i]*rgbmax, g[i]*rgbmax, b[i]*rgbmax]
|
ddfe1cf465cdc804363075b3371214184b1a05e4
| 27,911
|
def to_component_dict(component):
    """Serialise a component into a plain dictionary of its id and name.

    :rtype: ``dict``
    """
    return {'id': component.id, 'name': component.name}
|
8b8102529cfa4bd54cce3a6710cb7db5af7d0320
| 27,913
|
import math
def f(n):
    """
    Define f(n) as the sum of the digit factorials for given number n.
    For example:
    f(342) = 3! + 4! + 2! = 32
    :param n: number
    :return: sum digit factorial
    """
    total = 0
    for digit_char in str(n):
        total += math.factorial(int(digit_char))
    return total
|
334ca97a936876d79643cad70994c3da8cbee98e
| 27,914
|
def get_custom_name_record(ttfont, text):
    """Return a name record by text. Record ID must be greater than 255"""
    # Walk the name table newest-first; only IDs above 255 are custom.
    for record in reversed(ttfont['name'].names):
        if record.nameID <= 255:
            continue
        if record.toUnicode() == text:
            return record
    return None
|
80e6b267753ba0ece3f75dc83c3ace3dfdd1dda0
| 27,915
|
import os
def default_root():
    """
    Return (creating it if needed) the default root for the lilcaches.

    Returns
    -------
    str
        Path to ``~/.lilcache``.
    """
    home_dir = os.path.expanduser("~")
    path = os.path.join(home_dir, ".lilcache")
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists() followed by os.mkdir().
    os.makedirs(path, exist_ok=True)
    return path
|
ecfdb38a58d39e09ffb37b2dbe5b48044c620745
| 27,917
|
def plot_line(x1, y1, x2, y2):
    """Bresenham line drawing algorithm.

    Return a list of points (tuples) along the line from (x1, y1) to
    (x2, y2), both endpoints included.
    """
    dx = x2 - x1
    dy = y2 - y1
    stepy = 1 if dy >= 0 else -1
    stepx = 1 if dx >= 0 else -1
    dy = abs(dy) * 2
    dx = abs(dx) * 2
    x = x1
    y = y1
    pixelpoints = [(x, y)]
    if dx > dy:
        # X is the driving axis.
        fraction = dy - (dx / 2)
        # Was `while x is not x2`: identity comparison only happens to work
        # for CPython's cached small ints and breaks (loops forever) for
        # coordinates outside roughly [-5, 256].  Use value equality.
        while x != x2:
            if fraction >= 0:
                y += stepy
                fraction -= dx
            x += stepx
            fraction += dy
            pixelpoints.append((x, y))
    else:
        # Y is the driving axis.
        fraction = dx - (dy / 2)
        while y != y2:
            if fraction >= 0:
                x += stepx
                fraction -= dy
            y += stepy
            fraction += dx
            pixelpoints.append((x, y))
    return pixelpoints
|
2d0f1e2e9efda98ce19a93ca3e0aa830e7793f64
| 27,918
|
import asyncio
import socket
async def getfirstaddrinfo(
    host, port, family=0, type=0, proto=0, sock=None, flags=0, loop=None
):
    """
    Resolve the host/port pair with the given family/type/proto settings
    and return the first address tuple.  Raises socket.gaierror when no
    result was returned, ValueError when `sock` is combined with explicit
    family/type/proto values.
    """
    if sock is not None:
        if family != 0 or type != 0 or proto != 0:
            raise ValueError(
                "family/type/proto and sock cannot be specified at the same time"
            )
        # Inherit the resolution parameters from the given socket.
        family, type, proto = sock.family, sock.type, sock.proto
    if loop is None:  # pragma: nobranch
        loop = asyncio.get_event_loop()
    result = await loop.getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags
    )
    if result:
        return result[0]
    raise socket.gaierror(  # pragma: nocover
        socket.EAI_NODATA, f"no address info found for {host}:{port}"
    )
|
32d5e6e3559c19f9e25dbdbcebe35aeeb6f4699c
| 27,919
|
def createc_fbz(stm):
    """
    Return the Createc channel feedback z value.

    Parameters
    ----------
    stm : createc.CreatecWin32
        Createc instance

    Returns
    -------
    value : str
    """
    # Thin wrapper around the COM client's feedback-DAC getter.
    client = stm.client
    return client.getdacvalfb()
|
affda33fd1050fdf865544cfc66e3899788fccc2
| 27,922
|
def similar(x, y):
    """
    Jaccard similarity between the word sets of two strings.

    Words are compared case-insensitively; duplicates within a string are
    ignored.

    :param x: first string
    :param y: second string
    :return: float -- length of the intersection of the two strings'
        word sets divided by the length of their union.  Returns 0.0 when
        both strings contain no words (previously this raised
        ZeroDivisionError).
    """
    x_words = {word.lower() for word in x.split()}
    y_words = {word.lower() for word in y.split()}
    union = x_words | y_words
    if not union:
        # Both strings empty or whitespace-only: no basis for similarity.
        return 0.0
    intersection = x_words & y_words
    return len(intersection) / len(union)
|
92edaf8ebcedcbfbb1adf2b87c8d00f159b3ccc8
| 27,923
|
import uuid
import os
def random_fname():
    """Generates a random file name. In the *very* unlikely case that `uuid4`
    generates a file name that already exists, we'll generate a new one until
    a unique file name is generated.
    Returns:
        A unique file name `str`.
    """
    while True:
        candidate = "{}.tmp".format(uuid.uuid4())
        if not os.path.isfile(candidate):
            return candidate
|
1e40db54939f48edb7d03f33026b5af456c8536b
| 27,924
|
from typing import Dict
from typing import List
from typing import Any
def get_entity_embedding(
    examples,
    tokenizer,
    subject_start_marker: str,
    subject_end_marker: str,
    object_start_marker: str,
    object_end_marker: str
) -> Dict[str, List[Any]]:
    """Build a 0/1 mask over input ids marking tokens inside entity spans.

    Tokens strictly between a start marker and the next end marker get 1;
    everything else -- including the marker tokens themselves -- gets 0.
    """
    start_ids = {
        tokenizer.convert_tokens_to_ids(subject_start_marker),
        tokenizer.convert_tokens_to_ids(object_start_marker),
    }
    end_ids = {
        tokenizer.convert_tokens_to_ids(subject_end_marker),
        tokenizer.convert_tokens_to_ids(object_end_marker),
    }
    entity_ids = []
    inside = False
    for token_id in examples["input_ids"]:
        # Leave the span before emitting so the end marker itself gets 0.
        if token_id in end_ids:
            inside = False
        entity_ids.append(1 if inside else 0)
        # Enter the span after emitting so the start marker itself gets 0.
        if token_id in start_ids:
            inside = True
    return {"entity_ids": entity_ids,}
|
b1ae8d1d901d3c2f0ae7e0f6b553bc43edf2a364
| 27,925
|
from datetime import datetime
def convert_time(ts):
    """Format a time.time() timestamp as 'YYYY-MM-DD::HH:MM:SS' (local time)."""
    stamp = datetime.fromtimestamp(ts)
    return stamp.strftime("%Y-%m-%d::%H:%M:%S")
|
195124dac4c4c145c397fe8e4fd10d3ab3d6700f
| 27,926
|
def valid_url_input(zip_url, urls):
    """Helper function, to check if input was valid.

    :param zip_url: the URL supplied by the user
    :param urls: container of accepted URLs
    :return: True if `zip_url` is one of `urls`, else False
    """
    # A membership test already yields the bool; no if/else needed.
    return zip_url in urls
|
5e49598835d478d759e3b64979129de19d2d810d
| 27,927
|
import os
def test_cassandra_tarball():
    """default cassandra tarball of a given version to use for all tests"""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "testdata", "diag", "cassandra")
|
8183ce61799f81bb3a6d73e688f7640effbb9487
| 27,928
|
def isEncodingWith(filePath, encoding):
    """
    Check whether the file at `filePath` decodes cleanly as `encoding`.

    Known issue:
    1. A UTF8-BOM file also decodes as GBK, so isEncodingWith(path, 'GBK')
       returns True for such files.
    """
    def isUTF8(encoding):
        return encoding.lower() in ('utf8', 'utf-8', 'utf_8', 'u8')

    def isUTF8WithBOM(encoding):
        # BUG FIX: must be a 1-tuple.  A bare ('utf_8_sig') is just a
        # string, and `in` then does a *substring* test, so e.g. 'utf_8'
        # wrongly counted as a BOM-aware codec name.
        return encoding.lower() in ('utf_8_sig',)

    # Both 'utf8' and 'utf_8_sig' can open UTF-8 files with or without a
    # BOM:
    # - 'utf8' keeps the BOM in the returned content
    # - 'utf_8_sig' strips the BOM automatically
    # So for UTF-8-family encodings, sniff the first char to decide which
    # codec name is the honest match.
    if isUTF8(encoding) or isUTF8WithBOM(encoding):
        try:
            with open(filePath, encoding='utf8') as fp:
                s = fp.read(1)
            if s == '\ufeff':
                # File starts with a BOM: only the BOM-aware name matches.
                return isUTF8WithBOM(encoding)
            return isUTF8(encoding)
        except UnicodeDecodeError:
            return False
    # Any other encoding: attempt a full decode.
    try:
        with open(filePath, encoding=encoding) as fp:
            fp.read()
        return True
    except Exception:
        return False
|
be8a0f5ac3c33ad96949546f5cb733a053452a14
| 27,929
|
def get_nr_to_check(selection, line_scores):
    """
    Gets the number of checks the annotators should do given a selection and a line_score
    :param selection: selection of the lines to check
    :param line_scores: the lines with the given score
    :return: the number of checks that still need to be performed
    """
    performed = 0
    possible = 0
    for (name, lines), (name2, lines_with_score) in zip(selection, line_scores):
        # Sum the scores already earned for the selected lines; each line
        # can earn at most 2 checks.
        performed += sum(score for (line, score) in lines_with_score if line in lines)
        possible += len(lines) * 2
    return possible - performed
|
4560a1f6a8ab3671b73513e6eab193dd5300ec82
| 27,930
|
def dmc_task2str(domain_name, task_name):
    """Convert domain_name and task_name to a string suitable for environment_kwargs"""
    return f'{domain_name}-{task_name}-v0'
|
5a124b6a73a35fe898f24910d8f8bedda9eaa807
| 27,932
|
def read_pair_align(read1, read2):
    """ Extract read pair locations as a fragment oriented in increasing chromosome coordinates
    :param read1: read #1 of pair in pysam AlignedSegment format
    :param read2: read #2 of pair in pysam AlignedSegment format
    :return 4-item array in the following format: [fragA-start, fragA-end, fragB-start, fragB-end]
    with monotonically increasing chromosome coordinates
    """
    # Shift pysam's 0-based positions to 1-based coordinates.
    r1pos = [p + 1 for p in read1.positions]
    r2pos = [p + 1 for p in read2.positions]
    if read1.mate_is_reverse and r1pos[0] < r2pos[0]:
        # read1 comes first on the chromosome
        return [r1pos[0], r1pos[-1], r2pos[0], r2pos[-1]]
    if read2.mate_is_reverse and r2pos[0] < r1pos[0]:
        # read2 comes first on the chromosome
        return [r2pos[0], r2pos[-1], r1pos[0], r1pos[-1]]
    # Improperly oriented pair (alignment error): signal with empty result.
    return []
|
f9d1476330a8cf1c9e836654d67a8bcda9e18eb7
| 27,933
|
def _xds_version(xds_output_list):
"""Return the version of XDS which has been run."""
for line in xds_output_list:
if "XDS VERSION" in line:
return line.split("XDS VERSION")[1].split(")")[0].strip()
if "XDS" in line and "VERSION" in line:
return line.split("(VERSION")[1].split(")")[0].strip()
raise RuntimeError("XDS version not found")
|
bf0da2a837e2139e9a2e21ed3e743cdc91ea21e7
| 27,934
|
from datetime import datetime
def days(date):
    """Return the fractional number of days elapsed since Jan 1, 00:00 of
    `date`'s own year (zero-based day-of-year)."""
    year_start = datetime(date.year, 1, 1, 0, 0, 0)
    elapsed = date - year_start
    return elapsed.total_seconds() / 24 / 60 / 60
|
cc3ae8f79ac0c4e558b813aaaa928a429ca33006
| 27,935
|
def InitTagger4Sentence(FREQDICT, sentence):
    """
    Dictionary-based initial tagger for a particular language:
    label each word of the sentence with its dictionary tag, falling back
    to FREQDICT["DefaultTag"] for out-of-dictionary words.
    """
    tagged = []
    for word in sentence.strip().split():
        # Unknown words (out-of-dictionary) fall back to the default tag;
        # plug language-specific heuristics in here if needed.
        tag = FREQDICT[word] if word in FREQDICT else FREQDICT["DefaultTag"]
        tagged.append(word + "/" + tag)
    return ' '.join(tagged)
|
761d7e893becdd21fad2e1364bf553051e86a9a5
| 27,937
|
from typing import List
from typing import Union
def normalise(num_list: List[Union[int, float]]) -> List[Union[int, float]]:
    """Normalise values linearly into [0, 1].

    The minimum maps to 0 and the maximum to 1.  When every value is
    identical (zero range) each element maps to 0.0 instead of raising
    ZeroDivisionError.

    :raises ValueError: if `num_list` is empty (from max()/min()).
    """
    max_x = max(num_list)
    min_x = min(num_list)
    span = max_x - min_x
    if span == 0:
        # Degenerate input: all values identical.
        return [0.0 for _ in num_list]
    return [(x - min_x) / span for x in num_list]
|
c76ecd6064b474b8c1e1d7ac8bbe4966518dc76e
| 27,939
|
def getaxeslist(pidevice, axes):
    """Return list of 'axes'.
    @type pidevice : pipython.gcscommands.GCSCommands
    @param axes : Axis as string or list of them or None for all axes.
    @return : List of axes from 'axes' or all axes or empty list.
    """
    if axes is None:
        axes = pidevice.axes
    if not axes:
        return []
    # Wrap a single non-iterable axis into a list; strings are iterable
    # and therefore pass through unchanged.
    return axes if hasattr(axes, '__iter__') else [axes]
|
6a01538eb46a7f19efcc2bfb737bf1945ec4db52
| 27,942
|
def diff(current_block, previous_block, *args):
    """Subtract the leading slice of the previous block from the current block."""
    # Trim the previous block so both operands have matching length.
    trimmed_previous = previous_block[:len(current_block)]
    return current_block - trimmed_previous
|
06c6cd2ba1bc403d25d0dfcb0ae477bca5593ac9
| 27,943
|
import mimetypes
def is_html(path: str) -> bool:
    """
    Determine whether a file is an HTML file or not.
    :param path: the path to the file
    :return: True or False
    """
    html_types = ('application/xhtml+xml', 'text/html')
    mime_type = mimetypes.guess_type(path)[0]
    return mime_type in html_types
|
bfd570f19c78447adf2ab28b2d94f1119922b97d
| 27,945
|
import decimal
import math
def shannon_inform(feature_matrix, gradations_list):
    """
    Procedure to count shannon's information content.
    :param feature_matrix: list of lists with objects of different classes.
    :param gradations_list: list of gradations of feature
    :return: information content value
    """
    # Total number of objects across all class columns.
    total_amount_of_objects = 0
    for column in feature_matrix:
        total_amount_of_objects += len(column)
    parts = list()
    for gradation in gradations_list:
        # Count how often this gradation occurs in each class column.
        gradation_frequencies = list()
        for column in feature_matrix:
            m = 0
            for i in range(len(column)):
                if column[i] == gradation:
                    m += 1
            gradation_frequencies.append(m)
        # Total occurrences of the gradation over all classes; Decimal is
        # used for exact ratio arithmetic below.
        total_gradation_amount = decimal.Decimal(0)
        for x in gradation_frequencies:
            total_gradation_amount += decimal.Decimal(x)
        # Conditional distribution of classes given this gradation
        # (zero where the gradation never occurs).
        parts_of_gradations_in_classes = list()
        for x in gradation_frequencies:
            if x > 0 and total_gradation_amount > 0:
                tmp = decimal.Decimal(x) / decimal.Decimal(total_gradation_amount)
            else:
                tmp = decimal.Decimal(0)
            parts_of_gradations_in_classes.append(tmp)
        # Marginal probability of the gradation itself.
        part_of_gradation = total_gradation_amount / total_amount_of_objects
        # Entropy term: sum of p * log_k(p), with k = number of classes
        # (the log base), weighted by the gradation's marginal probability.
        part = 0
        for x in parts_of_gradations_in_classes:
            if x > 0:
                part += x * decimal.Decimal(math.log(x, len(feature_matrix)))
        part *= part_of_gradation
        parts.append(part)
    # Information content: 1 plus the sum of the (negative) weighted
    # entropy terms.
    result = 0
    for x in parts:
        result += x
    result += 1
    return result
|
02ae72a9ebfe8f29fffbd26d5f9631397906ae9e
| 27,946
|
def parse_dats_information(dats_dict):
    """
    Parse the content of the DATS dictionary and grep the variables of interest for
    the summary statistics.
    :param dats_dict: dictionary with the content of a dataset's DATS.json file
    :type dats_dict: dict
    :return: dictionary with the variables of interest to use to produce the
     summary statistics
    :rtype: dict
    """
    values_dict = {
        "extraProperties": {},
        "keywords": [],
    }
    # Flatten extraProperties into {category: first value}.
    for prop in dats_dict["extraProperties"]:
        values_dict[prop["category"]] = prop["values"][0]["value"]
    for keyword in dats_dict["keywords"]:
        values_dict["keywords"].append(keyword["value"])
    access = dats_dict["distributions"][0]["access"]
    # Authorization is optional in the DATS schema; default to "unknown".
    authorization = "unknown"
    if "authorizations" in access:
        authorization = access["authorizations"][0]["value"]
    distribution = dats_dict["distributions"][0]
    return {
        "title": dats_dict["title"],
        "data_provider": access["landingPage"],
        "authorization": authorization,
        "dataset_size": distribution["size"],
        "size_unit": distribution["unit"]["value"],
        "number_of_files": values_dict["files"] if "files" in values_dict else "",
        "keywords": values_dict["keywords"] if "keywords" in values_dict else "",
    }
|
c1847d6b107ea3277f235d1298030932d2e4ff9b
| 27,947
|
def escapeHTML(txt):
    """transform Unicode character -> DEC numerical entity"""
    # xmlcharrefreplace turns every non-ASCII char into &#NNN;
    encoded = txt.encode("ascii", errors="xmlcharrefreplace")
    return encoded.decode("ascii")
|
b4af51030f8f035017bbbe08dec6c7d794402b65
| 27,948
|
import ast
def stripped_literal(literal):
    """
    Evaluate a literal, ignoring leading/trailing whitespace.

    Handles everything :py:func:`ast.literal_eval` supports, even when the
    literal is surrounded by whitespace.
    """
    cleaned = literal.strip()
    return ast.literal_eval(cleaned)
|
9a2b6eb3af5df23bcd756e4fb261efe420fccaab
| 27,949
|
import re
def only_scripts(input_iterable):
    """
    Given HTML input, transform it by removing all content that is
    not part of a script (between <script>…</script> tags).
    Any non-script content is blanked out. The number of lines
    returned is identical to the number of lines in the input so
    line-number-based error messages will still be accurate.
    input_iterable -- must be iterable
    """
    lines = []
    # Three-state machine: IGNORE (outside any script),
    # LOOK_FOR_END_OF_OPENING_TAG (inside "<script ..." before its '>'),
    # PIPE_TO_OUTPUT (inside the script body).
    start_tag = re.compile(r'(\<\s*script)[\s\>]', re.IGNORECASE)
    end_tag = re.compile(r'\<\/\s*script[\s\>]', re.IGNORECASE)
    state = 'IGNORE'
    for line in input_iterable:
        # A single line may pass through several states, so keep
        # re-processing its remainder until it is consumed (set to None).
        while line:
            if state == 'IGNORE':
                match = start_tag.search(line)
                if match:
                    # found a script tag
                    # Blank everything up to (and including) "<script" so
                    # column positions of what follows stay accurate.
                    line = ' ' * match.end(1) + line[match.end(1):]
                    state = 'LOOK_FOR_END_OF_OPENING_TAG'
                else:
                    lines.append('\n')
                    line = None
            elif state == 'LOOK_FOR_END_OF_OPENING_TAG':
                gt_pos = line.find('>')
                if gt_pos != -1:
                    # Blank out the attribute portion of the opening tag.
                    line = ' ' * (gt_pos + 1) + line[gt_pos + 1:]
                    state = 'PIPE_TO_OUTPUT'
                else:
                    # Opening tag continues on the next line.
                    lines.append('\n')
                    line = None
            elif state == 'PIPE_TO_OUTPUT':
                match = end_tag.search(line)
                if match:
                    # found closing </script> tag
                    line_part = line[:match.start()]
                    # if line is all whitespace, strip it
                    if len(line_part.strip()) == 0:
                        line_part = '\n'
                    lines.append(line_part)
                    # Re-scan the remainder: another <script> may follow
                    # on the same line.
                    line = line[match.end():]
                    state = 'IGNORE'
                else:
                    # if line is all whitespace, strip it
                    if len(line.strip()) == 0:
                        line = '\n'
                    lines.append(line)
                    line = None
    return lines
|
6abb53a92ecf7a993d9ecbc368ab90809d11eeaf
| 27,950
|
def _next_set(args):
"""
Deterministically take one element from a set of sets
"""
# no dupes, deterministic order, larger sets first
items = sorted(list(map(frozenset, args)), key=lambda x: -len(x))
return items[0], set(items[1:])
|
37d1fdf1796d2b0b455f638bc8e03de030d668f0
| 27,951
|
import re
def is_lower_camel_case(id_name):
    """Check if id_name is written in camel case.
    >>> is_lower_camel_case('')
    False
    >>> is_lower_camel_case('_')
    False
    >>> is_lower_camel_case('H')
    False
    >>> is_lower_camel_case('h')
    True
    >>> is_lower_camel_case('hW')
    True
    >>> is_lower_camel_case('HW')
    False
    >>> is_lower_camel_case('HWW')
    False
    >>> is_lower_camel_case('hWhWhWh')
    True
    >>> is_lower_camel_case('helloWorld')
    True
    >>> is_lower_camel_case('HelloWorld')
    False
    >>> is_lower_camel_case('hWorld')
    True
    >>> is_lower_camel_case('hello6orld')
    True
    >>> is_lower_camel_case('hello_world')
    False
    >>> is_lower_camel_case('_hello')
    False
    >>> is_lower_camel_case('hello_')
    False
    >>> is_lower_camel_case('hello-world')
    False
    >>> is_lower_camel_case('helloGoodWorld77')
    True
    """
    # Lowercase first letter, then words that each start with at most one
    # capital followed by lowercase letters/digits.
    pattern = r"[a-z]([A-Z]?[a-z0-9]*)*$"
    return bool(re.match(pattern, id_name))
|
8f560a6bd6ea634526573342a58fbba11f8dab37
| 27,953
|
import re
def config_filename(name, ext="json"):
    """
    >>> config_filename("system global")
    'system_global.json'
    >>> config_filename('system replacemsg webproxy "deny"')
    'system_replacemsg_webproxy_deny.json'
    >>> config_filename("system.*")
    'system_.json'
    """
    # First collapse separators/wildcards into '_', then drop quotes and dots.
    underscored = re.sub("[- *]+", '_', name)
    cleaned = re.sub(r"[\"'\.]", '', underscored)
    return "{}.{}".format(cleaned, ext)
|
906c35bee18ec0128ff18338174eeab93a6e89c2
| 27,954
|
import os
def getOxum(dataPath):
    """
    Calculate the oxum ("<total bytes>.<file count>") for a given path,
    walking the tree recursively.
    """
    file_count = 0
    byte_total = 0
    for root, _dirs, files in os.walk(dataPath):
        for file_name in files:
            full_name = os.path.join(root, file_name)
            byte_total += os.stat(full_name).st_size
            file_count += 1
    return "%s.%s" % (byte_total, file_count)
|
75d0ecf09aa00d015b0d3e768639816143364622
| 27,955
|
def num_to_frak(n):
    """Convert a number to a Fraktur character.
    Args:
        n (int): Index into the lowercase Fraktur alphabet
            (0 -> first letter, 25 -> last; negative indices count
            from the end, as with any str index).
    """
    FRAKTUR_LOWER = "𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷"
    return FRAKTUR_LOWER[n]
|
1de34494d71548526d616ff631253502c14f5dc5
| 27,956
|
def _get_figure_size(numaxes):
"""
Return the default figure size.
Width: 8 units
Height: 3 units for every subplot or max 9 units
Return
------
(width, height)
The figure size in inches.
"""
figure_width = 8
figure_height = max(6, min(numaxes * 3, 10))
return (figure_width, figure_height)
|
bb6f3a08b974cac2d5da2b69eac8653e9b41411e
| 27,957
|
def _simpsons_inner(f, a, f_a, b, f_b):
"""Calculate the inner term of the adaptive Simpson's method.
Parameters
----------
f : callable
Function to integrate.
a, b : float
Lower and upper bounds of the interval.
f_a, f_b : float
Values of `f` at `a` and `b`.
Returns
-------
m : float
Midpoint (the mean of `a` and `b`).
f_m : float
Value of `f` at `m`.
whole : float
Simpson's method result over the interval [`a`, `b`].
"""
# pprint({k: format(v, '0.3f') for k, v in locals().items() if k != 'f'})
m = (a + b) / 2
f_m = f(m)
return (m, f_m, abs(b - a) / 6 * (f_a + 4 * f_m + f_b))
|
e0e9170b8030f8f5c2f66927b91b034d9cd4a82f
| 27,958
|
def portfolio_vol(weights, comvat):
    """
    Weights -> Volatility: sqrt(w.T @ Sigma @ w), where @ is matrix
    multiplication and `comvat` is the covariance matrix.
    """
    variance = weights.T @ comvat @ weights
    return variance ** 0.5
|
529053e5868aad6ac106eb6997e8eb66bf42a3d6
| 27,960
|
import glob
def globimgs(path, globs: list):
    """Glob `path` with several extension patterns and return all matches
    with forward-slash separators."""
    matches = []
    for pattern in globs:
        matches.extend(glob.glob(path + pattern))
    # Normalise Windows backslashes to forward slashes.
    return [m.replace("\\", "/") for m in matches]
|
d9ffdee24fd1de496286e165333232c0b7b087be
| 27,962
|
def upload_to_dict(upload):
    """Creates a Python dict for an Upload database entity.
    This is an admin-only function that exposes more database information than
    the method on Upload.
    """
    return {
        'id': upload.id,
        'flake': upload.flake,
        'filename': upload.filename,
        'mimetype': upload.mimetype,
        'uri': upload.uri,
        'uploader': upload.user.to_dict(),
        'post': upload.post.to_dict(),
    }
|
c6fdc5b53dbbc1fa28e64fb574c5a3919f5e780e
| 27,963
|
def wrong_adjunction(left, right, cup):
    """ Wrong adjunction error. """
    # Suggest the dual of whichever shape was given.
    given, suggested = ("Cup", "Cap") if cup else ("Cap", "Cup")
    return "There is no {0}({2}, {3}) in a rigid category. "\
        "Maybe you meant {1}({2}, {3})?".format(given, suggested, left, right)
|
263684e737a3212a1d44fcd88ba719fc9f1c07a1
| 27,965
|
def brute_force_optimized(game):
    """
    Solves MasterMind by running through generators.
    This saves memory but is dumb in the sense that it returns
    through possible solutions in lexical order.
    Returns the solution translated back into the game colors.
    """
    solutions = game.create_solution_generator()
    i = 0  # number of trials made so far
    # Size of the full search space: colors ** slots.
    n_solutions = len(game.colordict) ** len(game.slots)
    print('i {} and len(solutions) {}'.format(i, n_solutions))
    n_slots = len(game.slots)
    # Opening guess: first half 'a's, second half 'b's (the classic
    # two-color opener).
    trial_inner = ''.join('a' for _ in range(int(n_slots / 2)))
    trial_inner += ''.join('b' for _ in range(int((n_slots + 1) / 2)))
    trial = ''.join(trial_inner)
    result = game.evaluator(trial)
    # Materialise the surviving candidates after the opening guess.
    solutions = [c for c in game.reduce_solution_set(solutions, trial, result)]
    i += 1
    print('n {} and len(s) {} after trial {} with evaluation {}' \
          ''.format(i, len(solutions), trial, game.evaluator(trial)))
    # Keep guessing (and pruning) until at most one candidate remains.
    while len(solutions) > 1:
        trial = ''.join(i for i in game.create_code(solutions))
        result = game.evaluator(trial)
        solutions = [c for c in
                     game.reduce_solution_set(solutions, trial, result)]
        i += 1
        print('n {} and len(s) {} after trial {} with evaluation {}' \
              ''.format(i, len(solutions), trial, result))
    if len(solutions) == 1:
        print(''.join(_ for _ in solutions[0]))
        # Confirm the lone survivor actually matches the challenge before
        # translating it back into the game's colors.
        if ''.join(_ for _ in solutions[0]) == game.challenge:
            return [game.colordict[solutions[0][i]]
                    for i in game.slots], i
        else:
            return 'Challenge {} has no solution - solver terminated ' \
                   'after {} trials'.format(game.challenge, i)
    else:
        # Pruning eliminated every candidate.
        return 'Challenge {}) has no solution - solver terminated ' \
               'after {} trials'.format(game.challenge, i)
|
b408d6ae6c271d23c0ac0f92ecd915a33a994980
| 27,967
|
def mock_url_for(endpoint, **kwargs):
    """Simple mock for :func:`flask.url_for`."""
    # Keyword values become slash-separated path segments, in call order.
    path = '/'.join(str(value) for value in kwargs.values())
    return f'http://{endpoint}/{path}'
|
f95c9ae00915a0d40c2bd3199f77beee8e95383f
| 27,969
|
import hashlib
def get_file_hash(filename, dhash=None, bufsize=None):
    """Feed a file's bytes into a hash object and return that object.

    (The previous docstring said "Return SHA256 hash for file", but the
    function returns the *hash object*, not a digest, and honours any
    algorithm passed via `dhash`.)

    Parameters
    ----------
    filename : str or path-like
        File to hash.
    dhash : hashlib-style hash object, optional
        Existing object to update; a fresh SHA-256 object is created when
        omitted.
    bufsize : int, optional
        Read-chunk size in bytes (default 128 KiB).

    Returns
    -------
    The updated hash object; call ``.hexdigest()``/``.digest()`` on it.
    """
    if dhash is None:
        dhash = hashlib.sha256()
    buffer = bytearray(128 * 1024 if bufsize is None else bufsize)
    # using a memoryview so that we can slice the buffer without copying it
    buffer_view = memoryview(buffer)
    with open(filename, "rb", buffering=0) as fobj:
        while True:
            n_chunk = fobj.readinto(buffer_view)
            if not n_chunk:
                break
            dhash.update(buffer_view[:n_chunk])
    return dhash
|
2309a87660d3940cf30bba5a425863c47e40c184
| 27,970
|
import os
def identify_file_extension(fpath):
    """
    Return the extension of `fpath` (without the leading dot), keeping the
    inner extension for compressed files (e.g. 'a.tar.gz' -> 'tar.gz').

    :param fpath: path to the file
    :return: extension string; '' when the file name has no extension
        (previously this raised IndexError)
    """
    _fp, fn = os.path.split(fpath)
    if '.' not in fn:
        # No extension at all.
        return ''
    comp_ext = fn.rsplit('.', 1)[1]
    is_compressed = comp_ext in ['zip', 'gz', 'gzip', 'bz', 'bz2', 'bzip2']
    if is_compressed:
        # Keep the extension underneath the compression suffix too.
        ext = '.'.join(fn.rsplit('.', 2)[1:])
    else:
        ext = comp_ext
    return ext
|
662049adfbbd36ddb472c084505fe83ebfd02d4d
| 27,971
|
from datetime import datetime
def get_time_obj(date_time_str):
    """Parse a 'dd/mm/yy HH:MM:SS' timestamp string.

    :param date_time_str: timestamp text to parse
    :return: a datetime on success, None (after printing the error) on a
        malformed input
    """
    try:
        return datetime.strptime(date_time_str, '%d/%m/%y %H:%M:%S')
    except ValueError as error:
        print(f'Error: {error}')
    return None
|
77b6dbe20c1e3813ce7ebc9474cfde69c6ad6e09
| 27,972
|
import os
def path_from_root(path):
    """Return *path* resolved relative to the repository root.

    The root is the parent directory of the directory containing this
    script (computed from ``__file__``).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    repo_root = os.path.dirname(here)
    return os.path.join(repo_root, path)
|
e742a36d43634156139f3ce3dd50d64a1ca7e01b
| 27,973
|
def count_envs(lines, ignore_chars=0):
    """Read env counts from lines. Returns dict of {name: {env: count}}.

    Each line is whitespace-split into ``name env [count]``; a missing
    count defaults to 1, and lines with fewer than two fields are skipped.
    Assumes name-env mappings are unique -- a duplicate pair overwrites
    the earlier count rather than adding to it.
    """
    result = {}
    for line in lines:
        fields = line.split()
        # need at least a label and an env field
        if len(fields) < 2:
            continue
        name, env = fields[0], fields[1]
        if ignore_chars:
            env = env[ignore_chars:]
        count = int(fields[2]) if len(fields) > 2 else 1
        result.setdefault(name, {})[env] = count
    return result
|
99bbbbf07a4f3d17a951fc6823d95f73a26fbb55
| 27,977
|
def magnify_contents(contents, features):
    """
    Create additional features in each entry by replicating some columns
    of the original data.

    For every index in *features*, the corresponding column of the entry is
    copied and the position ``i`` is appended as a suffix, so the new
    artificial columns differ from the original values.  The new columns
    are inserted just before the last element of each entry (presumably the
    label column -- TODO confirm against callers).

    :param contents: iterable of list-like entries
    :param features: iterable of column indices to replicate
    :return: new list of magnified entries (input is not mutated)
    """
    magnified_contents = []
    for entry in contents:
        magnified_entry = entry[:-1] + [entry[feature]+str(i) for i, feature
                                        in enumerate(features)] + [entry[-1]]
        magnified_contents.append(magnified_entry)
    return magnified_contents
|
ec2a43cdb280da74b44a6fec96d0708c90d03f18
| 27,978
|
def binary_search(arr, first, last, element):
    """
    Search *element* in the sorted list *arr* within [first, last].

    Returns the index of the first occurrence of the element in the whole
    list (via ``list.index``) once found, or -1 when it is absent.

    Arguments
        arr     : sorted list of elements
        first   : position of the first element of the search range
        last    : position of the last element of the search range
        element : element to search for
    """
    if first > last:
        return -1
    mid = (first + last) // 2
    pivot = arr[mid]
    if element == pivot:
        # list.index gives the first occurrence, matching the contract
        return arr.index(element)
    if element > pivot:
        return binary_search(arr, mid + 1, last, element)
    return binary_search(arr, first, mid - 1, element)
|
d006f751bf13efe04d55ab72e166ea279bef9d3d
| 27,979
|
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
|
3af09d6aae798be53d4f99fb63f17a3fd8e0f3ed
| 27,980
|
import os
def find_files_in_dirs(dirs, extensions=('.wav', '.mp3', '.aif', '.aiff', '.flac')):
    """
    Recursively collect files under the given directories whose extension
    (case-insensitive) is in *extensions*, returning full paths.

    Parameters
    ----------
    dirs : list[str]
        Directories to walk.
    extensions : tuple[str]
        Accepted extensions, each including the leading dot.

    Returns
    -------
    list[str]
        Full paths of the matching files.
    """
    matches = []
    for directory in dirs:
        for root, _subdirs, filenames in os.walk(directory):
            matches.extend(
                os.path.join(root, name)
                for name in filenames
                if os.path.splitext(name)[1].lower() in extensions
            )
    return matches
|
2699df51a429cfaa65a623dfd2bdcd873ec1af49
| 27,981
|
def right_digit(x):
    """Return the rightmost (least-significant) digit of x.

    Fix: the old ``int(x % 10)`` returned the wrong digit for negative
    numbers because Python's modulo follows the divisor's sign
    (e.g. -13 % 10 == 7); taking abs(x) first yields 3 as expected.
    """
    return int(abs(x) % 10)
|
3f52393e9241714839e97a41f858753485cc5c89
| 27,983
|
import random
def generate_string(length: int) -> str:
    """Generate a random alphanumeric string of the given length."""
    symbols: str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    return "".join(random.choice(symbols) for _ in range(length))
|
9e6d4cbccf52f8abb6adf462a9a37b384a707ca3
| 27,985
|
def is_number(n):
    """
    Return True if the value can be parsed as a float.

    Fix: also catch TypeError so that unparsable *types* (None, lists, ...)
    return False instead of raising, matching the predicate's contract.
    """
    try:
        float(n)
    except (ValueError, TypeError):
        return False
    return True
|
d9a2f8e4893b7379c2dcabf24f7f5f731423a753
| 27,987
|
import posixpath
import os
def _JoinPaths(path1, path2, gsutil_path=False):
"""Joins paths using the appropriate separator for local or gsutil."""
if gsutil_path:
return posixpath.join(path1, path2)
else:
return os.path.join(path1, path2)
|
76b85abd71ec811e170ac526cd5047721f94257f
| 27,988
|
import select
def get_hub():
    """
    Return the eventlet hub name: "poll" when select.poll is available,
    otherwise the select-based "selects" hub.
    Note about epoll:
    Review: https://review.openstack.org/#/c/18806/
    There was a problem where once out of every 30 quadrillion
    connections, a coroutine wouldn't wake up when the client
    closed its end. Epoll was not reporting the event or it was
    getting swallowed somewhere. Then when that file descriptor
    was re-used, eventlet would freak right out because it still
    thought it was waiting for activity from it in some other coro.
    """
    # NOTE(review): hasattr() cannot raise ImportError, so this except
    # branch looks unreachable (dead code) -- confirm before removing.
    try:
        if hasattr(select, "poll"):
            return "poll"
        return "selects"
    except ImportError:
        return None
|
e94a6964a40d1f311de5b5c7459d22f9427804cf
| 27,990
|
def death_fraction():
    """
    Constant death fraction: 7 deaths per 1000 people per year.

    Real Name: b'Death Fraction'
    Original Eqn: b'7/1000'
    Units: b'1/Year'
    Limits: (None, None)
    Type: constant
    """
    return 0.007
|
cf6c2988cf79c9638f5c9e401dc5b006d275d544
| 27,992
|
def fx_ugoira_body():
    """Ugoira page data: raw UTF-8 bytes of the mock ugoira HTML page."""
    with open('./tests/mock/ugoira.html') as fobj:
        page = fobj.read()
    return page.encode('u8')
|
2a9ab6295536b049d6d9a409bf1b63a832b98b18
| 27,994
|
import numpy
def meshgrid(xrange, yrange):
    """Build float coordinate grids over arange(*xrange) x arange(*yrange).

    Parameters
    ----------
    xrange, yrange : tuple
        Arguments forwarded to ``numpy.arange`` for the x and y axes.

    Returns
    -------
    (x, y) : ndarray pair
        Both of shape ``(len(yar), len(xar))``: x varies along columns,
        y along rows (the 'xy' indexing convention).
    """
    xar = numpy.arange(*xrange)
    yar = numpy.arange(*yrange)
    # numpy.meshgrid with the default 'xy' indexing reproduces the old
    # hand-rolled repeat/reshape/transpose construction exactly; the
    # unused nx/ny locals of the original are dropped.
    x, y = numpy.meshgrid(xar, yar)
    # multiply by 1. to force float results, as the original did
    return x * 1., y * 1.
|
9d02e56126fa5b2cf84fd8f38ac87a675dca7565
| 27,995
|
from pathlib import Path
def create_flag_file(filepath: str) -> str:
    """
    Create an empty "<filepath>_flag" file used to avoid concurrent
    builds of the same previews.
    :param filepath: file to protect
    :return: flag file path
    """
    flag_path = "{}_flag".format(filepath)
    Path(flag_path).touch()
    return flag_path
|
80ad8e181574600fcb1b9ded6e5c64c3c0d5b457
| 27,996
|
def stringify_ossl_cert(a_cert_obj):
    """ try to stringify a cert object into its subject components.
    E.g. (with extra newline added for line-wrap):
    3E:9C:58:F5:27:89:A8:F4:B7:AB:4D:1C:56:C8:4E:F0:03:0F:C8:C3
    C=US/ST=State/L=City/O=Org/OU=Group/CN=Certy Cert #1

    Accepts a single cert object or a list/tuple of them (joined by ', ').
    NOTE(review): assumes pyOpenSSL-style objects whose
    get_subject().get_components() yields (bytes, bytes) pairs -- confirm.
    """
    # recurse over sequences, joining each stringified cert
    if isinstance(a_cert_obj, (list,tuple)):
        return ', '.join([ stringify_ossl_cert(i) for i in a_cert_obj ])
    # components are bytes pairs like (b'C', b'US'); decode and join as C=US/...
    return '/'.join([ '='.join([ j.decode() for j in i ]) for i in a_cert_obj.get_subject().get_components() ])
|
b6fef23a1d4b8c3ab73f8f2ef9d2f033b0a25514
| 27,998
|
import os
import pickle
def load_prediction_dict():
    """Load the prediction_dict.pkl as a dict.

    The path is resolved relative to this module's directory so the file
    is found regardless of the current working directory.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))
    # SECURITY NOTE: pickle.load executes arbitrary code while unpickling;
    # only load this file from a trusted source.
    with open(os.path.join(cwd, "data/prediction_dict.pkl"), "rb") as f:
        prediction_dict = pickle.load(f)
    return prediction_dict
|
233b7021b40ede54b4045b923a02cbc03bc42678
| 27,999
|
def _get_range_clause(column, value, bucket_interval):
    """Returns an SQL clause specifying that column is in the range
    specified by value. Uses bucket_interval to avoid potentially
    ambiguous ranges such as 1.0B-1.9B, which really means [1B, 2B).

    :param column: column name to compare (assumed trusted -- the clause is
        built by string concatenation, so never pass untrusted input here)
    :param value: range string like "10-19", "-5--1", "1.0M-1.9M"; a value
        with no '-' separator yields an equality clause instead
    :param bucket_interval: width added to the parsed lower bound to form
        the exclusive upper bound
    """
    if value[0] == '-':
        # avoid minus sign with split
        arr = value[1:].split('-', 1)
        arr[0] = '-' + arr[0]
    else:
        arr = value.split('-', 1)
    if len(arr) > 1:
        low = arr[0]
        high = arr[1]
    else:
        return column + " = " + value
    # For M/B-suffixed or integer lower bounds, recompute high from the
    # bucket interval; a '.'-containing unsuffixed low keeps the string
    # bounds parsed from `value` above.
    if low.endswith('M'):
        low = int(round(float(low[:-1]) * 1000000))
        high = low + bucket_interval
    elif low.endswith('B'):
        low = int(round(float(low[:-1]) * 1000000000))
        high = low + bucket_interval
    elif '.' not in low:
        low = int(low)
        high = low + bucket_interval
    # low is inclusive, high is exclusive
    # See https://github.com/elastic/elasticsearch-dsl-py/blob/master/elasticsearch_dsl/faceted_search.py#L125
    return column + " >= " + str(low) + " AND " + column + " < " + str(high)
|
7b0e9da8fa1ac9365e93ccd1137d519f08dadbed
| 28,000
|
def mysterious_func(nums):
    """Count the "holes" in the decimal digits of a number.

    Digits 0, 6 and 9 each enclose one hole; 8 encloses two.
    input: int, number
    output: int, count of how many holes
    ex: getNum(300) #-> returns 2
        getNum(90783) #-> returns 4
        getNum(123321) #-> returns 0
        getNum(89282350306) #-> returns 8
        getNum(3479283469) #-> returns 5
    """
    holes_per_digit = {'0': 1, '6': 1, '8': 2, '9': 1}
    return sum(holes_per_digit.get(digit, 0) for digit in str(nums))
|
c523a516eeab76f7192a5fef1f220ddce85320b3
| 28,002
|
def createNewLoghostConfig(deviceLoghostUndoConfig):
    """
    returns the needed syntax to remove the non-compliant config

    Mutates the given undo-config list in place: prepends 'system-view'
    and appends the compliant info-center commands, then returns it.
    """
    commands = deviceLoghostUndoConfig
    commands.insert(0, 'system-view')
    commands.extend(['info-center enable',
                     'info-center loghost 172.25.32.78'])
    return commands
|
4a0a023280fc1fc1754fe2a0f2b1a0d997abf9fc
| 28,004
|
def zzx_compose_term(f, k):
    """Map y -> x**k in a polynomial in Z[x].

    Inserts k-1 zero coefficients between consecutive coefficients of *f*.
    Raises ValueError for non-positive k; returns *f* unchanged when k == 1
    or the polynomial is empty.
    """
    if k <= 0:
        raise ValueError("'k' must be positive, got %s" % k)
    if k == 1 or not f:
        return f
    out = [f[0]]
    gap = [0] * (k - 1)
    for coeff in f[1:]:
        out += gap + [coeff]
    return out
|
0fa2bc791945d567fa653a2e92f772cde8b93914
| 28,006
|
def createStructuringElement(radius=1, neighborhood="8N"):
    """Create a structuring element function based on the neighborhood and the radius.

    Args:
        radius (integer): Radius of the structuring element excluding the center pixel.
        neighborhood (string): "4N" (Manhattan ball) or "8N" (full square) around the center.

    Returns:
        getStructuringElement (function): Function mapping a center pixel to the set of
        neighborhood coordinates for the configured radius and neighborhood definition.
    """
    def getStructuringElement(center):
        """Return the set of pixel coordinate tuples forming the neighborhood of *center*."""
        cx, cy = center
        span = range(-radius, radius + 1)
        if neighborhood == "4N":
            # 4N: keep offsets whose Manhattan distance is within the radius
            return {(cx + dx, cy + dy)
                    for dx in span for dy in span
                    if abs(dx) + abs(dy) <= radius}
        # default (8N): the full (2r+1) x (2r+1) square
        return {(cx + dx, cy + dy) for dx in span for dy in span}
    # Partial application: the returned closure captures radius/neighborhood.
    return getStructuringElement
|
f99601729155fb6993a63a6317454d9359c4fd69
| 28,008
|
def get_real_coordinates(ratio, x1, y1, x2, y2):
    """
    Transform bounding-box coordinates back to the original image size
    by dividing each coordinate by the resize *ratio*.

    Returns a 4-tuple of ints (x1, y1, x2, y2).
    """
    # NOTE(review): round() after floor division `//` is a no-op -- the
    # quotient is already whole. True division `/` may have been intended;
    # confirm against callers before changing behavior.
    real_x1 = int(round(x1 // ratio))
    real_y1 = int(round(y1 // ratio))
    real_x2 = int(round(x2 // ratio))
    real_y2 = int(round(y2 // ratio))
    return (real_x1, real_y1, real_x2, real_y2)
|
a2876b1c3d91b14f63ea6dca7a4dec3f8a0b6842
| 28,009
|
def get_id_update(update: dict) -> int:
    """Return the update number from a Telegram server response.

    Parameters
    ----------
    update : dict
        Dictionary holding the current response from the Telegram server.

    Returns
    -------
    int
        The number of the current update (``update['update_id']``).
    """
    return update['update_id']
|
68672ff86cda83a11d557ff25f1a206bd1e974b3
| 28,010
|
def earth_radius(units="m"):
    """Return the Earth's radius in the requested units.

    :param units: one of "m" (metres), "km" (kilometres) or "mi" (miles)
    :return: the radius as an int
    :raises ValueError: for an unsupported unit string (the original
        silently returned None, which hid caller typos)
    """
    radii = {"m": 6371000, "km": 6371, "mi": 3959}
    try:
        return radii[units]
    except KeyError:
        raise ValueError("unsupported units: {!r}".format(units))
| 28,012
|
def extract_some_key_val(dct, keys):
    """
    Gets a sub-set of a :py:obj:`dict`.

    Keys that are missing from ``dct`` or mapped to ``None`` are omitted
    from the result.

    :param dct: Source dictionary.
    :type dct: :py:obj:`dict`
    :param keys: List of subset keys, which to extract from ``dct``.
    :type keys: :py:obj:`list` or any iterable.
    :rtype: :py:obj:`dict`
    """
    return {key: dct[key] for key in keys if dct.get(key) is not None}
|
80dff136ada8cfd754e1a02423e7eef364223a48
| 28,013
|
def to_hex_string(i: int) -> str:
    """
    Returns the given integer as an unsigned hex representation
    (lowercase, no '0x' prefix, no leading zeroes; "0" for zero).

    :param i: The integer (must be non-negative).
    :return: The hex-string.
    :raises ValueError: if *i* is negative.
    """
    # Check for non-negative integers only
    if i < 0:
        raise ValueError(f"{to_hex_string.__qualname__} only takes non-negative integers, "
                         f"not {i}")
    # format(i, "x") already yields lowercase hex with no leading zeroes,
    # replacing the old bytes round-trip plus manual zero-stripping loop.
    return format(i, "x")
|
f41567a0f949a3447de09d43e057556ed60a56ef
| 28,016
|
def ddiff_pf_contact(phi):
    """ Double derivative of phase field contact: -(3/2) * phi. """
    return -1.5 * phi
|
53150d05e6c2b6399da503b87c6ff83f2585483b
| 28,018
|
def removeSpaces(string):
    """Returns a new string with spaces removed from the original string
    >>> string = '1 173'
    >>> removeSpaces(string)
    '1173'
    """
    return string.replace(' ', '')
|
ce00687c43ce521c14b578105bd9412c31b9817a
| 28,019
|
from typing import Any
def do_nothing_collate(batch: Any) -> Any:
    """
    Returns the batch as is (without any collation).

    Args:
        batch: input batch (typically a sequence, mapping or mixture of those).

    Returns:
        Any: the batch exactly as given to this function
    """
    return batch
|
45cd76fb2ab1e4ad11053041a70ae9eb9c1948ec
| 28,020
|
def digits():
    """Reference diagram of the seven-segment layouts for digits 0-9.
    0:      1:      2:      3:      4:
     aaaa    ....    aaaa    aaaa    ....
    b    c  .    c  .    c  .    c  b    c
    b    c  .    c  .    c  .    c  b    c
     ....    ....    dddd    dddd    dddd
    e    f  .    f  e    .  .    f  .    f
    e    f  .    f  e    .  .    f  .    f
     gggg    ....    gggg    gggg    ....
    5:      6:      7:      8:      9:
     aaaa    aaaa    aaaa    aaaa    aaaa
    b    .  b    .  .    c  b    c  b    c
    b    .  b    .  .    c  b    c  b    c
     dddd    dddd    ....    dddd    dddd
    .    f  e    f  .    f  e    f  .    f
    .    f  e    f  .    f  e    f  .    f
     gggg    gggg    ....    gggg    gggg
    """
    # NOTE(review): placeholder implementation -- always returns 0; the
    # docstring only documents segment shapes. TODO: implement or remove.
    return 0
|
2c784bf150d1435007f9f95bd36e2b4e769ba34b
| 28,022
|
def braking_index(p0=1.0, p1=1e-12, p2=1e-20):
    """
    Compute the pulsar braking index n = 2 - p0*p2 / p1**2.

    :param p0: spin period
    :param p1: first period derivative (pdot)
    :param p2: second period derivative (pdotdot)
    :return: braking index n
    """
    return 2 - (p0 * p2) / p1 ** 2
|
30bc7d612047a3358991f3ac0730e8660c175e64
| 28,023
|
import re
def server_version_compare(v1, v2):
    """compare Demisto versions

    Quote characters are stripped before comparison; an empty version on
    either side compares as equal. Components beyond the shorter version's
    length are ignored.

    Args:
        v1 (string): string representing Demisto version (first comparable)
        v2 (string): string representing Demisto version (second comparable)

    Returns:
        int.
        0 for equal versions.
        positive if v1 later version than v2.
        negative if v2 later version than v1.
    """
    clean1 = re.sub('[\'\"]', '', v1)
    clean2 = re.sub('[\'\"]', '', v2)
    if not clean1 or not clean2:
        return 0
    nums1 = [int(part) for part in clean1.split(".")]
    nums2 = [int(part) for part in clean2.split(".")]
    # zip stops at the shorter version, matching the original range(min(...))
    for a, b in zip(nums1, nums2):
        if a != b:
            return a - b
    return 0
|
12ad3c03bcef40eeb74d599aacedd195524acc7c
| 28,024
|
def minutes2milliseconds(minutes):
    """
    Converts minutes to milliseconds.

    :param minutes: duration in minutes as string (empty/None/0 yields 0)
    :return: duration in milliseconds as int
    """
    if not minutes:
        return 0
    return round(float(minutes) * 60 * 1000)
|
fbf812340725ff841b93c270cefe3cead04664af
| 28,025
|
import os
import random
import string
def testfile_playbook_generator(testdir):
    """
    Return an object with ``get()`` method to generate a playbook file which
    creates a test file along with expected path and content.
    This is useful when one needs one or more playbooks with simple and easy
    to check side effect.

    NOTE(review): `testdir` looks like the pytest `testdir`/`pytester`
    fixture (provides makefile()) -- confirm.
    """
    class PlaybookGenerator(object):
        # class-level counter so every generated playbook/test file
        # gets a unique suffix across get() calls
        _id = 1
        def get(self):
            # create dummy temp file,
            # so that we can check it's ``.dirname`` attribute later
            dummy_file = testdir.makefile(".dummy", "")
            # define file path for a test file which will be created
            # by ansible-playbook run
            test_file_path = os.path.join(
                dummy_file.dirname,
                "test_file.{0}".format(PlaybookGenerator._id))
            # generate random content of the test file
            test_file_content = "".join(
                random.choice(string.ascii_letters) for _ in range(15))
            # create ansbile playbook file(which would create file on
            # test_file_path with test_file_content in it)
            playbook = testdir.makefile(
                ".{0}.yml".format(PlaybookGenerator._id),
                "---",
                "- hosts: all",
                "  connection: local",
                "  tasks:",
                "    - name: Create test file",
                "      lineinfile:",
                "        dest={0}".format(test_file_path),
                "        create=yes",
                "        line={0}".format(test_file_content),
                "        state=present",
            )
            PlaybookGenerator._id += 1
            return playbook, test_file_path, test_file_content
    return PlaybookGenerator()
|
de7bfd9ffe6fe0fcac3fb889688fb5c9c65244c7
| 28,026
|
import re
def fpd_package_installed(ctx):
    """
    Check whether the 'fpd' package appears in the device's active
    install summary.

    :param ctx: device context providing a ``send`` method
    :return: True if 'fpd' is found in the command output, else False
    """
    output = ctx.send("show install active summary")
    return re.search("fpd", output) is not None
|
5dcb095d6c39c11c714f147d1c7b350b387d06e8
| 28,027
|
import logging
def context_rewriter(function, rewrite=None, **kwargs):
    """Change arguments for the function.

    Args:
        function: callable for which to change arguments.
        rewrite: dictionary with rewrite params in format new_key -> old_key;
            each new_key is added with the value of kwargs[old_key] while the
            old keys are kept as well.
        kwargs: the rest of the arguments (to be rewritten)

    Returns:
        result of function on rewritten arguments.
    """
    assert callable(function), f"Function {function} must be callable"
    if rewrite is None:
        rewrite = {}
        logging.warning("Context rewriter got no arguments")
    rewritten = dict(kwargs)
    rewritten.update({new_key: kwargs[old_key]
                      for new_key, old_key in rewrite.items()})
    return function(**rewritten)
|
1b1c46fc6fcb61ed8f31642250574ba55336d31f
| 28,029
|
def _load_table_data(table_file):
"""Load additional data from a csv table file.
Args:
table_file: Path to the csv file.
Returns:
header: a list of headers in the table.
data: 2d array of data in the table.
"""
with open(table_file, encoding="utf-8") as f:
lines = f.readlines()
header = lines[0].strip().split(",")
data = [line.strip().split(",") for line in lines[1:]]
return header, data
|
c1f1ee84c2f04a613616897b897a01ee2364b98c
| 28,030
|
def tab_error():
    """Demonstrate mixing tabs and spaces for indentation.

    Returns the message when exec'ing the mixed-indent snippet raises
    TabError (None otherwise).
    """
    snippet = 'if True:\n pass\n\tpass'
    try:
        exec(snippet)
    except TabError:
        return "mixed tab and space"
|
f0cefd9435dcea54e5e4c2447817c2d68b3532ea
| 28,031
|
def backlog_color(backlog):
    """Return pyplot color for queue backlog: green below 5, red above 24,
    yellow otherwise."""
    if backlog < 5:
        return 'g'
    return 'r' if backlog > 24 else 'y'
|
551413b28c9c9736ea19e63c740f9c28613784ee
| 28,032
|
import zlib
def gzip_entropy(s):
    """
    Return the "GZIP" entropy of byte string `s`: the ratio of the
    zlib-compressed length (level 9) to the original length. Because of
    compression overhead this does not give great results on short strings.
    Accepts str (UTF-8 encoded first) or bytes; empty input yields 0.
    """
    if not s:
        return 0
    data = s.encode('utf-8') if isinstance(s, str) else s
    size = len(data)
    if not size:
        return 0
    return len(zlib.compress(data, 9)) / size
|
4642a79e85f3fd0adb117bc20811d7a325b14c5c
| 28,033
|
from datetime import date
def jqueryUIDates(datestr):
    """Preps dates for jqueryUI widgets.

    Parses an 'MM/DD/YYYY' date string, optionally suffixed with
    '?format=csv', into a datetime.date.

    Fix: the original used ``str.rstrip('?format=csv')``, which strips any
    trailing characters from that character *set* rather than the literal
    suffix; strip the exact suffix instead.
    """
    #03/02/2011
    suffix = '?format=csv'
    if datestr.endswith(suffix):
        datestr = datestr[:-len(suffix)]
    chunks = datestr.split('/')
    return date(int(chunks[2]), int(chunks[0]), int(chunks[1]))
|
a924d99da1ec0687bfc97391248e80d2657c0aca
| 28,034
|
import requests
def get_url_content(url):
    """
    Return the page content at *url*, for later analysis and extraction of
    valuable content.

    :param url: web page address
    :return: the HTML text of the page at *url*
    """
    return requests.get(url).text
|
07f2e7ce8c365e601fd7ed4329f04e6ae56e214f
| 28,035
|
def reference_repr(self):
    """The string representation compatible for Reference fields.

    Returns '<model_name>,<record_id>' (e.g. 'res.partner,7').
    NOTE(review): `self` appears to be an Odoo recordset (has ensure_one
    and _name) -- confirm; ensure_one() raises for multi-record sets.
    """
    self.ensure_one()
    return "{name},{id}".format(name=self._name, id=self.id)
|
6f8e1e848cd6c0f57250500dbbc565a313bcd1f2
| 28,037
|
def fexists(sftp, path):
    """os.path.exists equivalent for paramiko's SFTP object.

    Stats the remote *path* and reports whether the call succeeds.
    """
    try:
        sftp.stat(path)
        return True
    except IOError:
        return False
|
3cff765bbc8cc3f5ed3a3165473961ebfc04ec94
| 28,038
|
import numpy
def makeSemBins(semArray, nBins):
    """
    Makes the semantic bins.
    A spot in semRows[i] and semCols[i] are the indices of words that
    fall in the ith semantic bin
    """
    # Split up the semantic space into equal segments
    semBins = list(numpy.linspace(semArray.min(),semArray.max(),nBins+1))
    # Creates boundaries for the segments.
    # NOTE(review): semBins[-1:i] evaluates to [] for i in {0, 1}, so this
    # appears to just pair consecutive boundaries, i.e.
    # zip(semBins[0:], semBins[1:]) -- looks like a py2-era leftover; confirm.
    # The zip object is consumed once by the loop below (fine in py3).
    semBins = zip(*[semBins[i:]+semBins[-1:i] for i in range(2)])
    semRows = []
    semCols = []
    # `bin` shadows the builtin of the same name (harmless locally).
    for bin in semBins:
        # For words within those boundaries, append the indices to
        # semRows and semCols.  Boundary values themselves are excluded
        # (strict inequalities).
        (i,j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
        semRows.append(i)
        semCols.append(j)
    return semRows, semCols
|
51adaf43816900ac2c5cc0db64f5fa0659c2792c
| 28,041
|
import random
def random_guesser_v1(passage):
"""Takes in a string and returns a dictionary with
6 keys for the binary values representing
features of the string, and another six keys
representing the scores of those features.
Note: for the scores, 0 represents no score,
while -1 represents '?'
Arguments
---------
passage: string to be converted to dictionary with feature scores.
"""
features_dict = {}
features = ["is_accountability", "is_unobjectivity",
"is_inaccuracy", "is_fact-basedness",
"is_influential", "is_opinionated"]
features_score = ["score_accountability", "score_unobjectivity",
"score_inaccuracy", "score_fact-basedness",
"score_influential", "score_opinionated"]
for i in range(len(features)):
features_dict[features[i]] = random.randrange(2)
features_dict[features_score[i]] = random.randrange(-1, 4)
return features_dict
|
b2759839fcdd59d36aa2bf6643750970affc77a1
| 28,042
|
import os
import sys
def this_path(this_file=None):
    """
    Root of the operation: the directory of the running script
    (``sys.argv[0]``), optionally joined with a filename.

    Parameters
    ----------
    this_file: str
        Filename to append; default returns the directory itself.

    Returns
    -------
    str: path
    """
    # 'whocares' is a dummy fallback for when sys.argv[0] is empty
    # (e.g. interactive sessions) so realpath still gets a non-empty arg.
    exec_dir = os.path.dirname(os.path.realpath(sys.argv[0] or 'whocares'))
    return exec_dir if not this_file else os.path.join(exec_dir, this_file)
|
2eb816486175c304d46cf1b26e3d4d43b94fde48
| 28,043
|
def most_similar(train,
                 test,
                 distances):
    """
    get the most similar program name

    Parameters
    ----------
    train: list
        a list of string containing names of training programs
    test: list
        a list containing names of test programs
    distances: matrix
        matrix of distances where distances[i][j]
        is the distance of train[i] to test[j]

    Return
    ------
    a dict mapping each test program name to the name of the
    closest (minimum-distance) training program
    """
    bench_list = {}
    for j in range(len(test)):
        # start from the first training program as the running minimum
        bench = train[0]
        dist = distances[0][j]
        for i in range(len(train)):
            if distances[i][j] < dist:
                bench = train[i]
                dist = distances[i][j]
        bench_list[test[j]] = bench
    return bench_list
|
324722574bbbdbda61e7e4bc65669c2ce9674630
| 28,045
|
def decimal_hours(timeobject, rise_or_set: str) -> float:
    """
    Convert a sunrise/sunset time to decimal hours.

    Parameters
    ----------
    timeobject : datetime object or None
        Sunrise or -set time; None/falsy means the event does not occur.
    rise_or_set: string
        'sunrise' or 'sunset' specifying which of the two timeobject is.

    Returns
    -------
    float
        time of timeobject in decimal hours; 0.0 for a missing sunrise,
        23.999 for a missing sunset.

    Raises
    ------
    ValueError
        If rise_or_set is neither 'sunrise' nor 'sunset'.  (The original
        used ``assert``, which is stripped under ``python -O``.)
    """
    if rise_or_set not in ("sunrise", "sunset"):
        raise ValueError(
            f"rise_or_set must be 'sunrise' or 'sunset', got {rise_or_set!r}")
    if timeobject:
        # minute / 60 is true division, so the result is always a float;
        # the old `if ret == 0: return 0.0` special case was redundant.
        return timeobject.hour + timeobject.minute / 60
    return 0.0 if rise_or_set == "sunrise" else 23.999
|
44fe260abf8751cb78cf6e484dbf223d05233713
| 28,046
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.