content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_rectangle(roi):
    """
    Find the rectangle with changing colors inside ``roi``.

    The region is sharpened, color-quantized with k-means, and each
    non-white cluster's largest connected component is scored by how
    rectangular and well-filled it is.  The pixel colors of the winning
    component are returned.

    Parameters
    ----------
    roi : numpy.ndarray
        BGR image region of interest, shape (h, w, 3).

    Returns
    -------
    (bool, numpy.ndarray or None)
        ``(True, colors)`` with the BGR pixel values under the winning
        mask, or ``(False, None)`` when no candidate wins with enough
        margin.
    """
    # Unsharp mask: subtract a Gaussian blur from the image to sharpen edges.
    gaussian = cv2.GaussianBlur(roi, (9, 9), 10.0)
    roi = cv2.addWeighted(roi, 1.5, gaussian, -0.5, 0, roi)
    nh, nw, r = roi.shape

    # Color-quantize the ROI into K clusters with k-means.
    Z = np.float32(roi.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = 7
    # NOTE(review): this argument order matches the legacy OpenCV 2.x
    # kmeans signature (data, K, criteria, attempts, flags); OpenCV 3+
    # adds a ``bestLabels`` argument -- confirm the cv2 version in use.
    ret, label, centers = cv2.kmeans(Z, K, criteria, 10, 0)
    centers = np.uint8(centers)
    labels = label.reshape(roi.shape[:2])

    # Drop the cluster closest to white: assumed to be the background.
    # (A list is used rather than map() so np.argmin works on Python 3.)
    whiteness = [npl.norm(c - np.array([255, 255, 255])) for c in centers]
    whitest = int(np.argmin(whiteness))
    possible_clusters = [c for c in range(K) if c != whitest]

    energys = []
    correct_masks = []
    for num, p in enumerate(possible_clusters):
        # Keep only the largest connected component of this cluster's mask.
        cluster_mask = ma.masked_equal(labels, p).mask.astype(np.uint8) * 255
        labeled_array, num_features = mes.label(cluster_mask)
        count = np.bincount(labeled_array.flatten())[1:]
        if count.size == 0:
            # Cluster has no foreground component at all.
            continue
        val = np.argmax(count)
        mask_obj = ma.masked_equal(labeled_array, val + 1)
        draw_mask = mask_obj.mask.astype(np.uint8) * 255

        # Fit a minimum-area rectangle around the component's pixels.
        top = float(np.count_nonzero(draw_mask))
        valz = np.fliplr(np.transpose(draw_mask.nonzero()))
        rect = cv2.minAreaRect(valz)
        # NOTE(review): cv2.cv.BoxPoints is the OpenCV 2.x API; newer
        # versions use cv2.boxPoints -- confirm the cv2 version in use.
        box = np.int0(cv2.cv.BoxPoints(rect))
        rect_mask = np.zeros((nh, nw))
        cv2.drawContours(rect_mask, [box], 0, 255, -1)
        bottom = float(np.count_nonzero(rect_mask))

        l, w, vcost = _get_lw(box)
        if w < .001:
            print('WIDTH TOO SMALL')
            continue
        # Rectangle area as a fraction of the whole ROI.
        area = cv2.contourArea(box) / (nh * nw)
        if vcost > .5:
            print('VCOST TOO HIGH')
            continue
        if area < .03:
            print(area)
            print('TOOOO SMALL')
            continue
        # Filled fraction of the fitted rectangle; computed with floats so
        # the comparison is not truncated by integer division.
        fill = top / bottom
        if fill < .7:
            print('TOO SPARSE', fill)
            continue
        # Heuristic score: big, well-filled, ~2.5:1 aspect, low vcost.
        energy = area + 1.5 * fill - abs(2.5 - l / w) - .2 * vcost
        if energy < 0:
            print('LOW ENERGY!')
            continue
        print(num, 'area:', area, 'filled:', top, 'total:', bottom,
              'rat', fill, 'l/w', abs(2.5 - l / w),
              'vcost', vcost, 'energy', energy)
        energys.append(energy)
        correct_masks.append(mask_obj)

    if len(energys) == 0:
        print('EVERY ENERGY WRONG')
        return False, None
    # Rank candidates by energy; sort on the score only so equal energies
    # never trigger a comparison between mask objects.
    order = sorted(range(len(energys)), key=lambda i: energys[i], reverse=True)
    correct_masks = [correct_masks[i] for i in order]
    energys = [energys[i] for i in order]
    # Require a clear winner: reject ambiguous near-ties.
    if len(energys) > 1 and abs(energys[0] - energys[1]) < .2:
        print('TOO CLOSE TO CALLS')
        return False, None
    correct_mask = correct_masks[0]
    colors = roi[correct_mask.mask]
    return True, colors
async def challenge_process_fixture() -> Challenge:
    """
    Build a challenge fixture populated with:
    - Default user
    - Is open
    - Challenge in process
    """
    challenge = await populate_challenge()
    return challenge
def _read_pdg_masswidth(filename):
    """Read the PDG mass and width table and return a dictionary.

    Parameters
    ----------
    filename : string
        Path to the PDG data file, e.g. 'data/pdg/mass_width_2015.mcd'

    Returns
    -------
    particles : dict
        A dictionary where the keys are the particle names with the charge
        appended in case of a multiplet with different masses, e.g. 't'
        for the top quark, 'K+' and 'K0' for kaons.
        The value of the dictionary is again a dictionary with the following
        keys:
        - 'id': PDG particle ID
        - 'mass': list with the mass, positive and negative error in GeV
        - 'width': list with the width, positive and negative error in GeV
        - 'name': same as the key
    """
    data = pkgutil.get_data('flavio.physics', filename)
    lines = data.decode('utf-8').splitlines()
    particles_by_name = {}
    for line in lines:
        stripped = line.strip()
        # Skip blank lines and '*'-prefixed comment lines in the .mcd format.
        if not stripped or stripped[0] == '*':
            continue
        # Fixed-width columns: central value, positive error, negative error.
        mass = (line[33:51], line[52:60], line[61:69])
        if mass[0].strip() == '':
            # if mass is empty, go to next line
            # (necessary for 2019 neutrino entries)
            continue
        mass = [float(m) for m in mass]
        width = (line[70:88], line[89:97], line[98:106])
        if width[0].strip() == '':
            width = (0, 0, 0)
        else:
            width = [float(w) for w in width]
        ids = line[0:32].split()
        charges = line[107:128].split()[1].split(',')
        if len(ids) != len(charges):
            raise ValueError(
                "Mismatch between PDG IDs and charges in line: {!r}".format(line))
        for i, id_ in enumerate(ids):
            particle_charge = charges[i].strip()
            particle_name = line[107:128].split()[0]
            particle = {
                particle_charge: {
                    'id': id_.strip(),
                    'mass': mass,
                    'charge': particle_charge,
                    'width': width,
                    'name': particle_name,
                }
            }
            if particle_name in particles_by_name:
                particles_by_name[particle_name].update(particle)
            else:
                particles_by_name[particle_name] = particle
    # Multiplets with several charge states get the charge appended to the
    # name ('K+', 'K0'); single-state particles keep the bare name.
    result = {k + kk: vv for k, v in particles_by_name.items()
              for kk, vv in v.items() if len(v) > 1}
    result.update({k: list(v.values())[0]
                   for k, v in particles_by_name.items() if len(v) == 1})
    return result
def fps_and_pred(model, batch, **kwargs):
    """
    Get fingerprints and predictions from the model.

    Args:
        model (nff.nn.models): original NFF model loaded
        batch (dict): batch of data
    Returns:
        results (dict): model predictions and its predicted
            fingerprints, conformer weights, etc.
    """
    model.eval()

    # Per-conformer embeddings (fingerprints) and coordinates.
    outputs, xyz = model.make_embeddings(batch, xyz=None, **kwargs)
    # Pooled fingerprint per species plus the learned pooling weights.
    pooled_fp, learned_weights = model.pool(outputs)
    # Final property predictions from the pooled fingerprint.
    results = model.readout(pooled_fp)

    # Outside training, classifier heads get a sigmoid applied.
    if model.classifier:
        for readout_key in list(model.readout.readout.keys()):
            results[readout_key] = torch.sigmoid(results[readout_key])

    # Attach any gradient-based quantities (e.g. forces).
    results = model.add_grad(batch=batch, results=results, xyz=xyz)

    conf_fps = [fp.cpu().detach() for fp in outputs["conf_fps_by_smiles"]]
    energy = batch.get("energy")
    boltz_weights = batch.get("weights")

    # Conformers per species, used to split batched tensors back apart.
    n_confs = [(n // m).item()
               for n, m in zip(batch['num_atoms'], batch['mol_size'])]

    # De-batch every prediction into a per-species list.
    for key in list(results.keys()):
        results[key] = list(results[key])

    results.update({
        "fp": list(pooled_fp),
        "conf_fps": conf_fps,
        "learned_weights": learned_weights,
        "boltz_weights": list(torch.split(boltz_weights, n_confs)),
    })
    if energy is not None:
        results["energy"] = list(torch.split(energy, n_confs))
    return results
def preprocess_text(text):
    """
    Normalize a raw text string and return it as a list of word tokens.
    """
    text = contract_words(text)
    text = text.lower()
    # Strip quotes and commas; detach sentence-final periods as tokens.
    text = text.replace('"', "").replace(",", "").replace("'", "").replace(".", " .")  ## added by PAVAN
    ## Split tokens where '#' trails a word, e.g. "word#tag" -> "word #tag"
    if re.search(r'[a-z]+\#', text):
        rewritten = []
        for tok in text.split():
            tok = str(tok)
            if re.search(r'[a-z]+\#', tok):
                tok = re.sub(pattern=r'\#', repl=' #', string=tok)
            rewritten.append(tok)
        text = ' '.join(rewritten)
    ## Replace a leading '#' hashtag marker with the sentinel feature token
    if re.search(r'\#[a-z]+', text):
        rewritten = []
        for tok in text.split():
            tok = str(tok)
            if re.search(r'\#[a-z]+', tok):
                tok = re.sub(pattern=r'\#', repl='hashtagfea ', string=tok)
            rewritten.append(tok)
        text = ' '.join(rewritten)
    return text.split()
def set_backend(name, unsafe=False):
    """
    Set a specific backend by name.

    Parameters
    ----------
    name : str.
    unsafe : optional: bool. Default: False.
        If False, does not switch backend if current backend is not unified backend.

    Raises
    ------
    RuntimeError
        If a non-unified backend has already been loaded and ``unsafe``
        is False.
    """
    if not backend.is_unified() and not unsafe:
        raise RuntimeError("Can not change backend to %s after loading %s" % (name, backend.name()))
    if backend.is_unified():
        safe_call(backend.get().af_set_backend(backend.get_id(name)))
    else:
        # Non-unified backend with unsafe=True: force the switch.
        backend.set_unsafe(name)
def unicodeToAscii(s):
    """unicodeToAscii
    Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
    For example, 'Ślusàrski' -> 'Slusarski'
    """
    # Decompose accented characters, then drop combining marks ('Mn')
    # and anything outside the allowed alphabet.
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters]
    return ''.join(kept)
def update_ip_table(nclicks, value):
    """
    Fetch the per-IP query-frequency table for a domain from Elasticsearch.

    Args:
        nclicks: Contains the number of clicks registered by the submit button.
            (Unused here; supplied by the UI callback signature.)
        value: Contains the domain name corresponding to which the IP table has
            to be returned.

    Returns:
        list[dict]: one row per querying IP with keys 'sl_no', 'ip' and
        'count', sorted by descending query count; empty list when the
        domain is blank or the lookup fails.
    """
    if value is None or value == '':
        return []
    try:
        count = es.get(index=value, id=1)['_source']['count']
        # IPs ordered by how often they queried this domain (descending).
        ips = sorted(count, key=lambda ip: count[ip], reverse=True)
        data = [{'sl_no': position + 1, 'ip': ip, 'count': count[ip]}
                for position, ip in enumerate(ips)]
    except Exception:
        # Index may not exist yet for this domain; treat as "no data"
        # rather than crashing the dashboard callback.
        data = []
    return data
def collect_subclasses(mod, cls, exclude=None):
    """Collecting all subclasses of `cls` in the module `mod`

    @param mod: `ModuleType` The module to collect from.
    @param cls: `type` or (`list` of `type`) The parent class(es).
    @keyword exclude: (`list` of `type`) Classes to not include.
    @return: `list` of `type` The collected subclasses (parents excluded).
    """
    # issubclass() accepts a tuple but raises TypeError on a list, so
    # normalize the parent class(es) to a tuple once up front.
    parents = tuple(cls) if isinstance(cls, (list, tuple)) else (cls,)
    out = []
    for name in dir(mod):
        attr = getattr(mod, name)
        if (
                isinstance(attr, type) and
                attr not in parents and
                issubclass(attr, parents) and
                (attr not in exclude if exclude else True)):
            out.append(attr)
    return out
def download_file(
    sample_id,
    file_type,
    destination,
    host,
    email,
    password,
    api_key,
    no_progress,
):  # noqa: D413,D301,D412 # pylint: disable=C0301
    """Download sample file metadata.
    SAMPLE_ID specific sample for which to download the results
    FILE_TYPE specific deliverable to download results for
    DESTINATION path/to/file
    Examples:
    Download sample:
    gencove samples download-file e6b45af7-07c5-4a6d-9f97-6e1efbf3e215 ancestry-json ancestry.json
    Download and print to stdout then compress using gzip:
    gencove samples download-file e6b45af7-07c5-4a6d-9f97-6e1efbf3e215 ancestry-json - | gzip > ancestry.json.gz
    \f
    Args:
        sample_id (str): specific sample for which
            to download the results.
        file_type (str): specific deliverable to download
            results for.
        destination (str): path/to/file.
        no_progress (bool, optional, default False): do not show progress
            bar.
    """  # noqa: E501
    # "-" or /dev/stdout streams the file straight to standard output.
    if destination in ("-", "/dev/stdout"):
        DownloadFile(
            sample_id,
            file_type,
            sys.stdout.buffer,
            Credentials(email=email, password=password, api_key=api_key),
            Optionals(host=host),
            no_progress,
        ).run()
        return
    try:
        with open(destination, "wb") as out_handle:
            DownloadFile(
                sample_id,
                file_type,
                out_handle,
                Credentials(
                    email=email, password=password, api_key=api_key
                ),
                Optionals(host=host),
                no_progress,
            ).run()
    except IsADirectoryError:
        echo_error(
            "Please specify a file path (not directory path)"
            " for DESTINATION"
        )
        raise click.Abort()
def check_collections_equivalent(a: typing.Collection, b: typing.Collection,
                                 allow_duplicates: bool = False,
                                 element_converter: typing.Callable = identity) -> typing.Tuple[str, list]:
    """
    Compare two collections as multisets and describe any differences.

    :param a: one collection to compare
    :param b: other collection to compare
    :param allow_duplicates: allow collections to contain multiple elements
    :param element_converter: optional function to convert elements of collections to a different value
        for comparison
    :return: (message, differences); ``differences`` is empty when the
        collections are equivalent
    """
    a = Counter(map(element_converter, a))
    b = Counter(map(element_converter, b))
    if not allow_duplicates:
        duplicates = []
        for name, counts in [['a', a], ['b', b]]:
            for key, count in counts.items():
                if count > 1:
                    duplicates.append([name, key, count])
        if duplicates:
            return 'Duplicate elements ', ['|'.join(map(str, dup)) for dup in duplicates]
    diffs = []
    # Union of keys: every element appearing in either collection.
    for el in a | b:
        ac = a.get(el, 0)
        bc = b.get(el, 0)
        if ac != bc:
            # (The original prefixed this append with a stray string inside
            # a discarded tuple expression; the append alone is intended.)
            diffs.append(f'{el} a={ac} b={bc}')
    if diffs:
        return "Inconsistent element frequencies: ", diffs
    return 'Collections equivalent', []
def voting_classifier(*args, **kwargs):
    """
    Thin factory wrapper around ``VotingClassifier``.

    same as in gradient_boosting_from_scratch()
    """
    classifier = VotingClassifier(*args, **kwargs)
    return classifier
def download_vctk(destination, tmp_dir=None, device="cpu"):
    """Download dataset and perform resample to 16000 Hz.
    Arguments
    ---------
    destination : str
        Place to put final zipped dataset.
        (NOTE(review): the final zip/move step at the bottom is commented
        out, so ``destination`` is currently unused -- confirm intent.)
    tmp_dir : str
        Location to store temporary files. Will use `tempfile` if not provided.
    device : str
        Passed directly to pytorch's ``.to()`` method. Used for resampling.
    """
    dataset_name = "noisy-vctk-16k"
    if tmp_dir is None:
        tmp_dir = tempfile.gettempdir()
    final_dir = os.path.join(tmp_dir, dataset_name)
    # Create working directories if they don't already exist.
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
    if not os.path.isdir(final_dir):
        os.mkdir(final_dir)
    # Edinburgh DataShare archives for the noisy VCTK corpus (48 kHz).
    prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/"
    noisy_vctk_urls = [
        prefix + "clean_testset_wav.zip",
        prefix + "noisy_testset_wav.zip",
        prefix + "testset_txt.zip",
        prefix + "clean_trainset_28spk_wav.zip",
        prefix + "noisy_trainset_28spk_wav.zip",
        prefix + "trainset_28spk_txt.zip",
    ]
    # Download each archive unless a previous run already cached it.
    zip_files = []
    for url in noisy_vctk_urls:
        filename = os.path.join(tmp_dir, url.split("/")[-1])
        zip_files.append(filename)
        if not os.path.isfile(filename):
            logger.info("Downloading " + url)
            with urllib.request.urlopen(url) as response:
                with open(filename, "wb") as tmp_file:
                    logger.info("... to " + tmp_file.name)
                    shutil.copyfileobj(response, tmp_file)
    # Unzip
    for zip_file in zip_files:
        logger.info("Unzipping " + zip_file)
        shutil.unpack_archive(zip_file, tmp_dir, "zip")
        # os.remove(zip_file)
    # Move transcripts to final dir
    shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir)
    shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir)
    # Downsample each wav directory from 48 kHz to 16 kHz.
    dirs = [
        "noisy_testset_wav",
        "clean_testset_wav",
        "noisy_trainset_28spk_wav",
        "clean_trainset_28spk_wav",
    ]
    downsampler = Resample(orig_freq=48000, new_freq=16000)
    for directory in dirs:
        logger.info("Resampling " + directory)
        dirname = os.path.join(tmp_dir, directory)
        # Make directory to store downsampled files
        dirname_16k = os.path.join(final_dir, directory + "_16k")
        if not os.path.isdir(dirname_16k):
            os.mkdir(dirname_16k)
        # Load files and downsample
        for filename in get_all_files(dirname, match_and=[".wav"]):
            signal, rate = torchaudio.load(filename)
            downsampled_signal = downsampler(signal.view(1, -1).to(device))
            # Save downsampled file
            # (filename[-12:] keeps just the basename, e.g. "p234_003.wav")
            torchaudio.save(
                os.path.join(dirname_16k, filename[-12:]),
                downsampled_signal[0].cpu(),
                sample_rate=16000,
                channels_first=False,
            )
            # Remove old file
            os.remove(filename)
        # Remove old directory
        os.rmdir(dirname)
    # logger.info("Zipping " + final_dir)
    # final_zip = shutil.make_archive(
    #     base_name=final_dir,
    #     format="zip",
    #     root_dir=os.path.dirname(final_dir),
    #     base_dir=os.path.basename(final_dir),
    # )
    # logger.info(f"Moving {final_zip} to {destination}")
    # shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip"))
| 27,812 |
def assert_allclose(actual: float, desired: int):
    """
    Auto-generated API-usage stub (body intentionally empty); the counts
    below record how often each library called this signature.

    usage.scipy: 5
    usage.sklearn: 1
    usage.statsmodels: 1
    """
    ...
def replace_subject_with_object(sent, sub, obj):
    """Replace the subject with the object and remove the original subject.

    Args:
        sent (str): sentence to rewrite.
        sub (str): subject to replace (treated as a regex pattern).
        obj (str): object that replaces the subject; its own occurrence is
            removed from the sentence first.

    Returns:
        str: rewritten sentence with runs of whitespace collapsed.
    """
    # The 4th positional argument of re.sub is `count`, not `flags`, so
    # IGNORECASE must be passed by keyword (the original passed it
    # positionally, silently capping replacements at 2 and matching
    # case-sensitively).
    sent = re.sub(r'{}'.format(obj), r'', sent, flags=re.IGNORECASE)
    sent = re.sub(r'{}'.format(sub), r'{} '.format(obj), sent, flags=re.IGNORECASE)
    # Collapse the double spaces left behind by the removals.
    return re.sub(r'\s{2,}', r' ', sent, flags=re.IGNORECASE)
def estimate_using_user_recent(list_type: str, username: str) -> int:
    """
    Estimate the page number of a missing (entry which was just approved)
    entry and choose the max page number.

    This requests a recent user's list and checks whether any ids in that
    list are not yet in the approved cache.
    """
    assert list_type in {"anime", "manga"}
    logger.info(f"Estimating {list_type}list using {username}")
    approved = approved_ids()
    recent_ids = user_recently_updated(
        list_type=list_type, username=username, offset=0
    )
    known_ids = approved.anime if list_type == "anime" else approved.manga
    descending = sorted(known_ids, reverse=True)
    # Entries the user touched that the approved cache doesn't know yet.
    unknown_ids = [entry for entry in recent_ids if entry not in known_ids]
    page_guesses = [_estimate_page(entry, descending) for entry in unknown_ids]
    max_page: int = max(page_guesses) + 1 if page_guesses else 0
    logger.info(f"Estimated {max_page} {list_type} pages for {username}")
    return max_page
def git_acquire_lock(lock_path, log_file=None):
    """
    Acquire an inter-process lock by atomically creating a directory.

    Polls every 5s for up to 600 attempts (~50min) while another process
    holds the lock, then raises.

    >>> import os
    >>> lock_test = '/tmp/lock-test'
    >>> git_acquire_lock(lock_test)
    >>> os.path.exists(lock_test)
    True
    >>> os.rmdir(lock_test)
    """
    locked = False
    attempt = 1
    while not locked:
        try:
            # 600 attempts * 5s = 50min of waits max
            if attempt > 600:
                msg = 'Cannot lock: "{}" after {} attempts'.format(lock_path, attempt)
                if log_file:
                    log(msg, log_file)
                raise Exception(msg)
            os.makedirs(lock_path)
            if log_file:
                log('Locking git mirror local directory with %s' % lock_path, log_file)
            locked = True
        except FileExistsError:
            # The original compared err.errno to os.errno.EEXIST, but
            # os.errno was removed in Python 3.7; FileExistsError is the
            # OSError subclass raised when the lock directory exists.
            # Wait until the other process releases the lock.
            attempt += 1
            if log_file:
                log('The git mirror is locked by another process, waiting 5s... (attempt %s/600)' % attempt, log_file)
            time.sleep(5)
        except OSError as err:
            if log_file:
                log('Cannot lock: %s' % err, log_file)
            raise
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_html2pdf package"""
    return {
        "package": u"fn_html2pdf",
        "incident_fields": [],
        "action_fields": [],
        "function_params": [
            u"html2pdf_data",
            u"html2pdf_data_type",
            u"html2pdf_stylesheet",
        ],
        "datatables": [],
        "message_destinations": [u"fn_html2pdf"],
        "functions": [u"fn_html2pdf"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [],
        "workflows": [u"example_html2pdf"],
        "actions": [u"Example: HTML2PDF"],
    }
def set_default_parameter(self, parameter_name, parameter_value):
    """
    Store a parameter to be used as a default (template) when
    handling a request.

    :type parameter_name: String
    :param parameter_name: The name of the parameter to be set.
    :type parameter_value: Object
    :param parameter_value: The value of the parameter to be set
    """
    defaults = self.default_parameters
    defaults[parameter_name] = parameter_value
def instrument_keywords(instrument, caom=False):
    """Get the keywords for a given instrument service

    Parameters
    ----------
    instrument: str
        The instrument name, i.e. one of ['niriss','nircam','nirspec',
        'miri','fgs']
    caom: bool
        Query CAOM service

    Returns
    -------
    pd.DataFrame
        A DataFrame of the keywords
    """
    # A single record is enough to discover the available header fields.
    sample = instrument_inventory(instrument, return_data=True, caom=caom,
                                  add_requests={'pagesize': 1, 'page': 1})
    rows = [[field['name'], field['type']] for field in sample['fields']]
    return pd.DataFrame(rows, columns=('keyword', 'dtype'))
def header_info(data_type, payload):
    """Report additional non-payload info in network binary data.

    These can be status, time, graphic or control structures.
    Structures are defined in db_access.h.

    Parameters
    ----------
    data_type : numeric CA data type code, resolved via ``type_name()``.
    payload : bytes or None
        Raw big-endian payload; ``None`` yields an empty string.

    Returns
    -------
    str
        A human-readable "{...}" summary, or "" for plain value types.
    """
    if payload is None:
        return ""
    from struct import unpack

    data_type = type_name(data_type)
    if data_type.startswith("STS_"):
        status, severity = unpack(">HH", payload[0:4])
        # Expecting status = 0 (normal), severity = 1 (success)
        return "{status:%d,severity:%d}" % (status, severity)
    elif data_type.startswith("TIME_"):
        status, severity = unpack(">HH", payload[0:4])
        # The time stamp is represented as two uint32 values. The first is the
        # number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
        # number of nanoseconds within the second.
        seconds, nanoseconds = unpack(">II", payload[4:12])
        from time import mktime, strftime, gmtime
        # Convert the 1990 epoch to the Unix 1970 epoch.
        offset = mktime((1990, 1, 1, 0, 0, 0, 0, 0, 0)) - mktime(
            (1970, 1, 1, 0, 0, 0, 0, 0, 0)
        )
        t = seconds + nanoseconds * 1e-9 + offset
        timestamp = strftime("%Y-%m-%d %H:%M:%S GMT", gmtime(t))
        return "{status:%d,severity:%d, timestamp:%s}" % (status, severity, timestamp)
    elif data_type.startswith("GR_"):
        # Graphic structure: units plus 6 display/alarm limits.
        status, severity = unpack(">HH", payload[0:4])
        info = "status:%d,severity:%d, " % (status, severity)
        if data_type.endswith("STRING"):
            pass
        elif data_type.endswith("SHORT"):
            unit = payload[8:16].rstrip(b"\0")
            # NOTE(review): "6h"/"6b"/"6i" use native byte order while the
            # float/double branches use big-endian (">") -- confirm intended.
            limits = unpack("6h", payload[16 : 16 + 6 * 2])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("FLOAT"):
            precision, = unpack(">h", payload[4:6])
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack(">6f", payload[16 : 16 + 6 * 4])
            info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
        elif data_type.endswith("ENUM"):
            nstrings, = unpack(">h", payload[4:6])
            strings = payload[6 : 6 + 16 * 26]
            info += "nstrings=%r" % nstrings
        elif data_type.endswith("CHAR"):
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack("6b", payload[16 : 16 + 6 * 1])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("LONG"):
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack("6i", payload[16 : 16 + 6 * 4])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("DOUBLE"):
            precision, = unpack(">h", payload[4:6])
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack(">6d", payload[16 : 16 + 6 * 8])
            info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
        else:
            info += "?"
        # BUG FIX: was info.restrip(", "), which raised AttributeError for
        # every GR_ type; rstrip is the intended call (as in CTRL_ below).
        info = info.rstrip(", ")
        return "{" + info + "}"
    elif data_type.startswith("CTRL_"):
        # Control structure: units plus 8 display/alarm/control limits.
        status, severity = unpack(">HH", payload[0:4])
        info = "status:%d,severity:%d, " % (status, severity)
        if data_type.endswith("STRING"):
            pass
        elif data_type.endswith("SHORT"):
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack("8h", payload[16 : 16 + 8 * 2])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("FLOAT"):
            precision, = unpack(">h", payload[4:6])
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack(">8f", payload[16 : 16 + 8 * 4])
            info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
        elif data_type.endswith("ENUM"):
            nstrings, = unpack(">h", payload[4:6])
            strings = payload[6 : 6 + 16 * 26]
            info += "nstrings=%r" % nstrings
        elif data_type.endswith("CHAR"):
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack("8b", payload[16 : 16 + 8 * 1])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("LONG"):
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack("8i", payload[16 : 16 + 8 * 4])
            info += "unit=%r,limits=%r" % (unit, limits)
        elif data_type.endswith("DOUBLE"):
            precision, = unpack(">h", payload[4:6])
            unit = payload[8:16].rstrip(b"\0")
            limits = unpack(">8d", payload[16 : 16 + 8 * 8])
            info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
        else:
            info += "?"
        info = info.rstrip(", ")
        return "{" + info + "}"
    return ""
def dsa_verify(message, public, signature, constants=None):
    """Check whether the DSA signature ``(r, s)`` is valid for ``message``."""
    r, s = signature
    p, q, g = get_dsa_constants(constants)
    # Both signature components must lie strictly inside (0, q).
    if not (0 < r < q and 0 < s < q):
        return False
    w = inverse_mod(s, q)
    u1 = (bytes_to_num(sha1_hash(message)) * w) % q
    u2 = (r * w) % q
    v = ((pow(g, u1, p) * pow(public, u2, p)) % p) % q
    return v == r
def set_up_cgi():
    """
    Return a configured instance of the CGI simulator on RST.

    Sets up the Lyot stop and filter from the configfile, turns off science
    instrument (SI) internal WFE, and reads the FPM setting from the
    configfile.

    :return: CGI instrument instance
    """
    webbpsf.setup_logging('ERROR')

    # The DM is square, so the configured subaperture total must be a
    # perfect square (N actuators per row).
    mode_in = CONFIG_PASTIS.get('RST', 'mode')
    nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
    nbactuator_in = int(np.sqrt(nbactuator))
    if nbactuator_in ** 2 != nbactuator:
        error_msg = f"The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!"
        log.error(error_msg)
        raise ValueError(error_msg)

    cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
    cgi.include_si_wfe = False
    # Pull the optical-element selections straight from the configfile.
    for attribute, option in (('apodizer', 'apodizer'),
                              ('fpm', 'fpm'),
                              ('lyotstop', 'lyotstop'),
                              ('camera', 'camera'),
                              ('filter', 'filter_name')):
        setattr(cgi, attribute, CONFIG_PASTIS.get('RST', option))
    return cgi
def plot_line(
    timstof_data,  # alphatims.bruker.TimsTOF object
    selected_indices: np.ndarray,
    x_axis_label: str,
    colorscale_qualitative: str,
    title: str = "",
    y_axis_label: str = "intensity",
    remove_zeros: bool = False,
    trim: bool = True,
    height: int = 400
) -> go.Figure:
    """Plot an XIC, mobilogram or spectrum as a lineplot.
    Parameters
    ----------
    timstof_data : alphatims.bruker.TimsTOF object
        An alphatims.bruker.TimsTOF data object.
    selected_indices : np.ndarray
        The raw indices that are selected for this plot. These are typically obtained by slicing the TimsTOF data object with e.g. data[..., "raw"].
    x_axis_label : str
        The label of the x-axis. Options are:
        - mz
        - rt
        - mobility
    y_axis_label : str
        Should not be set for a 1D line plot. Default is "intensity".
    title : str
        The title of the plot. Default is "".
    remove_zeros : bool
        If True, zeros are removed. Note that a line plot connects consecutive points, which can lead to misleading plots if non-zeros are removed. If False, use the full range of the appropriate dimension of the timstof_data. Default is False.
    trim : bool
        If True, zeros on the left and right are trimmed. Default is True.
    height : int
        Plot height. Default is 400.
    Returns
    -------
    plotly.graph_objects.Figure object
        A line plot showing an XIC, mobilogram or spectrum.
    """
    # Map short axis keys ("mz", "rt", ...) to display labels, then map
    # display labels to the TimsTOF attribute names holding tick values.
    axis_dict = {
        "mz": "m/z, Th",
        "rt": "RT, min",
        "mobility": "Inversed IM, V·s·cm\u207B\u00B2",
        "intensity": "Intensity",
    }
    x_axis_label = axis_dict[x_axis_label]
    y_axis_label = axis_dict[y_axis_label]
    labels = {
        'm/z, Th': "mz_values",
        'RT, min': "rt_values",
        'Inversed IM, V·s·cm\u207B\u00B2': "mobility_values",
    }
    x_dimension = labels[x_axis_label]
    # Sum intensities of the selected raw points into bins along x.
    intensities = timstof_data.bin_intensities(selected_indices, [x_dimension])
    if x_dimension == "mz_values":
        x_ticks = timstof_data.mz_values
        plot_title = "Spectrum"
    elif x_dimension == "mobility_values":
        x_ticks = timstof_data.mobility_values
        plot_title = "Mobilogram"
    elif x_dimension == "rt_values":
        # Retention time is stored in seconds; display in minutes.
        x_ticks = timstof_data.rt_values / 60
        plot_title = "XIC"
    non_zeros = np.flatnonzero(intensities)
    if len(non_zeros) == 0:
        # Nothing selected: plot empty axes rather than failing.
        x_ticks = np.empty(0, dtype=x_ticks.dtype)
        intensities = np.empty(0, dtype=intensities.dtype)
    else:
        if remove_zeros:
            x_ticks = x_ticks[non_zeros]
            intensities = intensities[non_zeros]
        elif trim:
            # Keep one zero sample on each side so the line drops to zero.
            start = max(0, non_zeros[0] - 1)
            end = non_zeros[-1] + 2
            x_ticks = x_ticks[start: end]
            intensities = intensities[start: end]
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=x_ticks,
            y=intensities,
            mode='lines',
            # NOTE(review): .format(i + 1) on an already-interpolated
            # f-string is a no-op; hover text is just the axis label.
            text=[f'{x_axis_label}'.format(i + 1) for i in range(len(x_ticks))],
            hovertemplate='<b>%{text}:</b> %{x};<br><b>Intensity:</b> %{y}.',
            name=" ",
            marker=dict(color=getattr(px.colors.qualitative, colorscale_qualitative)[0])
        )
    )
    fig.update_layout(
        title=dict(
            text=plot_title,
            font=dict(
                size=16,
            ),
            x=0.5,
            xanchor='center',
            yanchor='top'
        ),
        xaxis=dict(
            title=x_axis_label,
            titlefont_size=14,
            tickmode='auto',
            tickfont_size=14,
        ),
        yaxis=dict(
            title=y_axis_label,
        ),
        template="plotly_white",
        height=height,
        hovermode="x"
    )
    return fig
def _ascii_encode(data: str, errors: str, index: int, out: bytearray):
    """Tries to encode `data`, starting from `index`, into the `out` bytearray.
    If it encounters any codepoints above 127, it tries using the `errors`
    error handler to fix it internally, but returns the a tuple of the first
    and last index of the error on failure.
    If it finishes encoding, it returns a tuple of the final bytes and length.
    """
    # Implemented natively; this Python definition only binds the signature
    # to the built-in implementation.
    _builtin()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    # Single-process runs need no cross-rank communication.
    if world_size == 1:
        return [data]
    gathered = [None for _ in range(world_size)]
    dist.all_gather_object(gathered, data)
    return gathered
def get_title_count(titles, is_folder):
    """Gets the final title count.

    Args:
        titles: object whose ``all`` attribute maps disc groups to
            iterables of titles.
        is_folder (bool): when True, an empty title set yields 0; when
            False, an empty title set aborts the program (legacy behavior).

    Returns:
        int: total number of titles across all groups.
    """
    if len(titles.all) == 0:
        if is_folder:
            return 0
        # Legacy behavior: a non-folder source with no titles is fatal.
        sys.exit()
    return sum(len(disc_titles) for disc_titles in titles.all.values())
def draw_graph(
        adata,
        layout=None,
        color=None,
        alpha=None,
        groups=None,
        components=None,
        legend_loc='right margin',
        legend_fontsize=None,
        legend_fontweight=None,
        color_map=None,
        palette=None,
        right_margin=None,
        size=None,
        title=None,
        show=None,
        save=None,
        ax=None):
    """Scatter plot in graph-drawing basis.

    Thin wrapper around :func:`scatter` that selects the precomputed
    ``draw_graph`` layout coordinates as the plotting basis.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.
    layout : {'fr', 'drl', ...}, optional (default: last computed)
        One of the `draw_graph` layouts, see sc.tl.draw_graph. By default,
        the last computed layout is taken.
    color : string or list of strings, optional (default: None)
        Keys for sample/cell annotation either as list `["ann1", "ann2"]` or
        string `"ann1,ann2,..."`.
    groups : str, optional (default: all groups)
        Restrict to a few categories in categorical sample annotation.
    components : str or list of str, optional (default: '1,2')
        String of the form '1,2' or ['1,2', '2,3'].
    legend_loc : str, optional (default: 'right margin')
        Location of legend, either 'on data', 'right margin' or valid keywords
        for matplotlib.legend.
    legend_fontsize : int (default: None)
        Legend font size.
    color_map : str (default: `matplotlib.rcParams['image.cmap']`)
        String denoting matplotlib color map.
    palette : list of str (default: None)
        Colors to use for plotting groups (categorical annotation).
    right_margin : float or list of floats (default: None)
        Adjust the width of the space right of each plotting panel.
    size : float (default: None)
        Point size.
    title : str, optional (default: None)
        Provide title for panels either as `["title1", "title2", ...]` or
        `"title1,title2,..."`.
    show : bool, optional (default: None)
        Show the plot.
    save : bool or str, optional (default: None)
        If True or a str, save the figure. A string is appended to the
        default filename.
    ax : matplotlib.Axes
        A matplotlib axes object.

    Returns
    -------
    matplotlib.Axes object

    Raises
    ------
    ValueError
        If the requested layout has not been computed on ``adata``.
    """
    from ..utils import check_adata
    adata = check_adata(adata)
    # Default to the most recently computed draw_graph layout.
    if layout is None: layout = adata.add['draw_graph_layout'][-1]
    # The layout must have been computed by sc.tl.draw_graph beforehand.
    if 'X_draw_graph_' + layout not in adata.smp_keys():
        raise ValueError('Did not find {} in adata.smp. Did you compute layout {}?'
                         .format('draw_graph_' + layout, layout))
    # Forward everything to the generic scatter plot on the layout basis.
    axs = scatter(
        adata,
        basis='draw_graph_' + layout,
        color=color,
        alpha=alpha,
        groups=groups,
        components=components,
        projection='2d',
        legend_loc=legend_loc,
        legend_fontsize=legend_fontsize,
        legend_fontweight=legend_fontweight,
        color_map=color_map,
        palette=palette,
        right_margin=right_margin,
        size=size,
        title=title,
        show=show,
        save=save,
        ax=ax)
    return axs
def post_token():
    """
    Receives authentication credentials in order to generate an access
    token to be used to access protected models. Tokens generated
    by this endpoint are JWT Tokens.
    """
    # Only JSON requests are accepted; otherwise respond 400 Bad Request.
    if not request.is_json:
        app.logger.warning('Request without JSON payload received on token endpoint')
        return jsonify({"msg": "Only JSON request is supported"}), 400
    # Read credentials from json request
    params = request.get_json()
    username = params.get('username', None)
    password = params.get('password', None)
    if not username:
        app.logger.warning('Request without username parameter received on token endpoint')
        return jsonify({"msg": "A username parameter must be provided"}), 400
    if not password:
        app.logger.warning('Request without password parameter received on token endpoint')
        return jsonify({"msg": "A password parameter must be provided"}), 400
    # A username and password were provided, so verify them.
    user = get_user_by_username(username)
    if user is None:
        app.logger.warning('Request with invalid username was received')
        return jsonify({"msg": "Unable to find user with [" + username + "] username"}), 404
    if not user.authenticate(password):
        # BUG FIX: the original fell through with no return here, so a
        # wrong password made the view return None (HTTP 500). Reject
        # explicitly with 401 Unauthorized instead.
        app.logger.warning('Request with invalid password was received')
        return jsonify({"msg": "Invalid credentials"}), 401
    # ACCESS TOKEN
    access_token_expires = app.config['JWT_ACCESS_TOKEN_VALIDITY_HOURS']
    access_token = create_access_token(identity=user.user_id, expires_delta=access_token_expires)
    # REFRESH TOKEN
    refresh_token_expires = app.config['JWT_REFRESH_TOKEN_VALIDITY_DAYS']
    refresh_token = create_refresh_token(identity=user.user_id, expires_delta=refresh_token_expires)
    app.logger.info('A new token has been generated for user [' + user.user_id + "]")
    return jsonify({
        'access_token': access_token,
        'expiration': access_token_expires.total_seconds(),
        'refresh_token': refresh_token
    }), 200
def test_document_query():
    # type: () -> None
    """
    A realistic unit test demonstrating the usage of `DocumentQuery`.

    Builds a two-product in-memory collection, runs a query exercising all
    three metric sources (path metrics, enumerations, groups), and pins the
    exact ordered stream of metrics the query yields.
    """
    PRODUCTS_COLLECTION = [
        # NOTE: use ordered dicts so that order of submitted metrics is deterministic on Python 2 too.
        OrderedDict(
            (
                ('name', 'T-Shirt'),
                ('category', 'clothing'),
                ('sales', {'sales_per_day': 100, 'sales_total': 10000}),
                ('locations', [{'name': 'London', 'stock': 1200}, {'name': 'Paris', 'stock': 700}]),
                ('total_sales_per_location', OrderedDict((('london', 2000), ('paris', 8000)))),
            ),
        ),
        OrderedDict(
            (
                ('name', 'Laptop'),
                ('category', 'high-tech'),
                ('sales', {'sales_per_day': 5, 'sales_total': 400}),
                ('locations', [{'name': 'New York', 'stock': 150}]),
                ('total_sales_per_location', {'new-york': 400}),
            )
        ),
    ]

    def get_data_from_db(conn):
        # type: (dict) -> Iterator[Tuple[dict, List[str]]]
        # Fake database source: yields each document together with its tags.
        for product in PRODUCTS_COLLECTION:
            tags = ['category:{}'.format(product['category']), 'server:{}'.format(conn['server'])]
            yield product, tags

    query = DocumentQuery(
        source=get_data_from_db,
        name='test',
        prefix='products',
        # Metrics obtained from a nested JSON key lookup (aka path lookup).
        metrics=[
            {'type': 'gauge', 'path': 'sales.sales_per_day'},
            {'type': 'monotonic_count', 'path': 'sales.sales_total'},
            {'type': 'gauge', 'path': 'locations', 'transformer': transformers.length},
        ],
        # Metrics for each object in an array, tagged by the index in the array.
        enumerations=[
            {'path': 'locations', 'index_tag': 'location_index', 'metrics': [{'type': 'gauge', 'path': 'stock'}]}
        ],
        # Metrics from the result of a groupby() operation (aggregation).
        groups=[{'type': 'gauge', 'path': 'total_sales_per_location', 'key_tag': 'location'}],
    )

    conn = {'server': 'example'}
    metrics = list(query.run(conn=conn))

    # Expected stream: per document, path metrics first, then enumerations,
    # then groups, in collection order.
    assert metrics == [
        # -- T-Shirt --
        # Metrics
        {
            'type': 'gauge',
            'name': 'products.sales.sales_per_day',
            'value': 100,
            'tags': ['category:clothing', 'server:example'],
        },
        {
            'type': 'monotonic_count',
            'name': 'products.sales.sales_total',
            'value': 10000,
            'tags': ['category:clothing', 'server:example'],
        },
        {'type': 'gauge', 'name': 'products.locations', 'value': 2, 'tags': ['category:clothing', 'server:example']},
        # Enumerations
        {
            'type': 'gauge',
            'name': 'products.locations.stock',
            'value': 1200,
            'tags': ['category:clothing', 'server:example', 'location_index:0'],
        },
        {
            'type': 'gauge',
            'name': 'products.locations.stock',
            'value': 700,
            'tags': ['category:clothing', 'server:example', 'location_index:1'],
        },
        # Groups
        {
            'type': 'gauge',
            'name': 'products.total_sales_per_location',
            'value': 2000,
            'tags': ['category:clothing', 'server:example', 'location:london'],
        },
        {
            'type': 'gauge',
            'name': 'products.total_sales_per_location',
            'value': 8000,
            'tags': ['category:clothing', 'server:example', 'location:paris'],
        },
        # -- Laptop --
        # Metrics
        {
            'type': 'gauge',
            'name': 'products.sales.sales_per_day',
            'value': 5,
            'tags': ['category:high-tech', 'server:example'],
        },
        {
            'type': 'monotonic_count',
            'name': 'products.sales.sales_total',
            'value': 400,
            'tags': ['category:high-tech', 'server:example'],
        },
        {'type': 'gauge', 'name': 'products.locations', 'value': 1, 'tags': ['category:high-tech', 'server:example']},
        # Enumerations
        {
            'type': 'gauge',
            'name': 'products.locations.stock',
            'value': 150,
            'tags': ['category:high-tech', 'server:example', 'location_index:0'],
        },
        # Groups
        {
            'type': 'gauge',
            'name': 'products.total_sales_per_location',
            'value': 400,
            'tags': ['category:high-tech', 'server:example', 'location:new-york'],
        },
    ]
def judgement(seed_a, seed_b, rounds=40000000):
    """Count how often the lowest 16 bits of two generators match.

    Each generator multiplies its previous value by a fixed factor and keeps
    the remainder modulo 2147483647 (the Mersenne prime 2**31 - 1).

    Args:
        seed_a: starting value for generator A (factor 16807).
        seed_b: starting value for generator B (factor 48271).
        rounds: number of generated pairs to compare (default 40 million).

    Returns:
        The number of pairs whose lowest 16 bits are equal.
    """
    count = 0
    # BUG FIX: the original loop ran `rounds + 1` times (`sample <= rounds`
    # starting from 0); range() produces exactly `rounds` pairs.
    for _ in range(rounds):
        seed_a = seed_a * 16807 % 2147483647
        seed_b = seed_b * 48271 % 2147483647
        # BUG FIX: compare the low 16 bits numerically. Slicing bin() strings
        # is wrong for values shorter than 16 bits: the '0b' prefix leaks into
        # the slice and leading zeros are missing.
        if seed_a & 0xFFFF == seed_b & 0xFFFF:
            count += 1
    return count
def GJK(shape1, shape2):
    """ Implementation of the GJK algorithm

    PARAMETERS
    ----------
    shape{1, 2}: Shape

    RETURN
    ------
    : bool
        Signifies if the given shapes intersect or not.
    """
    # Initialize algorithm parameters: first search direction runs from
    # shape1's center toward shape2's center.
    direction = Vec(shape1.center, shape2.center).direction
    # First support point on the Minkowski difference in that direction.
    A = support(shape1, shape2, direction)
    simplex = [A]
    # Next search direction: from the first simplex point toward the origin.
    direction = Vec(simplex[0], Point()).direction
    while True:  # while new valid support found. `direction` is updated each iteration.
        B = support(shape1, shape2, direction)
        AB = Vec(simplex[0], B)
        if dot_vec_dir(AB, direction) <= 0:  # No support past the origin
            # The new support point never crossed the origin, so the
            # Minkowski difference cannot contain it: no intersection.
            return False
        else:
            simplex.append(B)
            # NOTE(review): `direction` is never reassigned inside this loop,
            # so the "updated each iteration" claim above can only hold if
            # handle_simplex mutates the direction object in place — confirm.
            if handle_simplex(simplex, direction):
                return True
def test_edit_add_one_author(user0, three_items_authors_only):
    """Adding one author works properly."""
    # Seed the user's library with the fixture items.
    for fixture_item in three_items_authors_only:
        common.add_item(fixture_item, user0)
    edited_content = {
        "id": "1",
        "title": "Test",
        "authors": [
            {"last_name": "Smith", "first_name": "Joe"},
            {"last_name": "Smith", "first_name": "Jane"},
            {"last_name": "Williams", "first_name": "Regina"},
        ],
    }
    common.edit_item(edited_content, user0)
    doc = user0.documents.first()
    # The edit must have grown the author list and appended the new author.
    assert len(doc.authors) == 3
    added_author = doc.authors[2]
    assert added_author.first_name == "Regina" and added_author.last_name == "Williams"
def _set_int_config_parameter(value: OZWValue, new_value: int) -> int:
    """Set a ValueType.INT config parameter.

    Coerces ``new_value`` to ``int``, validates it against the parameter's
    optional min/max bounds, and sends it to the value.

    Raises:
        WrongTypeError: if ``new_value`` cannot be coerced to an int.
        InvalidValueError: if the coerced value is outside the allowed range.

    Returns:
        The coerced integer value that was sent.
    """
    try:
        new_value = int(new_value)
    # BUG FIX: int() raises TypeError (not ValueError) for non-numeric
    # objects such as None or dicts; catch both so any bad input maps to
    # WrongTypeError instead of an unhandled TypeError.
    except (TypeError, ValueError) as err:
        raise WrongTypeError(
            (
                f"Configuration parameter type {value.type} does not match "
                f"the value type {type(new_value)}"
            )
        ) from err
    if (value.max is not None and new_value > value.max) or (
        value.min is not None and new_value < value.min
    ):
        raise InvalidValueError(
            f"Value {new_value} out of range of parameter (Range: {value.min}-{value.max})"
        )
    value.send_value(new_value)  # type: ignore
    return new_value
def split_dataset(dataset_file, trainpct):
    """
    Split a file containing the full path to individual annotation files into
    train and test datasets, with a split defined by trainpct.

    Inputs:
    - dataset_file - a .txt or .csv file containing file paths pointing to
      annotation files (expects no header), or an in-memory list of paths.
    - trainpct = 0.8 produces an 80:20 train:test split

    Returns:
    - (train, test): two pandas DataFrames with a single "Filename" column.
    """
    # Accept either an in-memory list of paths or a headerless text/CSV file.
    # isinstance (rather than `type(...) is list`) also accepts list subclasses.
    if isinstance(dataset_file, list):
        full_dataset = pd.DataFrame(dataset_file, columns=["Filename"])
    else:
        full_dataset = pd.read_csv(dataset_file, names=["Filename"])
    print(
        "You've chosen a training percentage of: {} (this variable has type: {})".format(
            trainpct, type(trainpct)
        )
    )
    testsize = 1.0 - trainpct
    # Fixed random seed so we get reproducible results!
    train, test = train_test_split(
        full_dataset, test_size=testsize, shuffle=True, random_state=42
    )
    return train, test
def accession(data):
    """Return the MGI marker accession identifier stored in *data*."""
    identifier = data["mgi_marker_accession_id"]
    return identifier
def get_phase_dir(self):
    """Get the phase rotating direction of stator flux stored in LUT

    Parameters
    ----------
    self : LUT
        a LUT object

    Returns
    ----------
    phase_dir : int
        rotating direction of phases +/-1
    """
    # A valid cached direction is strictly +1 or -1; anything else means the
    # value is missing/stale and must be recomputed from the stator flux.
    if self.phase_dir in (-1, 1):
        return self.phase_dir
    self.phase_dir = get_phase_dir_DataTime(self.Phi_wind[0])
    return self.phase_dir
def atomic_number(request):
    """Fixture: an atomic number supplied by the fixture's parameter."""
    return request.param
def data_upgrades():
    """Add any optional data upgrade migrations here!"""
    # Make the 'Name' field on module 2's form optional (Required = 0).
    op.execute('''
    UPDATE "ModuleForms" SET "Required" = 0
    WHERE "Name" = 'Name' AND "Module_ID" = 2
    ''')
def plot_metric(title='Plot of registration metric vs iterations'):
    """Plots the mutual information over registration iterations

    Parameters
    ----------
    title : str
        Title placed above the plot.

    Returns
    -------
    fig : matplotlib figure
        Figure with the metric trace and resolution-change markers.
    """
    # Reads the module-level `metric_values` and `multires_iterations` lists
    # populated by the registration callbacks. The previous `global`
    # statement was unnecessary for read-only access and has been removed.
    fig, ax = plt.subplots()
    ax.set_title(title)
    ax.set_xlabel('Iteration Number', fontsize=12)
    ax.set_ylabel('Mutual Information Cost', fontsize=12)
    ax.plot(metric_values, 'r')
    # Star markers at the iterations where the multi-resolution scheme
    # switched to a finer level.
    ax.plot(multires_iterations,
            [metric_values[index] for index in multires_iterations],
            'b*', label='change in resolution')
    ax.legend()
    return fig
def cross_entropy(model, _input, _target):
    """ Compute Cross Entropy between target and output diversity.

    Parameters
    ----------
    model : Model
        Model for generating output for compare with target sample.

    _input : theano.tensor.matrix
        Input sample.

    _target : theano.tensor.matrix
        Target sample.

    Returns
    -------
    theano.tensor.matrix
        Return Cross Entropy.
    """
    prediction = model.output(_input)
    return T.nnet.categorical_crossentropy(prediction, _target).mean()
def paste():
    """Paste and redirect.

    Stores the raw text, renders it (highlighted or as markdown) and
    redirects to the stored HTML page.
    """
    text = request.form['text']
    # Validate the payload size explicitly: the previous `assert` would be
    # stripped when Python runs with -O, silently removing the size limit.
    if not (0 <= len(text) <= ONE_MB):
        raise ValueError('paste size out of bounds: {} bytes'.format(len(text)))
    with UploadedFile.from_text(text) as uf:
        get_backend().store_object(uf)
    lang = request.form['language']
    if lang != 'rendered-markdown':
        # Syntax-highlighted paste view.
        with HtmlToStore.from_html(render_template(
            'paste.html',
            text=text,
            highlighter=get_highlighter(text, lang),
            raw_url=app.config['FILE_URL'].format(name=uf.name),
        )) as paste_obj:
            get_backend().store_html(paste_obj)
    else:
        # Markdown is rendered server-side into its own template.
        with HtmlToStore.from_html(render_template(
            'markdown.html',
            text=text,
            raw_url=app.config['FILE_URL'].format(name=uf.name),
        )) as paste_obj:
            get_backend().store_html(paste_obj)
    url = app.config['HTML_URL'].format(name=paste_obj.name)
    return redirect(url)
def get_groups_links(groups, tenant_id, rel='self', limit=None, marker=None):
    """
    Get the links to groups along with 'next' link
    """
    collection_url = get_autoscale_links(tenant_id, format=None)
    return get_collection_links(groups, collection_url, rel, limit, marker)
def over(expr: ir.ValueExpr, window: win.Window) -> ir.ValueExpr:
    """Construct a window expression.

    Parameters
    ----------
    expr
        A value expression
    window
        Window specification

    Returns
    -------
    ValueExpr
        A window function expression

    See Also
    --------
    ibis.window
    """
    prior_op = expr.op()
    # Merge into an existing WindowOp instead of nesting two of them.
    if isinstance(prior_op, ops.WindowOp):
        windowed_op = prior_op.over(window)
    else:
        windowed_op = ops.WindowOp(expr, window)
    result = windowed_op.to_expr()
    # Carry the source expression's name over when it has one.
    try:
        name = expr.get_name()
    except com.ExpressionError:
        return result
    return result.name(name)
def node_value(node: "Node") -> int:
    """
    Computes the value of a node.

    A leaf's value is the sum of its metadata entries. An inner node's value
    is the sum of the values of the children referenced by its entries,
    where entry ``i`` refers to the ``i``-th child (1-based). Entries of 0
    or beyond the number of children contribute nothing.
    """
    if not node.children:
        return sum(node.entries)
    value = 0
    for entry in node.entries:
        # Entries are 1-based child references. BUG FIX: indexing
        # children[entry - 1] directly made an entry of 0 wrap around to the
        # *last* child via Python's negative indexing; range-check instead
        # so 0 and out-of-range entries are skipped.
        if 1 <= entry <= len(node.children):
            value += node_value(node.children[entry - 1])
    return value
def load_mac_vendors():
    """ parses wireshark mac address db and returns dict of mac : vendor

    Only lines whose first column is exactly three colon-separated uppercase
    hex byte pairs (e.g. ``AA:BB:CC``) are used; keys are lowercased.
    """
    # Compile once, outside the loop (the original recompiled per line).
    prefix_re = re.compile(r'^([0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})$')
    entries = {}
    # BUG FIX: use a context manager so the handle is closed even when a
    # line raises; the original leaked the open file on error.
    with open('mac_vendors.db', 'r') as f:
        for line in f:
            fields = line.split()
            # match on first column being first six bytes
            if fields and prefix_re.match(fields[0]):
                # lowercase as convention
                entries[fields[0].lower()] = fields[1]
    return entries
def parse_time(s):
    """
    Parse time spec with optional s/m/h/d/w suffix
    """
    suffix = s[-1].lower()
    if suffix in secs:
        # Numeric part times the multiplier for the unit suffix.
        return int(s[:-1]) * secs[suffix]
    return int(s)
def resize_labels(labels, size):
    """Helper function to resize labels.

    Args:
        labels: A long tensor of shape `[batch_size, height, width]`.
        size: Target spatial size accepted by ``F.interpolate``.

    Returns:
        A long tensor of shape `[batch_size, new_height, new_width]`.
    """
    batch, height, width = labels.shape
    # Nearest-neighbour interpolation keeps label ids intact (no blending),
    # but interpolate requires a float tensor with a channel dimension.
    as_float = labels.view(batch, 1, height, width).float()
    resized = F.interpolate(as_float, size=size, mode='nearest')
    return resized.squeeze_(1).long()
def export_search(host, s, password, export_mode="raw", out=sys.stdout, username="admin", port=8089):
    """
    Exports events from a search using Splunk REST API to a local file.
    This is faster than performing a search/export from Splunk Python SDK.

    @param host: splunk server address
    @param s: search that matches events
    @param password: Splunk server password
    @param export_mode: default `raw`. `csv`, `xml`, or `json`
    @param out: local file pointer to write the results
    @param username: Splunk server username
    @param port: Splunk server port
    """
    import urllib3
    # Suppress the warning caused by verify=False below (self-signed certs
    # are common on Splunk management ports).
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    # NOTE(review): verify=False disables TLS certificate validation — fine
    # on a trusted network, but confirm this is intentional for production.
    r = requests.post("https://%s:%d/servicesNS/admin/search/search/jobs/export" % (host, port),
                      auth=(username, password),
                      data={'output_mode': export_mode,
                            'search': s,
                            'max_count': 1000000},
                      verify=False)
    # NOTE(review): r.text.encode() yields bytes, but the default
    # out=sys.stdout is a *text* stream on Python 3 — confirm callers pass a
    # binary file object (or that this runs under Python 2).
    out.write(r.text.encode('utf-8'))
def _extract_urls(html):
    """
    Try to find all embedded links, whether external or internal.

    Scans href/src attributes, bare (unquoted) URLs, and quoted URLs, and
    returns a sorted list of the unique matches.
    """
    # substitute real html symbols
    html = _replace_ampersands(html)
    urls = set()
    hrefrx = re.compile("""href\s*\=\s*['"](.*?)['"]""")
    for url in re.findall(hrefrx, html):
        urls.add(str(url))
    srcrx = re.compile("""src\s*\=\s*['"](.*?)['"]""")
    for url in re.findall(srcrx, html):
        urls.add(str(url))
    html = re.sub('%20', ' ', html, flags=re.DOTALL)
    # extract URLs that are not surrounded by quotes
    urlrx = re.compile("""[^'"](http[s]?://[\.a-zA-Z0-9/]+?)\s""")
    for url in re.findall(urlrx, html):
        urls.add(str(url))
    # extract URLs that are surrounded by quotes
    # remove whitespace
    html = re.sub('\s+', '', html)
    # BUG FIX: the single-quote pattern used to be compiled and then
    # immediately overwritten by the double-quote pattern, so single-quoted
    # URLs were silently dropped; scan with both patterns.
    quoted_patterns = (
        re.compile("'(http[s]?://[\.a-zA-Z0-9/]+?)'", flags=re.DOTALL),
        re.compile('"(http[s]?://[\.a-zA-Z0-9/]+?)"', flags=re.DOTALL),
    )
    for quoted_rx in quoted_patterns:
        for url in re.findall(quoted_rx, html):
            urls.add(url)
    # drop the empty string that empty attributes can produce
    urls.discard('')
    return sorted(urls)
def reorganize_data(texts):
    """
    Reorganize data to contain tuples of all signs combined and all trans combined

    :param texts: sentences in format of tuples of (sign, tran)
    :return: data reorganized
    """
    reorganized = []
    for sentence in texts:
        signs = [sign for sign, _ in sentence]
        trans = [tran for _, tran in sentence]
        reorganized.append((signs, trans))
    return reorganized
def stop(check_name, flavor, instance_name, remove, direct, location):
    """Stops an integration.

    \b
    $ di stop -r nginx
    Stopping containers... success!
    Removing containers... success!
    """
    # NOTE(review): `remove` is unused in this body — presumably consumed by
    # the CLI decorator or dead; confirm.
    # Validate that the requested check and flavor are supported.
    if check_name not in Checks:
        echo_failure('Check `{}` is not yet supported.'.format(check_name))
        sys.exit(1)
    if flavor not in Checks[check_name]:
        echo_failure('Flavor `{}` is not yet supported.'.format(flavor))
        sys.exit(1)
    check_class = Checks[check_name][flavor]
    settings = load_settings()
    # CLI-provided location wins over the saved setting, which wins over
    # the default checks directory.
    location = location or settings.get('location', CHECKS_DIR)
    location = check_class.get_location(
        location, instance_name=instance_name, direct=direct
    )
    echo_waiting('Stopping containers... ', nl=False)
    try:
        output, error = check_dir_down(location)
    except FileNotFoundError:
        # Nothing to stop: the compose directory was never created or was
        # already cleaned up.
        click.echo()
        echo_warning('Location `{}` already does not exist.'.format(location))
        sys.exit()
    if error:
        # Surface docker-compose output and propagate its exit status.
        click.echo()
        click.echo(output.rstrip())
        echo_failure('An unexpected Docker error (status {}) has occurred.'.format(error))
        sys.exit(error)
    echo_success('success!')
def client_new():
    """Create new client.

    Renders the client-creation form; on a valid submit, persists a new
    ``Client`` owned by the current user and redirects to its detail view.
    """
    form = ClientForm(request.form)
    if form.validate_on_submit():
        c = Client(user_id=current_user.get_id())
        # Generate the client's random credentials before copying in the
        # user-supplied form fields.
        c.gen_salt()
        form.populate_obj(c)
        db.session.add(c)
        db.session.commit()
        return redirect(url_for('.client_view', client_id=c.client_id))
    # GET request or validation failure: (re-)render the form with errors.
    return render_template(
        'invenio_oauth2server/settings/client_new.html',
        form=form,
    )
def query_schema_existence(conn, schema_name):
    """Function to verify whether the current database schema ownership is correct."""
    query = 'SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)'
    with conn.cursor() as cur:
        # Parameterized query; the driver handles quoting of schema_name.
        cur.execute(query, [schema_name])
        return cur.fetchone().exists
def get_number_of_params(model, trainable_only=False):
    """
    Get the number of parameters in a PyTorch Model

    :param model(torch.nn.Model):
    :param trainable_only(bool): If True, only count the trainable parameters
    :return(int): The number of parameters in the model
    """
    total = 0
    for param in model.parameters():
        # Skip frozen parameters when only trainable ones are requested.
        if trainable_only and not param.requires_grad:
            continue
        total += param.numel()
    return total
def _test_conv_adj_fourier_hrf(ai_s, hrf):
    """ Helper to test the adj conv with a Fourier implementation,
    the kernel being the HRF.

    Computes the adjoint convolution twice — once with the simple
    time-domain implementation, once spectrally — and asserts the results
    agree to 1e-7 absolute tolerance.
    """
    adj_ar_s_ref = simple_retro_convolve(hrf, ai_s)  # time-domain reference
    adj_ar_s_test = spectral_retro_convolve(hrf, ai_s)  # Fourier-domain version
    assert(np.allclose(adj_ar_s_ref, adj_ar_s_test, atol=1.0e-7))
def _time_requests(base_url, urls):
    """Time a GET to each relative URL; return elapsed seconds per request."""
    timings = []
    for relative_url in urls:
        start = time.perf_counter()
        requests.get(base_url + relative_url)
        end = time.perf_counter()
        timings.append(end - start)
    return timings


def chart1(request):
    """
    This view tests the server speed for transferring JSON and XML objects.

    :param request: The AJAX request
    :return: JsonResponse of the dataset.
    """
    # Derive the server's base URL by stripping the relative path from the
    # absolute URL of this request.
    full_url = HttpRequest.build_absolute_uri(request)
    relative = HttpRequest.get_full_path(request)
    base_url = full_url[:-len(relative)]
    request_amount = ['10', '100', '200', '500', '1000']
    json_urls = [reverse('objects:leads_json', args=[x]) for x in request_amount]
    xml_urls = [reverse('objects:leads_xml', args=[x]) for x in request_amount]
    # The two timing loops were duplicated; factored into _time_requests.
    json_data = _time_requests(base_url, json_urls)
    xml_data = _time_requests(base_url, xml_urls)
    # Chart.js-compatible payload: one dataset per serialization format.
    final_data = {
        'labels': request_amount,
        'datasets': [
            {
                'label': 'JSON',
                'backgroundColor': 'rgba(255, 99, 132, 0.2)',
                'borderColor': 'rgba(255,99,132,1)',
                'data': json_data,
                'borderWidth': 2,
                'yAxisID': 'first-y-axis'
            },
            {
                'label': 'XML',
                'backgroundColor': 'rgba(54, 162, 235, 0.2)',
                'borderColor': 'rgba(54, 162, 235, 1)',
                'data': xml_data,
                'borderWidth': 2,
                'yAxisID': 'first-y-axis'
            }
        ]
    }
    return JsonResponse(final_data)
async def async_setup_platform(
    hass: HomeAssistant,
    _: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Add lights from the main Qwikswitch component."""
    # This platform is only set up via discovery from the main qwikswitch
    # integration; ignore direct configuration.
    if discovery_info is None:
        return

    qsusb = hass.data[QWIKSWITCH]
    # One QSLight entity per discovered device id.
    devs = [QSLight(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]]
    add_entities(devs)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution with padding.

    Padding equals the dilation so spatial size is preserved at stride 1.
    BUG FIX: `dilation` was accepted but never forwarded to Conv2d, so a
    dilated call silently built a regular convolution with extra padding.
    """
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, bias=False)
def img_mime_type(img):
    """Returns image MIME type or ``None``.

    Parameters
    ----------
    img: `PIL.Image`
        PIL Image object.

    Returns
    -------
    mime_type : `str`
        MIME string like "image/jpg" or ``None``.
    """
    if not img.format:
        return None
    # Map the PIL format name (e.g. "PNG") to an extension and look it up.
    extension = "." + img.format.lower()
    return mimetypes.types_map.get(extension)
def test_subnet_mask_subnet_to_num():
    """Test SubnetMask subnet to number converter"""
    # Same conversions must hold with and without an explicit subnet_type.
    for extra_kwargs in ({}, {'subnet_type': 'ipv4'}):
        assert SubnetMask._subnet_to_num(None, **extra_kwargs) is None
        assert SubnetMask._subnet_to_num(24, **extra_kwargs) == 24
        assert SubnetMask._subnet_to_num('24', **extra_kwargs) == 24
    # Dotted-quad masks convert to their prefix length for ipv4.
    assert SubnetMask._subnet_to_num('255.255.128.0', subnet_type='ipv4') == 17
def set_log_level(verbose, match=None, return_old=False):
    """Convenience function for setting the logging level

    Parameters
    ----------
    verbose : bool, str, int, or None
        The verbosity of messages to print. If a str, it can be either DEBUG,
        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
        convenience and are equivalent to passing in logging.DEBUG, etc.
        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
    match : str | None
        String to match. Only those messages that both contain a substring
        that regexp matches ``'match'`` (and the ``verbose`` level) will be
        displayed.
    return_old : bool
        If True, return the old verbosity level and old match.

    Returns
    -------
    out : tuple | None
        ``(old_verbose, old_match)`` when ``return_old`` is True, else None.

    Raises
    ------
    ValueError
        If a string ``verbose`` is not one of the known level names.
    TypeError
        If ``verbose`` is neither a bool nor a string.

    Notes
    -----
    If ``verbose=='debug'``, then the ``vispy`` method emitting the log
    message will be prepended to each log message, which is useful for
    debugging. If ``verbose=='debug'`` or ``match is not None``, then a
    small performance overhead is added. Thus it is suggested to only use
    these options when performance is not crucial.

    See also
    --------
    vispy.util.use_log_level
    """
    # This method is responsible for setting properties of the handler and
    # formatter such that proper messages (possibly with the vispy caller
    # prepended) are displayed. Storing log messages is only available
    # via the context handler (use_log_level), so that configuration is
    # done by the context handler itself.
    if isinstance(verbose, bool):
        verbose = 'info' if verbose else 'warning'
    if isinstance(verbose, str):
        verbose = verbose.lower()
        if verbose not in logging_types:
            raise ValueError('Invalid argument "%s"' % verbose)
        # Translate the level name into the numeric logging constant.
        verbose = logging_types[verbose]
    else:
        raise TypeError('verbose must be a bool or string')
    logger = logging.getLogger('vispy')
    old_verbose = logger.level
    # _lh is the module-level vispy log handler; setting the match returns
    # the previous match pattern.
    old_match = _lh._vispy_set_match(match)
    logger.setLevel(verbose)
    # At DEBUG level, prepend the emitting vispy function to each message.
    if verbose <= logging.DEBUG:
        _lf._vispy_set_prepend(True)
    else:
        _lf._vispy_set_prepend(False)
    out = None
    if return_old:
        out = (old_verbose, old_match)
    return out
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
    """ Test that the penalizing node claims any published
    HTLC transactions

    Node topology:
    l1 <-> l2 <-> l3 <-> l4
                   ^---> l5

    l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
    l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
    we snapshot l2
    l2 pushes money to l3 (updating state)
    l2 + l3 go offline; l2 is backed up from snapshot
    l1 fails the channel with l2, fulfilling the stranded htlc onchain
    l2 comes back online, force closes channel with l3
    block chain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
    both of which have a delay. l2 goes ahead and 'steals back' their
    output + the htlc they fulfill
    l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
    some blocks are mined. the dust settles.
    we check the accounting.
    """
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # The 'disconnect' options make l1/l5 drop before sending
    # update_fulfill_htlc, which is what strands the HTLCs across l2-l3.
    l1, l2, l3, l4, l5 = node_factory.get_nodes(
        5,
        opts=[
            {
                'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
                'may_reconnect': True,
                'dev-no-reconnect': None,
            }, {
                'plugin': coin_mvt_plugin,
                'dev-no-reconnect': None,
                'may_reconnect': True,
                'allow_broken_log': True,
            }, {
                'plugin': coin_mvt_plugin,
                'dev-no-reconnect': None,
                'may_reconnect': True,
                'allow_broken_log': True,
            }, {
                'dev-no-reconnect': None,
            }, {
                'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
                'may_reconnect': True,
                'dev-no-reconnect': None,
                'allow_broken_log': True,
            }
        ]
    )

    node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
    node_factory.join_nodes([l3, l5], wait_for_announce=True)

    channel_id = first_channel_id(l2, l3)

    # push some money so that 1 + 4 can both send htlcs
    inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
    l1.rpc.pay(inv['bolt11'])
    inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
    l2.rpc.pay(inv['bolt11'])

    # now we send two 'sticky' htlcs, l1->l5 + l4->l1
    amt = 10**8 // 2
    sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
    route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
    l1.rpc.sendpay(route, sticky_inv_1['payment_hash'])
    l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')

    sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
    route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
    l4.rpc.sendpay(route, sticky_inv_2['payment_hash'])
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')

    # Both stranded HTLCs are now pending on the l2-l3 channel.
    wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)

    # make database snapshot of l2
    l2.stop()
    l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
    copyfile(l2_db_path, l2_db_path_bak)
    l2.start()
    sync_blockheight(bitcoind, [l2])

    # push some money from l3->l2, so that the commit counter advances
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    inv = l3.rpc.invoice(10**4, '1', 'push')
    # Make sure gossipd in l2 knows it's active
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
    l2.rpc.pay(inv['bolt11'])

    # stop both nodes, roll back l2's database
    l2.stop()
    l3.stop()
    copyfile(l2_db_path_bak, l2_db_path)

    # start l2, now back a bit. force close channel with l3 while l3 is still offline
    l2.start()
    sync_blockheight(bitcoind, [l2])
    l2.rpc.close(l3.info['id'], 1)
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # reconnect with l1, which will fulfill the payment
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log('got commitsig .*: feerate 15000, 0 added, 1 fulfilled, 0 failed, 0 changed')
    l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv_2['payment_hash']))

    # l2 moves on for closed l3
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
                             'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])

    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')

    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')

    # after 5 blocks, l2 reclaims both their DELAYED_OUTPUT_TO_US and their delayed output
    bitcoind.generate_block(5, wait_for_mempool=0)
    sync_blockheight(bitcoind, [l2])
    l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
                             'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
    bitcoind.generate_block(10, wait_for_mempool=2)
    l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                   'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')

    # l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
    # notes that they've successfully claimed to_local and the fulfilled htlc)
    l3.start()
    sync_blockheight(bitcoind, [l3])
    l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
                             'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
                             'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by OUR_PENALTY_TX',
                             'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
                             'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
                             ' by OUR_PENALTY_TX',
                             'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by THEIR_DELAYED_CHEAT',
                             'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by THEIR_DELAYED_CHEAT',
                             'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
                             'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
    bitcoind.generate_block(1, wait_for_mempool=2)  # OUR_PENALTY_TX + OUR_HTLC_TIMEOUT_TO_US
    l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
                           'by our proposal OUR_PENALTY_TX')
    l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # 100 blocks later, l3+l2 are both done
    bitcoind.generate_block(100)
    l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
    l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))

    # Both sides' ledgers must balance to zero for the closed channel.
    assert account_balance(l3, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
def export_concept_samples(num_samples=100, path_to_file='./samples.csv', labeling_threshold=0.9):
    """Samples 'num_samples' points from each concept, labels each point with
    the concept of highest membership as well as all concepts with a membership
    above labeling_threshold (relative to the highest membership) and stores
    them as a csv file under the given 'path_to_file'."""
    # collect data points
    samples = []
    for concept in this._concepts.values():
        samples += concept.sample(num_samples)
    # collect labels
    labeled_samples = []
    for sample in samples:
        # Membership of this sample in every concept, highest first.
        memberships = [(name, concept.membership_of(sample))
                       for name, concept in this._concepts.items()]
        memberships.sort(key=lambda x: x[1], reverse=True)
        labeled_sample = list(sample)
        labeled_sample.append(memberships[0][0])
        threshold = labeling_threshold * memberships[0][1]
        # BUG FIX: bound idx by the list length — when *all* concepts cleared
        # the threshold the original loop ran past the end (IndexError).
        idx = 1
        while idx < len(memberships) and memberships[idx][1] > threshold:
            labeled_sample.append(memberships[idx][0])
            idx += 1
        labeled_samples.append(labeled_sample)
    with open(path_to_file, 'w') as f:
        for labeled_sample in labeled_samples:
            f.write("{0}\n".format(",".join(map(str, labeled_sample))))
def print_formatted_text(
    output: Output,
    formatted_text: AnyFormattedText,
    style: BaseStyle,
    style_transformation: Optional[StyleTransformation] = None,
    color_depth: Optional[ColorDepth] = None,
) -> None:
    """
    Print a list of (style_str, text) tuples in the given style to the output.

    :param output: the `Output` implementation to write to.
    :param formatted_text: anything accepted by `to_formatted_text`.
    :param style: style used to resolve each fragment's style string.
    :param style_transformation: optional transformation applied on top of the
        style; defaults to a no-op transformation.
    :param color_depth: terminal color depth; autodetected when omitted.
    """
    fragments = to_formatted_text(formatted_text)
    style_transformation = style_transformation or DummyStyleTransformation()
    color_depth = color_depth or ColorDepth.default()

    # Reset first.
    output.reset_attributes()
    output.enable_autowrap()

    # Print all (style_str, text) tuples.
    # Cache style-string -> Attrs lookups: many fragments share a style.
    attrs_for_style_string = _StyleStringToAttrsCache(
        style.get_attrs_for_style_str, style_transformation
    )

    for style_str, text, *_ in fragments:
        attrs = attrs_for_style_string[style_str]

        if attrs:
            output.set_attributes(attrs, color_depth)
        else:
            output.reset_attributes()

        # Eliminate carriage returns
        text = text.replace("\r", "")

        # Assume that the output is raw, and insert a carriage return before
        # every newline. (Also important when the front-end is a telnet client.)
        output.write(text.replace("\n", "\r\n"))

    # Reset again.
    output.reset_attributes()
    output.flush()
def check_satisfy_dataset(w, D, involved_predicates=None):
    """
    This function is to check whether all facts in ``D`` have been installed
    in each of the ruler intervals of the given Window ``w`` if facts in
    ruler intervals hold in ``D``.

    Args:
        w (a Window instance):
        D (dictionary of dictionary object): contain all facts
        involved_predicates (a list of str): predicates that need to be
            checked; defaults to checking nothing.

    Returns:
        boolean
    """
    # Avoid the mutable-default-argument anti-pattern; an empty tuple keeps
    # the original "check nothing -> True" behavior.
    if involved_predicates is None:
        involved_predicates = ()
    # The literals mapping is invariant across the loops, so look it up once.
    literals = w.get_ruler_intervals_literals()
    for ruler_interval in w.ruler_intervals:
        for predicate in involved_predicates:
            # D[predicate] is either a bare interval list or a mapping of
            # entity -> interval list.
            if isinstance(D[predicate], list):
                interval_list = D[predicate]
                if interval_intesection_intervallist(ruler_interval, interval_list) and \
                        Atom(predicate) not in literals[ruler_interval]:
                    return False
            else:
                for entity, interval_list in D[predicate].items():
                    if interval_intesection_intervallist(ruler_interval, interval_list) and \
                            Atom(predicate) not in literals[ruler_interval]:
                        return False
    return True
def interp_at(d, g, varargs=None, dim=None, dask="parallelized"):
    """
    Interpolates a variable to another.

    Example : varargs = [THETA, mld] : THETA(t, z, y, x) is interpolated with Z=mld(t, y, x)

    NOTE(review): assumptions inferred from usage — confirm:
    - `d` is an xarray Dataset holding both variables.
    - `g` is unused in this body; presumably kept for a shared call signature.
    - `varargs` must be a [var, coordvar] pair; leaving it as the default
      None raises TypeError on the unpacking below.
    """
    var, coordvar = varargs
    # When not given, the interpolation dimension is the one dimension of
    # `var` that `coordvar` lacks (e.g. the vertical axis z).
    dim = (
        dim if dim is not None else set(d[var].dims).difference(d[coordvar].dims).pop()
    )
    X = d[dim].values
    data = xr.apply_ufunc(
        _interp1DAt,
        d[var],
        d[coordvar],
        input_core_dims=[[dim], []],
        dask=dask,
        output_dtypes=[float],
        kwargs={"X": X},
        keep_attrs=True,
    )
    # Record the interpolation's provenance in the result attributes.
    data.attrs.update(
        long_name=d[var].attrs.get("long_name", var)
        + " interpolated to {} along {}".format(coordvar, dim),
        name="{}_{}_{}".format(var, dim, coordvar),
    )
    return data
def get_history():
    """Get command usage history from History.sublime-project.

    Returns:
        list[str]: one whitespace-stripped line per stored history entry.
    """
    history_path = '%s/%s/%s' % (sublime.packages_path(),
                                 "TextTransmute",
                                 "History.sublime-project")
    # BUG FIX: use `with` so the file handle is closed even if reading
    # raises (the original leaked the handle on error).
    with open(history_path, 'r') as f:
        return [line.strip() for line in f]
def inprogress(metric: Gauge, labels: Dict[str, str] = None) -> Callable[..., Any]:
    """
    This decorator provides a convenient way to track in-progress requests
    (or other things) in a callable.

    This decorator function wraps a function with code to track how many
    of the measured items are in progress.

    The metric is incremented before calling the wrapped function and
    decremented when the wrapped function is complete — including when it
    raises, so a failing call cannot leave the gauge permanently inflated.

    :param metric: a metric to increment and decrement. The metric object
      being updated is expected to be a Gauge metric object.

    :param labels: a dict of extra labels to associate with the metric.

    :return: a coroutine function that wraps the decorated function
    """
    if not isinstance(metric, Gauge):
        raise Exception(
            "inprogess decorator expects a Gauge metric but got: {}".format(metric)
        )

    def track(func):
        """
        This function wraps a decorated callable with metric incrementing
        and decrementing logic.

        :param func: the callable to be tracked.

        :returns: the return value from the decorated callable.
        """

        @wraps(func)
        async def func_wrapper(*args, **kwds):
            metric.inc(labels)
            # BUG FIX: decrement in `finally` so the gauge is restored even
            # when the wrapped callable (or awaited coroutine) raises.
            try:
                rv = func(*args, **kwds)
                if isinstance(rv, asyncio.Future) or asyncio.iscoroutine(rv):
                    rv = await rv
                return rv
            finally:
                metric.dec(labels)

        return func_wrapper

    return track
def b58_wrapper_to_b64_public_address(b58_string: str) -> Optional[str]:
    """Convert a b58-encoded PrintableWrapper address into a b64-encoded PublicAddress protobuf"""
    wrapper = b58_wrapper_to_protobuf(b58_string)
    # Guard clause: an empty/unparseable wrapper yields no address.
    if not wrapper:
        return None
    serialized = wrapper.public_address.SerializeToString()
    return base64.b64encode(serialized).decode("utf-8")
def test_pick_identifier_with_cover_task(app, testdata):
    """Check that cover_metadata is updated for documents and series."""
    document = testdata["documents"][1]
    pick_identifier_with_cover(app, record=document)
    tasks.save_record.assert_called_once()

    series_record = testdata["series"][1]
    pick_identifier_with_cover(app, record=series_record)
    assert tasks.save_record.call_count == 2
def render_to_AJAX(status, messages):
    """Build an HTTP response for an AJAX request from the XML template."""
    template = loader.get_template("AJAXresponse.xml")
    context = Context({'status': status,
                       'messages': messages})
    return HttpResponse(template.render(context))
def laplace(loc=0.0, scale=1.0, size=None):  # real signature unknown; restored from __doc__
    # Raw docstring: the original non-raw string turned "\f" in "\frac" into a
    # literal form-feed character and made "\m", "\l", "\e" invalid escape
    # sequences (SyntaxWarning on modern CPython).
    r"""
    laplace(loc=0.0, scale=1.0, size=None)
    Draw samples from the Laplace or double exponential distribution with
    specified location (or mean) and scale (decay).
    The Laplace distribution is similar to the Gaussian/normal distribution,
    but is sharper at the peak and has fatter tails. It represents the
    difference between two independent, identically distributed exponential
    random variables.
    Parameters
    ----------
    loc : float or array_like of floats, optional
        The position, :math:`\mu`, of the distribution peak. Default is 0.
    scale : float or array_like of floats, optional
        :math:`\lambda`, the exponential decay. Default is 1.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``loc`` and ``scale`` are both scalars.
        Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Laplace distribution.
    Notes
    -----
    It has the probability density function
    .. math:: f(x; \mu, \lambda) = \frac{1}{2\lambda}
                                   \exp\left(-\frac{|x - \mu|}{\lambda}\right).
    The first law of Laplace, from 1774, states that the frequency
    of an error can be expressed as an exponential function of the
    absolute magnitude of the error, which leads to the Laplace
    distribution. For many problems in economics and health
    sciences, this distribution seems to model the data better
    than the standard Gaussian distribution.
    References
    ----------
    .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
           Mathematical Functions with Formulas, Graphs, and Mathematical
           Tables, 9th printing," New York: Dover, 1972.
    .. [2] Kotz, Samuel, et. al. "The Laplace Distribution and
           Generalizations, " Birkhauser, 2001.
    .. [3] Weisstein, Eric W. "Laplace Distribution."
           From MathWorld--A Wolfram Web Resource.
           http://mathworld.wolfram.com/LaplaceDistribution.html
    .. [4] Wikipedia, "Laplace distribution",
           https://en.wikipedia.org/wiki/Laplace_distribution
    Examples
    --------
    Draw samples from the distribution
    >>> loc, scale = 0., 1.
    >>> s = np.random.laplace(loc, scale, 1000)
    Display the histogram of the samples, along with
    the probability density function:
    >>> import matplotlib.pyplot as plt
    >>> count, bins, ignored = plt.hist(s, 30, density=True)
    >>> x = np.arange(-8., 8., .01)
    >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
    >>> plt.plot(x, pdf)
    Plot Gaussian for comparison:
    >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
    ...      np.exp(-(x - loc)**2 / (2 * scale**2)))
    >>> plt.plot(x,g)
    """
    # Stub restored from __doc__; intentionally does nothing.
    pass
def parse_params(environ, *include):
    """Parse out the filter, sort, etc., parameters from a request.

    Positional names in *include restrict which parameters are parsed;
    with no names given, every known parameter is parsed.
    """
    query_string = environ.get('QUERY_STRING')
    params = parse_qs(query_string) if query_string else {}

    # (name, unserializer, default) for every supported query parameter.
    param_handlers = (
        ('embedded', params_serializer.unserialize_string, None),
        ('filter', params_serializer.unserialize_string, None),
        ('sort', params_serializer.unserialize_string, None),
        ('offset', int, 0),
        ('limit', int, 0),
        ('show_hidden', bool_field, False)
    )

    wanted = set(include) if len(include) > 0 else None
    results = {}
    for name, fn, default in param_handlers:
        if wanted and name not in wanted:
            continue
        results[name] = parse_param(params, name, fn, default=default)

    # 'context' is always produced unless a restriction excludes it.
    if not wanted or 'context' in wanted:
        results['context'] = get_context(environ)
    return results
def _attribute_is_an_ipv4(
    attribute_name, attribute_value, bridge_name, port_name
):
    """
    Check if an attribute is an IPv4 address.

    :param attribute_name: The attribute name
    :param attribute_value: The attribute value
    :param bridge_name: The attribute bridge name
    :param port_name: The attribute port name
    :raises SetupOVSConfigException: when the value is not a string matching
        the IPv4 pattern
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # str subclasses, which behave identically for regex matching.
    if not isinstance(attribute_value, str) or not helpers.IPv4_ADDRESS_MATCHER.match(
        attribute_value
    ):
        raise SetupOVSConfigException(
            "Bridge {} Port {}: attribute {} must be an"
            " IPv4 address".format(bridge_name, port_name, attribute_name)
        )
def create_dict_facade_for_object_vars_and_mapping_with_filters(cls,      # type: Type[Mapping]
                                                                include,  # type: Union[str, Tuple[str]]
                                                                exclude,  # type: Union[str, Tuple[str]]
                                                                private_name_prefix=None  # type: str
                                                                ):
    # type: (...) -> DictMethods
    """
    Build the ``__iter__``/``__getitem__`` pair exposing a filtered dict facade
    over an object's ``vars()`` merged with its Mapping superclass entries.

    :param cls: the Mapping subclass whose ``super()`` methods serve as fallback
    :param include: attribute name(s) to expose; all others are hidden
    :param exclude: attribute name(s) to hide
    :param private_name_prefix: if provided, only the fields not starting with this prefix will be exposed. Otherwise
        all will be exposed
    :return: a DictMethods holder carrying the two generated methods
    """
    public_fields_only = private_name_prefix is not None
    def __iter__(self):
        """
        Generated by @autodict.
        Implements the __iter__ method from collections.Iterable by relying on a filtered vars(self)
        :param self:
        :return:
        """
        # Instance attributes first, then any superclass mapping keys that are
        # not shadowed by an instance attribute of the same name.
        myattrs = tuple(att_name for att_name in iterate_on_vars(self))
        for att_name in chain(myattrs, (o for o in super(cls, self).__iter__() if o not in myattrs)):
            # filter based on the name (include/exclude + private/public)
            if is_attr_selected(att_name, include=include, exclude=exclude) and \
                    (not public_fields_only or not att_name.startswith(private_name_prefix)):
                # use that name
                yield att_name
    def __getitem__(self, key):
        """
        Generated by @autodict.
        Implements the __getitem__ method from collections.Mapping by relying on a filtered getattr(self, key)
        """
        if hasattr(self, key):
            # A property may shadow the raw attribute name; resolve it first.
            key = possibly_replace_with_property_name(self.__class__, key)
            if is_attr_selected(key, include=include, exclude=exclude) and \
                    (not public_fields_only or not key.startswith(private_name_prefix)):
                return getattr(self, key)
            else:
                # Hidden on the instance: fall back to the superclass mapping.
                try:
                    # noinspection PyUnresolvedReferences
                    return super(cls, self).__getitem__(key)
                except Exception as e:
                    raise KeyError('@autodict generated dict view - {key} is a '
                                   'hidden field and super[{key}] raises an exception: {etyp} {err}'
                                   ''.format(key=key, etyp=type(e).__name__, err=e))
        else:
            # Not an attribute at all: the superclass mapping is the only hope.
            try:
                # noinspection PyUnresolvedReferences
                return super(cls, self).__getitem__(key)
            except Exception as e:
                raise KeyError('@autodict generated dict view - {key} is an '
                               'invalid field name (was the constructor called?). Delegating to '
                               'super[{key}] raises an exception: {etyp} {err}'
                               ''.format(key=key, etyp=type(e).__name__, err=e))
    return DictMethods(iter=__iter__, getitem=__getitem__)
def now():
    """Return the current Unix timestamp truncated to whole seconds.

    :return: int epoch seconds
    """
    current = time.time()
    return int(current)
def get_yourContactINFO(rows2):
    """Return your personal contact info details (the first row of rows2)."""
    first_row = rows2[0]
    return first_row
def hafnian(
    A, loop=False, recursive=True, rtol=1e-05, atol=1e-08, quad=True, approx=False, num_samples=1000
):  # pylint: disable=too-many-arguments
    """Returns the hafnian of a matrix.
    For more direct control, you may wish to call :func:`haf_real`,
    :func:`haf_complex`, or :func:`haf_int` directly.
    Args:
        A (array): a square, symmetric array of even dimensions.
        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
        recursive (bool): If ``True``, the recursive algorithm is used. Note:
            the recursive algorithm does not currently support the loop hafnian.
            If ``loop=True``, then this keyword argument is ignored.
        rtol (float): the relative tolerance parameter used in ``np.allclose``.
        atol (float): the absolute tolerance parameter used in ``np.allclose``.
        quad (bool): If ``True``, the hafnian algorithm is performed with quadruple precision.
        approx (bool): If ``True``, an approximation algorithm is used to estimate the hafnian. Note that
            the approximation algorithm can only be applied to matrices ``A`` that only have non-negative entries.
        num_samples (int): If ``approx=True``, the approximation algorithm performs ``num_samples`` iterations
            for estimation of the hafnian of the non-negative matrix ``A``.
    Returns:
        np.int64 or np.float64 or np.complex128: the hafnian of matrix A.
    """
    # pylint: disable=too-many-return-statements,too-many-branches
    input_validation(A, rtol=rtol, atol=atol)
    matshape = A.shape
    if matshape == (0, 0):
        return 1
    # The hafnian of an odd-dimension matrix vanishes (no perfect matching).
    if matshape[0] % 2 != 0 and not loop:
        return 0.0
    if np.allclose(np.diag(np.diag(A)), A, rtol=rtol, atol=atol):
        # Diagonal matrix: only the loop hafnian can be non-zero.
        if loop:
            return np.prod(np.diag(A))
        return 0
    if matshape[0] % 2 != 0 and loop:
        # Pad to even dimension; the extra unit self-loop leaves the value unchanged.
        A = np.pad(A, pad_width=((0, 1), (0, 1)), mode="constant")
        A[-1, -1] = 1.0
        matshape = A.shape
    if matshape[0] == 2:
        if loop:
            return A[0, 1] + A[0, 0] * A[1, 1]
        return A[0, 1]
    if matshape[0] == 4:
        if loop:
            result = (
                A[0, 1] * A[2, 3]
                + A[0, 2] * A[1, 3]
                + A[0, 3] * A[1, 2]
                + A[0, 0] * A[1, 1] * A[2, 3]
                + A[0, 1] * A[2, 2] * A[3, 3]
                + A[0, 2] * A[1, 1] * A[3, 3]
                + A[0, 0] * A[2, 2] * A[1, 3]
                + A[0, 0] * A[3, 3] * A[1, 2]
                + A[0, 3] * A[1, 1] * A[2, 2]
                + A[0, 0] * A[1, 1] * A[2, 2] * A[3, 3]
            )
            return result
        return A[0, 1] * A[2, 3] + A[0, 2] * A[1, 3] + A[0, 3] * A[1, 2]
    if approx:
        if np.any(np.iscomplex(A)):
            raise ValueError("Input matrix must be real")
        if np.any(A < 0):
            raise ValueError("Input matrix must not have negative entries")
    # NOTE: ``np.complex`` was removed in NumPy 1.24; test the dtype family instead.
    if np.issubdtype(A.dtype, np.complexfloating):
        # array data is complex type
        if np.any(np.iscomplex(A)):
            # array values contain non-zero imaginary parts
            return haf_complex(A, loop=loop, recursive=recursive, quad=quad)
        # all array values have zero imaginary parts
        return haf_real(np.float64(A.real), loop=loop, recursive=recursive, quad=quad)
    if np.issubdtype(A.dtype, np.integer) and not loop:
        # array data is an integer type, and the user is not
        # requesting the loop hafnian
        return haf_int(np.int64(A))
    if np.issubdtype(A.dtype, np.integer) and loop:
        # array data is an integer type, and the user is
        # requesting the loop hafnian. Currently no
        # integer function for loop hafnians, have to instead
        # convert to float and use haf_real
        A = np.float64(A)
    return haf_real(
        A, loop=loop, recursive=recursive, quad=quad, approx=approx, nsamples=num_samples
    )
def test_aws_lambda_handler_default_environment(mock_ec2paramstore, mock_create_app, mock_awsgi_response):
    """
    Verify the lambda handler reads the default parameter names from the
    parameter store and wires the app up correctly when no environment
    variables are set.
    """
    param_names = {
        "config_name": "ebr_board_config",
        "vault_config_name": "ebr_board_vault_config",
        "vault_creds_name": "ebr_board_vault_creds",
    }
    store = mock_ec2paramstore.return_value
    store.get_parameters.return_value = {
        "ebr_board_config": "config_data",
        "ebr_board_vault_config": "vault_config_data",
        "ebr_board_vault_creds": "vault_creds_data",
    }

    handler("test-event", "test-context")

    store.get_parameters.assert_called_with(list(param_names.values()), decrypt=True)
    mock_create_app.assert_called_with(
        config="config_data", vault_config="vault_config_data", vault_creds="vault_creds_data", config_format="yaml"
    )
    mock_awsgi_response.assert_called_with(mock_create_app.return_value, "test-event", "test-context")
def filter_out_nones(data):
    """
    Lazily drop every falsey element of ``data`` (None, 0, "", empty
    containers), yielding the rest in order.
    """
    return (item for item in data if item)
def start(args_string):
    """Launch and display a TensorBoard instance as if at the command line.
    Args:
      args_string: Command-line arguments to TensorBoard, to be
        interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0".
        Shell metacharacters are not supported: e.g., "--logdir 2>&1" will
        point the logdir at the literal directory named "2>&1".
    """
    context = _get_context()
    # IPython is optional; when absent we fall back to plain print output.
    try:
        import IPython
        import IPython.display
    except ImportError:
        IPython = None
    if context == _CONTEXT_NONE:
        # Not running inside a notebook: no display handle to update in place.
        handle = None
        print("Launching TensorBoard...")
    else:
        handle = IPython.display.display(
            IPython.display.Pretty("Launching TensorBoard..."),
            display_id=True,
        )
    def print_or_update(message):
        # Update the existing notebook output cell when possible; else print.
        if handle is None:
            print(message)
        else:
            handle.update(IPython.display.Pretty(message))
    parsed_args = shlex.split(args_string, comments=True, posix=True)
    start_result = manager.start(parsed_args)
    # Dispatch on the result type returned by manager.start().
    if isinstance(start_result, manager.StartLaunched):
        _display(
            port=start_result.info.port,
            print_message=False,
            display_handle=handle,
        )
    elif isinstance(start_result, manager.StartReused):
        # An instance with the same args is already running; reuse it.
        template = (
            "Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. "
            "(Use '!kill {pid}' to kill it.)"
        )
        message = template.format(
            port=start_result.info.port,
            pid=start_result.info.pid,
            delta=_time_delta_from_info(start_result.info),
        )
        print_or_update(message)
        _display(
            port=start_result.info.port,
            print_message=False,
            display_handle=None,
        )
    elif isinstance(start_result, manager.StartFailed):
        def format_stream(name, value):
            # value: "" means empty stream, None means unreadable.
            if value == "":
                return ""
            elif value is None:
                return "\n<could not read %s>" % name
            else:
                return "\nContents of %s:\n%s" % (name, value.strip())
        message = (
            "ERROR: Failed to launch TensorBoard (exited with %d).%s%s"
            % (
                start_result.exit_code,
                format_stream("stderr", start_result.stderr),
                format_stream("stdout", start_result.stdout),
            )
        )
        print_or_update(message)
    elif isinstance(start_result, manager.StartExecFailed):
        # The subprocess could not even be exec'd (binary missing or broken).
        the_tensorboard_binary = (
            "%r (set by the `TENSORBOARD_BINARY` environment variable)"
            % (start_result.explicit_binary,)
            if start_result.explicit_binary is not None
            else "`tensorboard`"
        )
        if start_result.os_error.errno == errno.ENOENT:
            message = (
                "ERROR: Could not find %s. Please ensure that your PATH contains "
                "an executable `tensorboard` program, or explicitly specify the path "
                "to a TensorBoard binary by setting the `TENSORBOARD_BINARY` "
                "environment variable." % (the_tensorboard_binary,)
            )
        else:
            message = "ERROR: Failed to start %s: %s" % (
                the_tensorboard_binary,
                start_result.os_error,
            )
        print_or_update(textwrap.fill(message))
    elif isinstance(start_result, manager.StartTimedOut):
        message = (
            "ERROR: Timed out waiting for TensorBoard to start. "
            "It may still be running as pid %d." % start_result.pid
        )
        print_or_update(message)
    else:
        # Unknown result type: surface it loudly rather than guessing.
        raise TypeError(
            "Unexpected result from `manager.start`: %r.\n"
            "This is a TensorBoard bug; please report it." % start_result
        )
def BytesToGb(size):
    """Converts a disk size in bytes to GB."""
    if not size:
        return None
    quotient, remainder = divmod(size, constants.BYTES_IN_ONE_GB)
    if remainder:
        raise calliope_exceptions.ToolException(
            'Disk size must be a multiple of 1 GB. Did you mean [{0}GB]?'
            .format(quotient + 1))
    return quotient
def build_report(drivers: dict, desc=False) -> [[str, str, str], ...]:
    """
    Creates a race report: [[Driver.name, Driver.team, Driver.time], ...]
    Default order of drivers from best time to worst.
    """
    ordered = sort_drivers_dict(drivers, desc)
    return [entry.get_stats for entry in ordered.values()]
def add_sites_sheet(ws, cols, lnth):
    """Populate the sites sheet: header/reference columns mirror
    'Capacity_km2_MNO'; remaining columns get array lookup formulas.
    """
    # Header row: every column references the same cell on Capacity_km2_MNO.
    for col in cols:
        cell = "{}1".format(col)
        ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)

    # First two columns: plain per-row references.
    for col in cols[:2]:
        for row in range(2, lnth):
            cell = "{}{}".format(col, row)
            ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)

    # Remaining columns: MIN/IF array formula scaled by the Area sheet.
    for col in cols[2:]:
        for row in range(2, lnth):
            cell = "{}{}".format(col, row)
            part1 = "=MIN(IF('Lookups'!$H$3:$H$250>'Data_km2'!{}".format(cell)
            part2 = ",'Lookups'!$E$3:$E$250))*Area!{}".format(cell)
            ws[cell] = part1 + part2
            ws.formula_attributes[cell] = {'t': 'array', 'ref': "{}:{}".format(cell, cell)}

    ws = format_numbers(ws, ['C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'],
                        (1, 200), 'Comma [0]', 0)
    set_border(ws, 'A1:L{}'.format(lnth - 1), "thin", "000000")
    return ws
def testBinaryFile(filePath):
"""
Test if a file is in binary format
:param fileWithPath(str): File Path
:return:
"""
file = open(filePath, "rb")
#Read only a couple of lines in the file
binaryText = None
for line in itertools.islice(file, 20):
if b"\x00" in line:
#Return to the beginning of the binary file
file.seek(0)
#Read the file in one step
binaryText = file.read()
break
file.close()
#Return the result
return binaryText | 27,885 |
def collect(mail_domain):
    """
    Attempt to connect to each MX hostname for mail_domain and negotiate STARTTLS.
    Store the output in a directory with the same name as mail_domain to make
    subsequent analysis faster.
    """
    # Parenthesized print works on both Python 2 and 3 (the original bare
    # print statement is a SyntaxError under Python 3).
    print("Checking domain %s" % mail_domain)
    mkdirp(os.path.join(CERTS_OBSERVED, mail_domain))
    answers = dns.resolver.query(mail_domain, 'MX')
    for rdata in answers:
        # MX exchange names end with a trailing dot; strip it for connecting.
        mx_host = str(rdata.exchange).rstrip(".")
        tls_connect(mx_host, mail_domain)
def launch_experiment(
    script,
    run_slot,
    affinity_code,
    log_dir,
    variant,
    run_ID,
    args,
    python_executable=None,
    set_egl_device=False,
):
    """Launches one learning run using ``subprocess.Popen()`` to call the
    python script.  Calls the script as:
    ``python {script} -a {slot_affinity_code} -d {log_dir} -i {run_ID} {*args}``
    If ``affinity_code["all_cpus"]`` is provided, then the call is prepended
    with ``taskset -c ..`` and the listed cpus (this is the most sure way to
    keep the run limited to these CPU cores).  Also saves the `variant` file.
    Returns the process handle, which can be monitored.
    Use ``set_egl_device=True`` to set an environment variable
    ``EGL_DEVICE_ID`` equal to the same value as the cuda index for the
    algorithm.  For example, can use with DMControl environment modified
    to look for this environment variable when selecting a GPU for headless
    rendering.
    """
    slot_affinity_code = prepend_run_slot(run_slot, affinity_code)
    affinity = affinity_from_code(slot_affinity_code)
    # Remap the requested CPU ids onto the CPUs actually available to this
    # process (wraps around via modulo when fewer CPUs are available).
    pp = psutil.Process()
    availabele_cpus = pp.cpu_affinity()
    all_cpus = tuple([availabele_cpus[this_cpu%len(availabele_cpus)] for this_cpu in affinity['all_cpus']])
    affinity['all_cpus'] = affinity['master_cpus'] = all_cpus
    workers_cpus = tuple([tuple([availabele_cpus[this_cpu%len(availabele_cpus)] for this_cpu in this_worker_cpus]) for this_worker_cpus in affinity['workers_cpus']])
    affinity['workers_cpus'] = workers_cpus
    call_list = list()
    # affinity may be a single dict or a list of dicts (multi-GPU runs).
    if isinstance(affinity, dict) and affinity.get("all_cpus", False):
        cpus = ",".join(str(c) for c in affinity["all_cpus"])
    elif isinstance(affinity, list) and affinity[0].get("all_cpus", False):
        cpus = ",".join(str(c) for aff in affinity for c in aff["all_cpus"])
    else:
        cpus = ()
    if cpus:
        call_list += ["taskset", "-c", cpus]  # PyTorch obeys better than just psutil.
    py = python_executable if python_executable else sys.executable or "python"
    call_list += [py, script, "-a",slot_affinity_code,"-d", log_dir,"-i", str(run_ID)]
    call_list += [str(a) for a in args]
    # Persist the variant so the launched script (and later analysis) can read it.
    save_variant(variant, log_dir)
    print("\ncall string:\n", " ".join(call_list))
    if set_egl_device and affinity.get("cuda_idx", None) is not None:
        # Expose the cuda index to headless-rendering environments via EGL.
        egl_device_id = str(affinity["cuda_idx"])
        egl_env = os.environ.copy()
        egl_env["EGL_DEVICE_ID"] = egl_device_id
        print(f"Assigning EGL_DEVICE_ID={egl_device_id}")
        p = subprocess.Popen(call_list, env=egl_env)
    else:
        p = subprocess.Popen(call_list)
    return p
def dup_max_norm(f, K):
    """
    Returns maximum norm of a polynomial in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_max_norm(-x**2 + 2*x - 3)
    3
    """
    # The zero polynomial is represented by an empty coefficient list.
    return max(dup_abs(f, K)) if f else K.zero
def greedy_algorithm(pieces, material_size):
    """Implementation of the First-Fit Decreasing Greedy Algorithm
    Inputs:
    pieces - list[] of items to place optimally.  NOTE: this list is
        consumed — every placed piece is removed, so it is empty on return.
    material_size - length of Boards to cut from, assumes unlimited supply
    Output:
    Optimally laid out BoardCollection.contents, which is a list[] of Boards"""
    bc = BoardCollection()
    bc.append(Board(material_size))
    pieces.sort(reverse=True)  # sort in descending order (largest pieces first)
    # we must copy pieces, else our actual list will get modified while iterating
    for piece in pieces.copy():
        piece_added = False  # for recording state: did we add this piece to BoardCollection yet?
        # if piece fits, add it on that Board, remove it from the list, mark it as such and break out of for loop
        for board in bc.contents:
            if board.space_remaining >= piece:
                board.insert(piece)
                pieces.remove(piece)
                piece_added = True
                break
        # if it hasn't been added yet, make a new Board and put it there
        if piece_added is False:
            bc.append(Board(material_size))
            bc.last.insert(piece)
            pieces.remove(piece)
    return bc.contents
def start_tv_session(hypes):
    """
    Run one evaluation against the full epoch of data.

    Parameters
    ----------
    hypes : dict
        Hyperparameters

    Returns
    -------
    tuple
        (sess, saver, summary_op, summary_writer, coord, threads)
    """
    # Summary op must be built after the full graph is constructed.
    summary_op = tf.merge_all_summaries()

    # Checkpoint retention interval, with a very large default (keep none by age).
    checkpoint_hours = hypes['solver'].get('keep_checkpoint_every_n_hours', 10000.0)
    saver = tf.train.Saver(max_to_keep=utils.cfg.max_to_keep,
                           keep_checkpoint_every_n_hours=checkpoint_hours)

    # Session + variable initialization.
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # Input queue runners.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Summary writer for TensorBoard output.
    summary_writer = tf.train.SummaryWriter(hypes['dirs']['output_dir'],
                                            graph=sess.graph)

    return sess, saver, summary_op, summary_writer, coord, threads
def factor_size(value, factor):
    """
    Factors the given thumbnail size. Understands both absolute dimensions
    and percentages.
    """
    # Exact int (not isinstance) keeps bool handling identical to callers' expectations.
    if type(value) is int:
        scaled = value * factor
        return str(scaled) if scaled else ''
    if value[-1] == '%':
        percent = int(value[:-1])
        return '{0}%'.format(percent * factor)
    scaled = int(value) * factor
    return str(scaled) if scaled else ''
def categoryProfile(request, pk):
    """
    Displays the profile of a :class:`gestion.models.Category`.
    pk
        The primary key of the :class:`gestion.models.Category` to display profile.
    """
    context = {"category": get_object_or_404(Category, pk=pk)}
    return render(request, "gestion/category_profile.html", context)
def test_tokens_mysql():
    """ testing for mysql specific parsing """
    paths = sorted(glob.glob('../tests/test-tokens_mysql-*.txt'))
    for path in paths:
        yield run_tokens_mysql, os.path.basename(path)
def eoms(_x, t, _params):
    """Rigid body equations of motion.
    _x is an array/list in the following order:
        q1:  Yaw   q2: Lean   |-(Euler 3-1-2 angles used to orient A
        q3:  Pitch            /
        q4:  N[1] displacement of mass center.
        q5:  N[2] displacement of mass center.
        q6:  N[3] displacement of mass center.
        u1:  A[1] measure number of angular velocity
        u2:  A[2] measure number of angular velocity
        u3:  A[3] measure number of angular velocity
        u4:  N[1] velocity of mass center.
        u5:  N[2] velocity of mass center.
        u6:  N[3] velocity of mass center.
    _params is an array/list in the following order:
        m:   Mass of first pendulum point mass.
        g:   Gravitational constant.
        I11: Principal moment of inertia about A[1]
        I22: Principal moment of inertia about A[2]
        I33: Principal moment of inertia about A[3]
    """
    # Unpack state and parameters
    q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6 = _x
    m, g, I11, I22, I33 = _params
    # Cache the trigonometric terms used repeatedly below
    cos2 = cos(q2)
    cos3 = cos(q3)
    sin3 = sin(q3)
    tan2 = tan(q2)
    # Kinematical differential equations (Euler 3-1-2 angles)
    q1d = cos3*u3/cos2 - sin3*u1/cos2
    q2d = cos3*u1 + sin3*u3
    q3d = sin3*tan2*u1 - cos3*tan2*u3 + u2
    # Euler's equations for torque-free rotation about principal axes
    u1d = (I22 - I33)*u2*u3/I11
    u2d = (I33 - I11)*u1*u3/I22
    u3d = -(I22 - I11)*u1*u2/I33
    # Mass-center translation: velocities feed displacements; free fall along N[3]
    return [q1d, q2d, q3d, u4, u5, u6, u1d, u2d, u3d, 0, 0, g]
def memoize_with_hashable_args(func):
    """Decorator for fast caching of functions which have hashable args.
    Note that it will convert np.NaN to None for caching to avoid this common
    case causing a cache miss.
    """
    cache = {}
    hash_override = getattr(func, "__hash_override__", None)
    if hash_override is None:
        hash_override = get_hash(func)

    @wraps(func)
    def memoized(*args):
        # Normalize NaN -> None so NaN arguments hit the same cache slot.
        key = tuple(a if pd.notnull(a) else None for a in args)
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]

    memoized._cached_results_ = cache  # pylint: disable=protected-access
    memoized.__hash_override__ = hash_override
    return memoized
def _download_from_s3(fname, bucket, key, overwrite=False, anon=True):
    """Download object from S3 to local file
    Parameters
    ----------
    fname : str
        File path to which to download the object
    bucket : str
        S3 bucket name
    key : str
        S3 key for the object to download
    overwrite : bool
        If True, overwrite file if it already exists.
        If False, skip download and return. Default: False
    anon : bool
        Whether to use anonymous connection (public buckets only).
        If False, uses the key/secret given, or boto's credential
        resolver (client_kwargs, environment, variables, config files,
        EC2 IAM server, in that order). Default: True
    """
    # Create the directory and file if necessary
    fs = s3fs.S3FileSystem(anon=anon)
    if overwrite or not op.exists(fname):
        # Ensure the destination directory exists before fetching.
        Path(op.dirname(fname)).mkdir(parents=True, exist_ok=True)
        fs.get("/".join([bucket, key]), fname)
def restore_cursor() -> None:
    """Restore cursor position as saved by `save_cursor`."""
    # ANSI escape sequence: restore previously saved cursor position.
    restore_sequence = "\x1b[u"
    terminal.write(restore_sequence)
def _fill_feature_values(data_values, feature_names, languages):
    """Adds feature values to languages dictionary.

    Each record in ``data_values`` is validated against ``feature_names``
    (its value code must exist and its 1-based code index must equal the
    stored "Value") before being written into ``languages``.

    :raises ValueError: when a record's code or value is inconsistent.
    """
    logging.info("Filling feature values for languages ...")
    # Iterate records directly instead of indexing by range(len(...)).
    for values in data_values:
        feature_name = values[_PARAM_ID]
        cur_value_code = feature_name + "-" + str(values["Value"])
        if cur_value_code != values["Code_ID"]:
            raise ValueError("Invalid value code: %s" % cur_value_code)
        # Codes are stored 0-based; feature values are 1-based.
        val = feature_names[feature_name]["Codes"].index(cur_value_code) + 1
        if val != values["Value"]:
            raise ValueError("Invalid value: %s" % val)
        languages[values["Language_ID"]][feature_name] = val
def create_nem_xml(
    emane_model: "EmaneModel",
    config: Dict[str, str],
    nem_file: str,
    transport_definition: str,
    mac_definition: str,
    phy_definition: str,
    server: DistributedServer,
) -> None:
    """
    Create the nem xml document.
    :param emane_model: emane model to create xml
    :param config: all current configuration values
    :param nem_file: nem file path to write
    :param transport_definition: transport file definition path
    :param mac_definition: mac file definition path
    :param phy_definition: phy file definition path
    :param server: remote server node
        will run on, default is None for localhost
    :return: nothing
    """
    nem_element = etree.Element("nem", name=f"{emane_model.name} NEM")
    if is_external(config):
        # External NEMs carry no transport/mac/phy children.
        nem_element.set("type", "unstructured")
    else:
        for tag, definition in (
            ("transport", transport_definition),
            ("mac", mac_definition),
            ("phy", phy_definition),
        ):
            etree.SubElement(nem_element, tag, definition=definition)
    if server is not None:
        create_file(nem_element, "nem", nem_file, server)
    else:
        # Localhost: write locally, then mirror to every distributed server.
        create_file(nem_element, "nem", nem_file)
        emane_model.session.distributed.execute(
            lambda srv: create_file(nem_element, "nem", nem_file, srv)
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.