content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of non-padded part.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor. See the example.

    Returns:
        ByteTensor: mask tensor containing indices of the non-padded part
        (1 for valid positions, 0 for padding).

    Examples:
        With only lengths.
        >>> lengths = [5, 3, 2]
        >>> make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1 ,1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
        With the reference tensor.
        >>> xs = torch.zeros((3, 2, 4))
        >>> make_non_pad_mask(lengths, xs)
        tensor([[[1, 1, 1, 1],
                 [1, 1, 1, 1]],
                [[1, 1, 1, 0],
                 [1, 1, 1, 0]],
                [[1, 1, 0, 0],
                 [1, 1, 0, 0]]], dtype=torch.uint8)
        >>> xs = torch.zeros((3, 2, 6))
        >>> make_non_pad_mask(lengths, xs)
        tensor([[[1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0]],
                [[1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0]],
                [[1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
        With the reference tensor and dimension indicator.
        >>> xs = torch.zeros((3, 6, 6))
        >>> make_non_pad_mask(lengths, xs, 1)
        tensor([[[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0]],
                [[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]],
                [[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
        >>> make_non_pad_mask(lengths, xs, 2)
        tensor([[[1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1, 0]],
                [[1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0],
                 [1, 1, 1, 0, 0, 0]],
                [[1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0],
                 [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
    """
    # The non-pad mask is just the logical negation of the pad mask.
    return ~make_pad_mask(lengths, xs, length_dim)
import logging
def get_time_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
    """Identify datetime-typed columns of ``X``.

    Args:
        X: Input data; coerced to a ``pd.DataFrame`` first.
        labels: If True, return the labels of the time columns instead of
            the boolean per-column mask.

    Returns:
        Boolean Series indexed by column name, or an Index of column labels
        when ``labels`` is True.
    """
    frame = pd.DataFrame(X)
    # A column is a "time" column when its dtype is a numpy datetime64 subtype.
    mask = frame.dtypes.apply(lambda dtype: issubclass(dtype.type, np.datetime64))
    logging.getLogger(__name__).info(
        "The number of time features is {}.".format(np.sum(mask))
    )
    return frame.columns[mask] if labels else mask
import json
from sys import version
import time
def load(filename):
    """Loads result data from the given json file.

    Args:
        filename: The name of the input file.

    Returns:
        A ``Result`` object reconstructed from the file contents.

    Raises:
        ValueError: If the file's ``version`` field is not recognised.
    """
    # NOTE(review): `log`, `RunInfo`, `Time` and `Result` are project-level
    # names not visible in this chunk; `time.from_timestr` is not part of the
    # stdlib `time` module, so `time` is presumably a project helper -- confirm.
    # NOTE(review): `version` comes from `from sys import version`, which is
    # the Python interpreter version string; comparing it to the file format
    # version looks suspicious -- verify the intended `version` constant.
    with open(filename, 'r') as fp:
        data = json.load(fp)
    # NOTE(review): the f-string contains no placeholder and the trailing
    # `data` argument is only consumed by %-style formatting, so `data` is
    # probably not logged as intended -- confirm.
    log.debug(f'Read json from (unknown)', data)
    if data['version'] == version:
        # Current file format: run information lives under a single
        # 'runinfo' object.
        d = data['runinfo']
        runinfo_data = RunInfo(name=d['name'],
                               version=d['version'],
                               datetime=time.from_timestr(d['datetime']),
                               precision=d['precision'],
                               backend=d['backend'],
                               grid=d['grid'],
                               compiler=d['compiler'],
                               hostname=d['hostname'],
                               clustername=d['clustername'])
    elif data['version'] == 0.4:
        # Legacy 0.4 format: the same fields were split between 'runtime'
        # and 'config' sections.
        runtime_data = data['runtime']
        config_data = data['config']
        runinfo_data = RunInfo(name=runtime_data['name'],
                               version=runtime_data['version'],
                               datetime=time.from_timestr(
                                   runtime_data['datetime']),
                               precision=runtime_data['precision'],
                               backend=runtime_data['backend'],
                               grid=runtime_data['grid'],
                               compiler=runtime_data['compiler'],
                               hostname=config_data['hostname'],
                               clustername=config_data['clustername'])
    elif data['version'] != version:
        # Catch-all for any other version value (always true at this point,
        # since the first branch already handled equality).
        raise ValueError(f'Unknown result file version "{data["version"]}"')
    times_data = [Time(stencil=d['stencil'], measurements=d['measurements'])
                  for d in data['times']]
    result = Result(runinfo=runinfo_data,
                    times=times_data,
                    domain=data['domain'],
                    datetime=time.from_timestr(data['datetime']),
                    version=data['version'])
    log.info(f'Successfully loaded result from (unknown)')
    return result
def default_browser():
    """Name of the Browser used by default on this system.

    Returns:
        The browser identifier string; currently always ``'firefox'``.
    """
    # Hard-coded for now -- no per-platform detection is performed.
    return 'firefox'
def dmp_ground(c, u):
    """
    Return a multivariate constant.

    Wraps the ground-domain element ``c`` in ``u + 1`` nesting levels so it
    becomes a valid dense polynomial representation in ``u + 1`` variables.

    Examples
    ========
    >>> from sympy.polys.densebasic import dmp_ground
    >>> dmp_ground(3, 5)
    [[[[[[3]]]]]]
    >>> dmp_ground(1, -1)
    1
    """
    if not c:
        # Zero gets the canonical zero representation for this level.
        return dmp_zero(u)
    wrapped = c
    for _ in range(u + 1):
        wrapped = [wrapped]
    return wrapped
import torch
def bkb(gp_model, inducing_points, q_bar=1):
    """Select inducing points to keep via the BKB resampling rule.

    Parameters
    ----------
    gp_model: ExactGP
        model to update
    inducing_points: torch.Tensor
        Tensor of dimension [N x d_x]
    q_bar: float
        float with algorithm parameter.
    """
    gp_model.eval()
    # Inclusion probability of each candidate: its scaled predictive variance
    # under the current model, clipped (in place) to a valid [0, 1] range.
    prob = q_bar * gp_model(inducing_points).variance / (gp_model.likelihood.noise)
    sampled = Bernoulli(probs=prob.clamp_(0, 1)).sample()
    keep = torch.where(sampled == 1)[0]
    if len(keep) == 0:
        # The GP has to keep at least one point.
        keep = [0]
    return inducing_points[keep]
import shlex
import tempfile
import io
import os
import sys
import subprocess
import time
# NOTE(review): the default is the value of `_default_timeout()` at module
# import time, not per call -- later changes to the settings are ignored;
# confirm this is intended.
def run_duplicate_streams(cmd, timeout=_default_timeout()):
    """
    <Purpose>
    Provide a function that executes a command in a subprocess and, upon
    termination, returns its exit code and the contents of what was printed to
    its standard streams.
    * Might behave unexpectedly with interactive commands.
    * Might not duplicate output in real time, if the command buffers it (see
    e.g. `print("foo")` vs. `print("foo", flush=True)` in Python 3).
    <Arguments>
    cmd:
    The command and its arguments. (list of str, or str)
    Splits a string specifying a command and its argument into a list
    of substrings, if necessary.
    timeout: (default see settings.SUBPROCESS_TIMEOUT)
    If the timeout expires, the child process will be killed and waited
    for and then subprocess.TimeoutExpired will be raised.
    <Exceptions>
    securesystemslib.exceptions.FormatError:
    If the `cmd` is a list and does not match
    securesystemslib.formats.LIST_OF_ANY_STRING_SCHEMA.
    OSError:
    If the given command is not present or non-executable.
    subprocess.TimeoutExpired:
    If the process does not terminate after timeout seconds. Default
    is `settings.SUBPROCESS_TIMEOUT`
    <Side Effects>
    The side effects of executing the given command in this environment.
    <Returns>
    A tuple of command's exit code, standard output and standard error
    contents.
    """
    # Normalize: a command string is split shell-style into an argv list;
    # a list is schema-checked instead.  `formats` is a module-level name
    # (securesystemslib.formats) not visible in this chunk.
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    else:
        formats.LIST_OF_ANY_STRING_SCHEMA.check_match(cmd)
    # Use temporary files as targets for child process standard stream redirects
    # They seem to work better (i.e. do not hang) than pipes, when using
    # interactive commands like `vi`.
    stdout_fd, stdout_name = tempfile.mkstemp()
    stderr_fd, stderr_name = tempfile.mkstemp()
    try:
        # Open a separate reader on each temp file while the child writes to
        # the original descriptor, so we can tail the output as it appears.
        with io.open(stdout_name, "r") as stdout_reader, \
                os.fdopen(stdout_fd, "w") as stdout_writer, \
                io.open(stderr_name, "r") as stderr_reader, \
                os.fdopen(stderr_fd, "w") as stderr_writer:
            # Store stream results in mutable dict to update it inside nested helper
            _std = {"out": "", "err": ""}
            def _duplicate_streams():
                """Helper to read from child process standard streams, write their
                contents to parent process standard streams, and build up return values
                for outer function.
                """
                # Read until EOF but at most `io.DEFAULT_BUFFER_SIZE` bytes per call.
                # Reading and writing in reasonably sized chunks prevents us from
                # subverting a timeout, due to being busy for too long or indefinitely.
                stdout_part = stdout_reader.read(io.DEFAULT_BUFFER_SIZE)
                stderr_part = stderr_reader.read(io.DEFAULT_BUFFER_SIZE)
                sys.stdout.write(stdout_part)
                sys.stderr.write(stderr_part)
                sys.stdout.flush()
                sys.stderr.flush()
                _std["out"] += stdout_part
                _std["err"] += stderr_part
            # Start child process, writing its standard streams to temporary files
            proc = subprocess.Popen(cmd, stdout=stdout_writer,
                                    stderr=stderr_writer, universal_newlines=True)
            proc_start_time = time.time()
            # Duplicate streams until the process exits (or times out)
            while proc.poll() is None:
                # Time out as Python's `subprocess` would do it
                if (timeout is not None and
                        time.time() > proc_start_time + timeout):
                    proc.kill()
                    proc.wait()
                    raise subprocess.TimeoutExpired(cmd, timeout)
                _duplicate_streams()
            # Read/write once more to grab everything that the process wrote between
            # our last read in the loop and exiting, i.e. breaking the loop.
            _duplicate_streams()
    finally:
        # The work is done or was interrupted, the temp files can be removed
        os.remove(stdout_name)
        os.remove(stderr_name)
    # Return process exit code and captured streams
    return proc.poll(), _std["out"], _std["err"]
def splinter_remote_url(request):
    """Remote webdriver url.

    :return: URL of remote webdriver.
    """
    # Read straight from the pytest command-line option store.
    options = request.config.option
    return options.splinter_remote_url
import shlex
import traceback
def run(*args, **kwargs):
    """Run the external command. See ``subprocess.check_output``."""
    # Accept either a single command string / argv list, or varargs.
    if len(args) == 1:
        first = args[0]
        args = shlex.split(first, posix=IS_POSIX) if isinstance(first, str) else first
    # Guard against calling tools by bare name (PATH ambiguity in tests).
    if args[0] in ("python", "putup", "pip", "tox", "pytest", "pre-commit"):
        raise SystemError("Please specify an executable with explicit path")
    opts = {"stderr": STDOUT, "universal_newlines": True, **kwargs}
    try:
        return check_output(args, **opts)
    except CalledProcessError as ex:
        # Dump full diagnostics before re-raising for the caller.
        print("Error while running command:")
        print(args)
        print(opts)
        traceback.print_exc()
        msg = "******************** Terminal ($? = {}) ********************\n{}"
        print(msg.format(ex.returncode, ex.output))
        raise
def tfresize_image(image, size=(cfg.IMG_W, cfg.IMG_H)):
    """ Resize image. """
    # NOTE(review): the default `size` captures cfg.IMG_W/IMG_H once, at
    # function-definition time; later changes to `cfg` have no effect here.
    return tf.image.resize(image, size)
def history():
    """Show history of transactions"""
    # `db`, `session` and `render_template` are module-level (Flask/CS50)
    # names not visible in this chunk.
    # Fetch all transactions recorded for the logged-in user.
    transactions = db.execute(
        "SELECT symbol, shares, price_per_share, price, time FROM transactions WHERE user_id = ?",
        session["user_id"],
    )
    return render_template("history.html", transactions=transactions)
def serve_static_file(request, filename, root=MEDIA_ROOT, force_content_type=None):
    """
    Basic handler for serving up static media files.
    Accepts an optional ``root`` (filepath string, defaults to ``MEDIA_ROOT``) parameter.
    Accepts an optional ``force_content_type`` (string, guesses if ``None``) parameter.
    """
    body = static_file(filename, root)
    # Guess from the filename unless the caller pinned a content type.
    ct = content_type(filename) if force_content_type is None else force_content_type
    return Response(body, content_type=ct)
import numpy
def crystal_fh2(input_dictionary,phot_in,theta=None,forceratio=0):
    """
    Compute crystal structure factors and derived diffraction quantities.

    :param input_dictionary: as resulting from bragg_calc()
    :param phot_in: photon energy in eV
    :param theta: incident angle (half of scattering angle) in rad
    :param forceratio: if nonzero, compute sin(theta)/lambda from the
        d-spacing instead of from the incidence angle
    :return: a dictionary with structure factor results (F_0, FH, FH_BAR,
        STRUCT, the psi_* susceptibilities, refraction/absorption data,
        Darwin widths and a human-readable "info" text)
    """
    # NOTE(review): `codata` is a module-level name (scipy.constants-like)
    # not visible in this chunk -- assumed to provide h, c and e.
    # outfil = input_dictionary["outfil"]
    # fract = input_dictionary["fract"]
    rn = input_dictionary["rn"]
    dspacing = numpy.array(input_dictionary["dspacing"])
    nbatom = numpy.array(input_dictionary["nbatom"])
    atnum = numpy.array(input_dictionary["atnum"])
    temper = numpy.array(input_dictionary["temper"])
    G_0 = numpy.array(input_dictionary["G_0"])
    G = numpy.array(input_dictionary["G"])
    G_BAR = numpy.array(input_dictionary["G_BAR"])
    f0coeff = numpy.array(input_dictionary["f0coeff"])
    npoint = numpy.array(input_dictionary["npoint"])
    energy = numpy.array(input_dictionary["energy"])
    fp = numpy.array(input_dictionary["f1"])
    fpp = numpy.array(input_dictionary["f2"])
    #X.J. Yu, slsyxj@nus.edu.sg
    ZCOL = numpy.array(input_dictionary["zcol"])
    FCOL = numpy.array(input_dictionary["fraction"])
    UCOL = numpy.array(input_dictionary["unique_AtomicName"])
    LCOL = numpy.array(input_dictionary["list_AtomicName"])
    #---------------------------------------------------------
    phot_in = numpy.array(phot_in,dtype=float).reshape(-1)
    # hc/e conversion constant: photon energy [eV] <-> wavelength [Angstrom]
    toangstroms = codata.h * codata.c / codata.e * 1e10
    itheta = numpy.zeros_like(phot_in)
    for i,phot in enumerate(phot_in):
        if theta is None:
            # Bragg angle from the lattice spacing when no angle is given.
            itheta[i] = numpy.arcsin(toangstroms*1e-8/phot/2/dspacing)
        else:
            itheta[i] = theta
        # print("energy= %g eV, theta = %15.13g deg"%(phot,itheta[i]*180/numpy.pi))
        if phot < energy[0] or phot > energy[-1]:
            raise Exception("Photon energy %g eV outside of valid limits [%g,%g]"%(phot,energy[0],energy[-1]))
        if forceratio == 0:
            ratio = numpy.sin(itheta[i]) / (toangstroms / phot)
        else:
            ratio = 1 / (2 * dspacing * 1e8)
        # print("Ratio: ",ratio)
        # Atomic form factor f0 at this sin(theta)/lambda, per atom.
        F0 = numpy.zeros(nbatom)
        #X.J. Yu, slsyxj@nus.edu.sg
        F000 = numpy.zeros(nbatom)
        for j in range(nbatom):
            icentral = int(f0coeff.shape[1]/2)
            F0[j] = f0coeff[j,icentral]
            F000[j] = F0[j] #X.J. Yu, slsyxj@nus.edu.sg
            for i in range(icentral):
                F0[j] += f0coeff[j,i] * numpy.exp(-1.0*f0coeff[j,i+icentral+1]*ratio**2)
                F000[j] += f0coeff[j,i] #actual number of electrons carried by each atom, X.J. Yu, slsyxj@nus.edu.sg
            # print("F0: ",F0,xraylib.FF_Rayl(int(atnum[j]),ratio))
        # ;C
        # ;C Interpolate for the atomic scattering factor.
        # ;C
        # Find the bracketing tabulated energies: after the loop, `nener`
        # indexes the last energy <= phot.
        for j,ienergy in enumerate(energy):
            if ienergy > phot:
                break
        nener = j - 1
        F1 = numpy.zeros(nbatom,dtype=float)
        F2 = numpy.zeros(nbatom,dtype=float)
        F = numpy.zeros(nbatom,dtype=complex)
        # Linear interpolation of the anomalous corrections f' and f''.
        for j in range(nbatom):
            F1[j] = fp[j,nener] + (fp[j,nener+1] - fp[j,nener]) * \
                (phot - energy[nener]) / (energy[nener+1] - energy[nener])
            F2[j] = fpp[j,nener] + (fpp[j,nener+1] - fpp[j,nener]) * \
                (phot - energy[nener]) / (energy[nener+1] - energy[nener])
        # print("F1,F2",F1,F2)
        # Wavelength in cm for this photon energy.
        r_lam0 = toangstroms * 1e-8 / phot
        for j in range(nbatom):
            F[j] = F0[j] + F1[j] + 1j * F2[j]
            # print("F",F)
        F_0 = 0.0 + 0.0j
        FH = 0.0 + 0.0j
        FH_BAR = 0.0 + 0.0j
        FHr = 0.0 + 0.0j
        FHi = 0.0 + 0.0j
        FH_BARr = 0.0 + 0.0j
        FH_BARi = 0.0 + 0.0j
        CI = 0.0 + 1.0j
        TEMPER_AVE = 1.0
        #X.J. Yu, slsyxj@nus.edu.sg
        #Occupancy for FH already included in G in Bragg_Calc function
        BOOL_UCOL = UCOL[0]==''
        for j in range(nbatom):
            FH += G[j] * F[j] * 1.0
            FHr += G[j] * (F0[j] + F1[j])* 1.0
            FHi += G[j] * F2[j] * 1.0
            #charged atom, the number of electrons not equal to atum anymore,while
            # it is euqal to F000, and notably, fractial occupancy need consideration here
            # occupancy till now, only consider in calculation of G, and G_BAR in bragg_calc
            #comment out: X.J. Yu, slsyxj@nus.edu.sg
            #
            # F_0 += G_0[j] * ( atnum[j] + F1[j] + 1j * F2[j] ) * 1.0
            #
            FN = F000[j] + F1[j] + CI * F2[j]
            if BOOL_UCOL:       #normal crystal
                F_0 += FN*numpy.sum( numpy.where(ZCOL==atnum[j],FCOL,0.0))
            else:               #complicate compound crystals
                F_0 += FN*numpy.sum( numpy.where(LCOL==UCOL[j],FCOL,0.0))
            # Geometric mean of Debye-Waller factors, weighted by G_0.
            TEMPER_AVE *= (temper[j])**(G_0[j]/(G_0.sum()))
            FH_BAR += (G_BAR[j] * F[j] * 1.0)
            FH_BARr += (G_BAR[j] * (F0[j] + F1[j]) *1.0)
            FH_BARi += (G_BAR[j] * F2[j] * 1.0)
            # print("TEMPER_AVE: ",TEMPER_AVE)
        # ;C
        # ;C multiply by the average temperature factor
        # ;C
        FH *= TEMPER_AVE
        FHr *= TEMPER_AVE
        FHi *= TEMPER_AVE
        FH_BAR *= TEMPER_AVE
        FH_BARr *= TEMPER_AVE
        FH_BARi *= TEMPER_AVE
        STRUCT = numpy.sqrt(FH * FH_BAR)
        # ;C
        # ;C PSI_CONJ = F*( note: PSI_HBAR is PSI at -H position and is
        # ;C proportional to fh_bar but PSI_CONJ is complex conjugate os PSI_H)
        # ;C
        psi_over_f = rn * r_lam0**2 / numpy.pi
        psi_h = rn * r_lam0**2 / numpy.pi * FH
        psi_hr = rn * r_lam0**2 / numpy.pi * FHr
        psi_hi = rn * r_lam0**2 / numpy.pi * FHi
        psi_hbar = rn * r_lam0**2 / numpy.pi * FH_BAR
        psi_hbarr = rn * r_lam0**2 / numpy.pi * FH_BARr
        psi_hbari = rn * r_lam0**2 / numpy.pi * FH_BARi
        psi_0 = rn * r_lam0**2 / numpy.pi * F_0
        psi_conj = rn * r_lam0**2 / numpy.pi * FH.conjugate()
        # ;
        # ; Darwin width
        # ;
        # print(rn,r_lam0,STRUCT,itheta)
        ssvar = rn * (r_lam0**2) * STRUCT / numpy.pi / numpy.sin(2.0*itheta)
        spvar = ssvar * numpy.abs((numpy.cos(2.0*itheta)))
        ssr = ssvar.real
        spr = spvar.real
        # ;C
        # ;C computes refractive index.
        # ;C ([3.171] of Zachariasen's book)
        # ;C
        REFRAC = (1.0+0j) - r_lam0**2 * rn * F_0 / 2/ numpy.pi
        DELTA_REF = 1.0 - REFRAC.real
        ABSORP = 4.0 * numpy.pi * (-REFRAC.imag) / r_lam0
        # Refraction-corrected Bragg angle.
        THETA_B =r_lam0/(1-(DELTA_REF/numpy.sin(itheta)**2))/2.0/dspacing
        THETA_B = numpy.arcsin(THETA_B)
        txt = ""
        txt += '\n******************************************************'
        txt += '\n at energy = '+repr(phot)+' eV'
        txt += '\n = '+repr(r_lam0*1e8)+' Angstroms'
        txt += '\n and at angle = '+repr(itheta*180.0/numpy.pi)+' degrees'
        txt += '\n = '+repr(itheta)+' rads'
        txt += '\n******************************************************'
        for j in range(nbatom):
            txt += '\n '
            txt += '\nFor atom '+repr(j+1)+':'
            txt += '\n fo + fp+ i fpp = '
            txt += '\n '+repr(F0[j])+' + '+ repr(F1[j].real)+' + i'+ repr(F2[j])+" ="
            txt += '\n '+repr(F0[j] + F1[j] + 1j * F2[j])
            txt += '\n Z = '+repr(atnum[j])
            txt += '\n Temperature factor = '+repr(temper[j])
        txt += '\n '
        txt += '\n Structure factor F(0,0,0) = '+repr(F_0)
        txt += '\n Structure factor FH = ' +repr(FH)
        txt += '\n Structure factor FH_BAR = ' +repr(FH_BAR)
        txt += '\n Structure factor F(h,k,l) = '+repr(STRUCT)
        txt += '\n '
        txt += '\n Psi_0 = ' +repr(psi_0)
        txt += '\n Psi_H = ' +repr(psi_h)
        txt += '\n Psi_HBar = '+repr(psi_hbar)
        txt += '\n '
        txt += '\n Psi_H(real) Real and Imaginary parts = ' + repr(psi_hr)
        txt += '\n Psi_H(real) Modulus = ' + repr(numpy.abs(psi_hr))
        txt += '\n Psi_H(imag) Real and Imaginary parts = ' + repr(psi_hi)
        txt += '\n Psi_H(imag) Modulus = ' + repr(abs(psi_hi))
        txt += '\n Psi_HBar(real) Real and Imaginary parts = '+ repr(psi_hbarr)
        txt += '\n Psi_HBar(real) Modulus = ' + repr(abs(psi_hbarr))
        txt += '\n Psi_HBar(imag) Real and Imaginary parts = '+ repr(psi_hbari)
        txt += '\n Psi_HBar(imag) Modulus = ' + repr(abs(psi_hbari))
        txt += '\n '
        txt += '\n Psi/F factor = ' + repr(psi_over_f)
        txt += '\n '
        txt += '\n Average Temperature factor = ' + repr(TEMPER_AVE)
        txt += '\n Refraction index = 1 - delta - i*beta'
        txt += '\n delta = ' + repr(DELTA_REF)
        txt += '\n beta = ' + repr(1.0e0*REFRAC.imag)
        txt += '\n Absorption coeff = ' + repr(ABSORP)+' cm^-1'
        txt += '\n '
        txt += '\n e^2/(mc^2)/V = ' + repr(rn)+' cm^-2'
        txt += '\n d-spacing = ' + repr(dspacing*1.0e8)+' Angstroms'
        txt += '\n SIN(theta)/Lambda = ' + repr(ratio)
        txt += '\n '
        txt += '\n Darwin width for symmetric s-pol [microrad] = ' + repr(2.0e6*ssr)
        txt += '\n Darwin width for symmetric p-pol [microrad] = ' + repr(2.0e6*spr)
    return {"PHOT":phot, "WAVELENGTH":r_lam0*1e-2 ,"THETA":itheta,"THETAcor":THETA_B, "F_0":F_0, "FH":FH, "FH_BAR":FH_BAR,
            "STRUCT":STRUCT, "psi_0":psi_0, "psi_h":psi_h, "psi_hbar":psi_hbar,
            "DELTA_REF":DELTA_REF, "REFRAC":REFRAC, "ABSORP":ABSORP, "RATIO":ratio,
            "ssr":ssr, "spr":spr, "psi_over_f":psi_over_f, "info":txt}
import re
def split_list_item_by_taking_words_in_parentheses(item):
"""This function goes through items in a list and creates a new item with only the words inside the parentheses."""
species_pop_name = item.split('(')[0].split(',')
if len(species_pop_name) > 1:
species_pop_name = species_pop_name[1][1:] + species_pop_name[0]
else:
species_pop_name = species_pop_name[0][:-1]
binomial_nomenclature = re.findall(r'\([^()]*\)', item)
binomial_nomenclature = [x.strip('()') for x in binomial_nomenclature]
return species_pop_name, binomial_nomenclature[0] | 2d8543611007e799d089c77b79ae7263cba36a30 | 3,634,313 |
import re
def _set_arxiv_info(paper):
"""
Retrieve paper information from the html.
:param paper: SubmittedPaper object to scrape html information.
:type paper: SubmittedPaper
:return: SubmittedPaper object with html information retrieved.
:rtype: SubmittedPaper
"""
# Remove all the muck that screws up the BeautifulSoup parser
# Will fail on PDF submission, so take care of that exception first
try:
fixed_html =re.sub(re.compile("<!--.*?-->", re.DOTALL), "", paper.html)
soup = bs(fixed_html, 'lxml')
paper.errors = '0'
except:
return None
# Grab the Title, Date, and Authors, and all the other stuff
try:
paper.title = soup.find('h1',
{'class': 'title mathjax'}).contents[1].string
except:
paper.errors = '1'
paper.title = 'Error Grabbing Title'
#print paper.title + "\n"
try:
authors = soup.find('div', {'class': 'authors'})
authors = authors.findAll('a')
paper.author_number = len(authors)
# Convert the authors to strings only and replace the relative links
author_list = []
for i in authors:
author_list.append(str(i).replace('/find/',
'http://arxiv.org/find/'))
paper.author = ', '.join(author_list[0:4])
# Kill all affiliation marks since some have them and some don't; done
# in two steps to take care of nested parens
paper.author = re.sub(r'\([^()]*\)', '', paper.author)
paper.author = re.sub(r'\([^()]*\)', '', paper.author)
except:
paper.errors = '1'
paper.author = 'Error Grabbing Authors'
# print paper.errors + paper.author + "\n"
try:
date = soup.find('div', {'class':'submission-history'})
date = date.findAll(text=True) # Remove HTML tags
date = list(dict.fromkeys(date)) # ignoring multiple \n's
date = date[-1].split() # Most recent revision date will be the last
paper.date = date[1] + ' ' + date[2] + ' ' + date[3]
except:
paper.errors = '1'
paper.date = 'Error Grabbing Date'
# print paper.errors + paper.date + "\n"
try:
paper.abstract = soup.find('blockquote',
{'class': 'abstract mathjax'}).contents[2]
except:
paper.errors = '1'
paper.abstract = 'Error Grabbing Abstract'
# print paper.errors + paper.abstract + "\n"
try:
sources = soup.find('div', {'class': 'full-text'})
sources = sources.findAll('a')
source_list=[]
for source in sources:
source = str(source)
source = source.replace('/ps','http://arxiv.org/ps')
source = source.replace('/pdf','http://arxiv.org/pdf')
source = source.replace('/format','http://arxiv.org/format')
source = source.replace('PostScript','PS')
source = source.replace('Other formats','Other')
if 'license' in source:
continue
source_list.append(source)
paper.sources = ' '.join(source_list)
except:
paper.errors = '1'
paper.sources = ''
# print paper.errors + paper.sources + "\n"
try:
paper.subject = soup.find('span', {'class': 'primary-subject'}).string
except:
paper.errors = '1'
paper.subject = 'Error Grabbing Subject'
# print paper.errors + paper.subject + "\n"
#try:
#paper.comments = soup.find('td', {'class':'tablecell comments'}).string
#except:
#paper.errors = "1"
#paper.comments = "Error Grabbing Comments"
return paper | b37103d1ae3aa0175c49884ed2133b28ab69c327 | 3,634,314 |
def fetch_accountTransactions(accountNum):
    """
    Function to return all the transaction related to an account number provided as a parameter. This
    function assumes that the user has been previously authenticated and that the request is for an
    account they own.

    Args:
        accountNum (int): User's account number

    Returns:
        transactions_modified (list): A list containing (dict) elements with info about each account,
        including wire transfers
    """
    # NOTE(review): `Transaction`, `WireTransaction`, `Account` and `Q` are
    # Django ORM names not visible in this chunk.
    ## Search for any transactions that has originated from or destined to that account. If
    ## no transactions are found, simply return an empty string
    # NOTE(review): QuerySet.filter() never raises DoesNotExist (it returns an
    # empty queryset), so this except branch looks dead; the .get() calls
    # inside Q() could raise Account.DoesNotExist instead, which is not
    # caught here -- confirm the intended error handling.
    try:
        transactions = Transaction.objects.filter(Q(transaction_origin=Account.objects.get(account_number=accountNum)) | Q(transaction_destination=Account.objects.get(account_number=accountNum)))
    except Transaction.DoesNotExist:
        transactions = []
    ## For each transaction found, add a new dict to transactions_modified to display information
    ## to the user. M2M fields transaction_origin and transaction_destination hasve to be converted
    ## to string to show user
    transactions_modified = []
    for i in range(len(transactions)):
        transactions_modified.append({})
        transactions_modified[i]["transaction_id"] = transactions[i].transaction_id
        transactions_modified[i]["transaction_amount"] = "${:,.2f}".format(transactions[i].transaction_amount)
        transactions_modified[i]["transaction_time"] = transactions[i].transaction_time
        transactions_modified[i]["transaction_name"] = transactions[i].transaction_name
        transactions_modified[i]["transaction_origin"] = list(transactions[i].transaction_origin.all())[0].account_number
        transactions_modified[i]["transaction_destination"] = list(transactions[i].transaction_destination.all())[0].account_number
        transactions_modified[i]["transaction_origin_balance"] = "${:,.2f}".format(transactions[i].transaction_origin_balance)
        transactions_modified[i]["transaction_destination_balance"] = "${:,.2f}".format(transactions[i].transaction_destination_balance)
    ## Search for any wire transactions for that account
    try:
        wire_transactions = WireTransaction.objects.filter(transaction_origin=Account.objects.get(account_number=accountNum))
    except WireTransaction.DoesNotExist:
        wire_transactions = []
    ## Repeat same process for adding dict element to a list
    wire_transactions_modified = []
    for i in range(len(wire_transactions)):
        wire_transactions_modified.append({})
        wire_transactions_modified[i]["transaction_id"] = wire_transactions[i].transaction_id
        wire_transactions_modified[i]["transaction_amount"] = "${:,.2f}".format(wire_transactions[i].transaction_amount)
        wire_transactions_modified[i]["transaction_time"] = wire_transactions[i].transaction_time
        wire_transactions_modified[i]["transaction_name"] = wire_transactions[i].transaction_name
        wire_transactions_modified[i]["transaction_origin"] = list(wire_transactions[i].transaction_origin.all())[0].account_number
        wire_transactions_modified[i]["transaction_origin_balance"] = "${:,.2f}".format(wire_transactions[i].transaction_origin_balance)
    ## Add any wire transactions found to the original transactions_modified list
    transactions_modified.extend(wire_transactions_modified)
    ## Sort the transactions by the time it was processed, starting from latest first
    transactions_modified.sort(key = lambda x: (x["transaction_time"]), reverse=True)
    return transactions_modified
import os
def resolve_all(dirs, *paths):
    """
    Join ``paths`` onto every dir in ``dirs`` and keep only existing results.

    :param dirs: A list of dir strings to resolve against
    :param paths: Path components to join onto each dir in `dirs` with `os.path.join`
    :return: A list of paths created by joining `paths` onto each dir in `dirs` using
        `os.path.join`, with all join results that do not exist discarded.
    """
    candidates = join_all(dirs, *paths)
    return [candidate for candidate in candidates if os.path.exists(candidate)]
def main(items=None, printmd=None, printcal=None,
         found=False, filename_template='${collection}/${date}/${id}',
         save=None, download=None, requester_pays=False, headers=None, **kwargs):
    """ Main function for performing a search

    Args:
        items: Path to a previously-saved ItemCollection file; if None, a
            new search is performed with ``**kwargs``.
        printmd: If not None, print a metadata summary with these fields.
        printcal: If truthy, print a calendar view of the items.
        found: If True, only report the total number of matches and return it.
        filename_template: Template used to name downloaded files.
        save: If not None, save all item metadata to this JSON filename.
        download: Asset keys to download, or ['ALL'] for every asset.
        requester_pays: Passed through to downloads (e.g. S3 requester-pays).
        headers: Optional HTTP headers for the search requests.
        **kwargs: Search parameters forwarded to ``Search.search``.

    Returns:
        The matched ItemCollection (or the match count when ``found``).
    """
    if items is None:
        ## if there are no items then perform a search
        search = Search.search(headers=headers, **kwargs)
        ## Commenting out found logic until functions correctly.
        if found:
            num = search.found(headers=headers)
            print('%s items found' % num)
            return num
        items = search.items(headers=headers)
    else:
        # otherwise, load a search from a file
        items = ItemCollection.open(items)
    print('%s items found' % len(items))
    # print metadata
    if printmd is not None:
        print(items.summary(printmd))
    # print calendar
    if printcal:
        print(items.calendar(printcal))
    # save all metadata in JSON file
    if save is not None:
        items.save(filename=save)
    # download files given `download` keys
    if download is not None:
        if 'ALL' in download:
            # get complete set of assets
            download = set([k for i in items for k in i.assets])
        for key in download:
            items.download(key=key, filename_template=filename_template, requester_pays=requester_pays)
    return items
def get_utility_flow(heat_utilities, agent):
    """Return the total utility duty of heat utilities for given agent in GJ/hr"""
    if isinstance(agent, str):
        # Allow agents to be referenced by name.
        agent = HeatUtility.get_agent(agent)
    total_kg = sum([hu.flow * hu.agent.MW for hu in heat_utilities if hu.agent is agent])
    return total_kg / 1e3
from unittest.mock import patch
async def test_flux_with_custom_start_stop_times(hass, legacy_patchable_time):
    """Test the flux with custom start and stop times."""
    # Set up a single test light entity.
    platform = getattr(hass.components, "test.light")
    platform.init()
    assert await async_setup_component(
        hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
    )
    await hass.async_block_till_done()
    ent1 = platform.ENTITIES[0]
    # Verify initial state of light
    state = hass.states.get(ent1.entity_id)
    assert STATE_ON == state.state
    assert state.attributes.get("xy_color") is None
    assert state.attributes.get("brightness") is None
    # Freeze "now" at 17:30, with sunset at 17:00 and sunrise at 05:00.
    test_time = dt_util.utcnow().replace(hour=17, minute=30, second=0)
    sunset_time = test_time.replace(hour=17, minute=0, second=0)
    sunrise_time = test_time.replace(hour=5, minute=0, second=0)
    def event_date(hass, event, now=None):
        # Stub astral lookup: return the fixed sunrise/sunset above.
        if event == SUN_EVENT_SUNRISE:
            return sunrise_time
        return sunset_time
    with patch(
        "homeassistant.components.flux.switch.dt_utcnow", return_value=test_time
    ), patch(
        "homeassistant.components.flux.switch.get_astral_event_date",
        side_effect=event_date,
    ):
        # Configure the flux switch with custom start/stop times that
        # override the sunrise/sunset defaults.
        assert await async_setup_component(
            hass,
            switch.DOMAIN,
            {
                switch.DOMAIN: {
                    "platform": "flux",
                    "name": "flux",
                    "lights": [ent1.entity_id],
                    "start_time": "6:00",
                    "stop_time": "23:30",
                }
            },
        )
        await hass.async_block_till_done()
        turn_on_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_ON)
        await hass.services.async_call(
            switch.DOMAIN,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: "switch.flux"},
            blocking=True,
        )
        async_fire_time_changed(hass, test_time)
        await hass.async_block_till_done()
        # The flux switch should have adjusted the light for evening.
        call = turn_on_calls[-1]
        assert call.data[light.ATTR_BRIGHTNESS] == 147
        assert call.data[light.ATTR_XY_COLOR] == [0.504, 0.385]
def pad(value, digits, to_right=False):
    """Only use for positive binary numbers given as strings.

    Pads to the left by default, or to the right using to_right flag.

    Inputs: value -- string of bits
            digits -- number of bits in representation
            to_right -- Boolean, direction of padding
    Output: string of bits of length 'digits'
    Raises exception if value is larger than digits in length.

    Example:
        pad('0010', 6) -> '000010'
        pad('0010', 6, True) -> '001000'
    """
    assert len(value) <= digits
    # str.ljust/rjust fill with '0' up to the requested width, leaving the
    # value unchanged when it is already `digits` long.
    return value.ljust(digits, "0") if to_right else value.rjust(digits, "0")
def abort(state: State) -> Process:
    """End of aborted workflow.

    Wraps the final ``state`` in an ``Abort`` terminal process.
    """
    return Abort(state)
def my_todos():
    """This takes the user to the homepage."""
    # Renders the static homepage template; no per-user data is passed in.
    return render_template('index.html')
import tempfile
import os
def setup_directories(
    create_report_directory=True,
    create_publish_directory=False,
    temporary_work_directory=None,
):
    """
    Set up a temporary work directory with a "report" subdirectory (created
    by default) and a "publish" subdirectory (not created by default).

    Returns the (temporary_work_directory, report_directory,
    publish_directory) triple; the caller owns the temporary directory's
    lifetime.
    """
    if not temporary_work_directory:
        temporary_work_directory = tempfile.TemporaryDirectory()
    base = temporary_work_directory.name
    report_directory = os.path.join(base, "report")
    publish_directory = os.path.join(base, "publish")
    if create_report_directory:
        os.makedirs(report_directory)
    if create_publish_directory:
        os.makedirs(publish_directory)
    return temporary_work_directory, report_directory, publish_directory
def build_network(cfg):
    """ Build the network based on the cfg

    Args:
        cfg (dict): a dict of configuration; reads cfg['model']['base'],
            cfg['model']['pretrained'] and cfg['model']['num_classes']

    Returns:
        network (nn.Module)

    Raises:
        NotImplementedError: if cfg['model']['base'] is not a known model name.
    """
    pretrained = cfg['model']['pretrained']
    kwargs = {
        'num_classes': cfg['model']['num_classes'],
    }
    base = cfg['model']['base']
    # BUG FIX: the original wrote `convnext_tiny(pretrained **kwargs)`, which
    # parses as `pretrained ** kwargs` (exponentiation) and fails at runtime;
    # it also built convnext_tiny for the small/base/large names.
    # NOTE(review): assumes convnext_small/base/large are importable alongside
    # convnext_tiny — confirm against the module's imports.
    if base == 'convnext_tiny':
        network = convnext_tiny(pretrained=pretrained, **kwargs)
    elif base == 'convnext_small':
        network = convnext_small(pretrained=pretrained, **kwargs)
    elif base == 'convnext_base':
        network = convnext_base(pretrained=pretrained, **kwargs)
    elif base == 'convnext_large':
        network = convnext_large(pretrained=pretrained, **kwargs)
    elif base == 'cd_convnext_tiny':
        network = cd_convnext_tiny(**kwargs)
    elif base == 'cd_convnext_small':
        network = cd_convnext_small(**kwargs)
    elif base == 'cd_convnext_base':
        network = cd_convnext_base(**kwargs)
    elif base == 'cd_convnext_large':
        network = cd_convnext_large(**kwargs)
    elif base == 'resnet50':
        network = resnet_50(img_ch=3, **kwargs)
    elif base == 'resnet101':
        network = resnet_101(img_ch=3, **kwargs)
    elif base == 'resnet152':
        network = resnet_152(img_ch=3, **kwargs)
    elif base == 'cd_resnet50':
        network = cd_resnet_50(img_ch=3, **kwargs)
    elif base == 'cd_resnet101':
        network = cd_resnet_101(img_ch=3, **kwargs)
    elif base == 'cd_resnet152':
        network = cd_resnet_152(img_ch=3, **kwargs)
    elif base == 'cdcn':
        network = cdcn()
    else:
        raise NotImplementedError
    return network
def check_band_below_faint_limits(bands, mags):
    """
    Check if a star's magnitude for a certain band is below the
    faint limit for that band.

    Parameters
    ----------
    bands : str or list
        Band(s) to check (e.g. ['SDSSgMag', 'SDSSiMag']).
    mags : int, float or list
        Magnitude(s) of the band(s) corresponding to the band(s) in the
        bands variable

    Returns
    -------
    list : a new list of bands that are above the faint limit (ie - use-able bands)
    """
    # Faint (dim) limit per band; a magnitude at or above its limit means the
    # star is too faint in that band, so the band is dropped.
    faint_limits = {
        'SDSSgMag': 24,
        'SDSSrMag': 24,
        'SDSSiMag': 23,
        'SDSSzMag': 22,
    }
    # Normalize scalar inputs to one-element lists (ints previously slipped
    # through the float-only check and broke the zip below).
    if isinstance(bands, str):
        bands = [bands]
    if isinstance(mags, (int, float)):
        mags = [mags]
    new_bands = []
    for band, mag in zip(bands, mags):
        # Substring match mirrors the original elif chain (e.g. a prefixed
        # column name still matches its limit).
        limit = next((lim for key, lim in faint_limits.items() if key in band),
                     None)
        if limit is not None and mag >= limit:
            continue
        new_bands.append(band)
    return new_bands
def findNmin_ballot_comparison_rates(alpha, gamma, r1, s1, r2, s2,
                                     reported_margin, N, null_lambda=1):
    """
    Compute the smallest sample size for which a ballot comparison
    audit, using Kaplan-Markov, with the given statistics could stop.

    Parameters
    ----------
    alpha : float
        risk limit
    gamma : float
        value > 1 to inflate the error bound, to avoid requiring a full hand
        count for a single 2-vote overstatement
    r1 : int
        hypothesized rate of ballots that overstate any
        margin by one vote but no margin by two votes
    s1 : int
        hypothesized rate of ballots that understate any margin by
        exactly one vote, and every margin by at least one vote
    r2 : int
        hypothesized rate of ballots that overstate any margin by two votes
    s2 : int
        hypothesized rate of ballots that understate every margin by two votes
    reported_margin : float
        the smallest reported margin *in votes* between a winning
        and losing candidate in the contest as a whole, including any other strata
    N : int
        number of votes cast in the stratum
    null_lambda : float
        fraction of the overall margin (in votes) to test for in the stratum.
        If the overall margin is reported_margin, test that the overstatement
        in this stratum does not exceed null_lambda*reported_margin

    Returns
    -------
    n : float
        minimum stopping sample size, or NaN when no finite size can suffice
    """
    # Total error bound in units of the diluted margin.
    U_s = 2*N/reported_margin
    # Per-ballot log contribution of an error-free ballot ...
    clean_term = np.log(1 - null_lambda/(U_s*gamma))
    # ... minus the contributions of the hypothesized over/understatements.
    overstatements = r1*np.log(1 - 1/(2*gamma)) + r2*np.log(1 - 1/gamma)
    understatements = s1*np.log(1 + 1/(2*gamma)) + s2*np.log(1 + 1/gamma)
    denom = clean_term - overstatements - understatements
    # A non-negative denominator means the statistic can never cross alpha.
    return np.ceil(np.log(alpha)/denom) if denom < 0 else np.nan
from typing import List
import json
def patch_item(news_id, patches):
    """Apply the patches to the given news ID. If the categories change, they will be updated
    in the related NewsCategoriesMapping. Returns the modified JSON presentation.

    Args:
        news_id: primary key (NewsID) of the News row to modify.
        patches: JSON Patch operations (RFC 6902 style), either as a list of
            dicts or as a JSON string decoding to one.

    Returns:
        dict: accumulated result of the applied patches on success, or a
        ``{"success": False, "message": ...}`` dict when the patch is
        rejected (the DB session is rolled back in that case).
    """
    news = News.query.filter_by(NewsID=news_id).first()
    result = {}

    # Make sure it is a list
    if not isinstance(patches, List):
        # Probably a JSON string
        patches = json.loads(patches)

    try:
        for patch in patches:
            # The RFC spec tells to return an object that has all the modifications
            result = apply_patch(news, patch, result)
    except ValueError as ve:
        # Invalid "op" in the patch. Cancel.
        print("ERROR! Invalid OP in patch:\n{}".format(ve))
        db.session.rollback()
        return {"success": False, "message": "Invalid operation in patch"}
    except AssertionError:
        # Test "op" did not pass the comparison. Cancel.
        print("ERROR! TEST did not match comparison in patch")
        db.session.rollback()
        return {"success": False, "message": "Comparison test failed in the patch"}
    except TypeError:
        # When passing non-array to categories
        print("Only lists are allowed in categories")
        db.session.rollback()
        return {"success": False, "message": "Only lists are allowed in categories"}
    except Exception as e:
        print("ERROR! General error - will rollback")
        print(e)
        # Any unknown error should also cancel the entire patch
        db.session.rollback()
        return {"success": False, "message": str(e)}

    # All operations applied cleanly: stamp the modification time and commit
    # the whole patch atomically.
    news.Updated = get_datetime()
    db.session.commit()
    return result
def get_sample_prediction(session, regression):
    """Generate and return a sample prediction formatted specifically for table creation.

    Args:
        session: A SQLalchemy session object
        regression: A regression object from four_factor_regression.py

    Returns:
        A DataOperator object initialized with a prediction from regression
    """
    # Use the first predictor row as a representative single-game input.
    one_row_dataframe = regression.predictors.loc[[0]]
    sample_prediction = predict_game(session, regression, one_row_dataframe)
    # Wrap the raw prediction so table-creation code can consume it.
    data = DataOperator(sample_prediction)
    return data
def log_variables(y_dataset, variables_to_log):
    """
    take the log of given variables

    :param variables_to_log: [list of str] variables to take the log of
    :param y_dataset: [xr dataset] the y data
    :return: [xr dataset] the data logged (modified in place and returned)
    """
    # Local import keeps this fix self-contained in the function.
    import numpy as np

    for v in variables_to_log:
        y_dataset[v].load()
        # Small epsilon keeps zero values finite under the log transform.
        y_dataset[v].loc[:, :] = y_dataset[v] + 1e-6
        # BUG FIX: xarray.ufuncs was deprecated and removed from xarray;
        # NumPy ufuncs dispatch on xarray objects directly.
        y_dataset[v].loc[:, :] = np.log(y_dataset[v])
    return y_dataset
def find_place_num(n, m):
    """Count monotone lattice paths on an n-by-m grid.

    Satisfies the recurrence f(n, m) = f(n-1, m) + f(n, m-1) with
    f(1, m) = f(n, 1) = 1, whose closed form is the binomial coefficient
    C(n + m - 2, n - 1).  Using the closed form replaces the original
    exponential-time double recursion with an O(min(n, m)) computation.

    :param n: positive grid dimension
    :param m: positive grid dimension
    :return: number of paths (int)
    """
    if n == 1 or m == 1:
        # Base case of the recurrence: a single row or column has one path.
        return 1
    return math.comb(n + m - 2, n - 1)
def AddWorkerpoolUpdateArgs(parser, release_track):
    """Set up all the argparse flags for updating a workerpool.

    Args:
      parser: An argparse.ArgumentParser-like object.
      release_track: A base.ReleaseTrack-like object.

    Returns:
      The parser argument with workerpool flags added in.
    """
    # Delegates to the shared flag builder in update mode.
    return AddWorkerpoolArgs(parser, release_track, update=True)
def check_vibes(x0, y0, z0, x1, y1, z1, deadzone=750):
    """
    Return boolean if the accelerometer senses vibration.

    This module has a range of 1500 while resting.
    The default dead zone is 750 because 1500 is quite rare.
    Values for this can be measured with min-max.py before coded here.
    """
    # Vibration is flagged when the combined axis delta leaves the dead zone.
    return diff(x0, y0, z0, x1, y1, z1) > deadzone
def voiced_seg(sig,fs,f0,stepTime):
    """
    Voiced segments

    sig: Speech signal
    fs: Sampling frequency
    f0: Pitch contour
    stepTime: Step size (in seconds) used to computed the f0 contour.

    Returns a dict with keys 'Voiced_segments' (list of signal slices),
    'Voiced_times' (list of [start, end] pairs in seconds) and
    'Voiced_labels' (per-sample 0/1 voicing array aligned with sig).
    """
    # Binarize the pitch contour: 1 where voiced (f0 != 0), 0 where unvoiced.
    yp = f0.copy()
    yp[yp!=0] = 1
    #In case the starting point is F0 and not 0
    # NOTE(review): np.insert returns a new array; both calls below discard
    # their result, so they have no effect on yp — confirm intended behavior.
    if yp[0] == 1:
        np.insert(yp, 0, 1)
    if yp[-1:] == 1:
        np.insert(yp, 0, len(yp)-1)
    #---------------------
    # Transitions of the binary contour mark segment boundaries:
    # +1 = unvoiced->voiced (start), -1 = voiced->unvoiced (end).
    ydf = np.diff(yp)
    lim_end = np.where(ydf==-1)[0]+1
    lim_ini = np.where(ydf==1)[0]+1
    #Voiced segments
    v_segm = []
    tm = []
    vcont = np.zeros(len(sig))
    for idx in range(len(lim_ini)):
        #------------------------------------
        #Voiced segments
        # Convert frame indices to sample indices via the contour step size.
        tini = int(lim_ini[idx]*stepTime*fs)
        tend = int(lim_end[idx]*stepTime*fs)
        # Keep only segments longer than 40 ms.
        if int(tend-tini)>int(0.04*fs):
            # print(tini,tend)
            vcont[tini:tend] = 1
            v_segm.append(sig[tini:tend])
            tm.append(np.hstack([lim_ini[idx]*stepTime,lim_end[idx]*stepTime]))
    vseg = {'Voiced_segments':v_segm,
            'Voiced_times':tm,
            'Voiced_labels':vcont}
    return vseg
def scale_min_max(x, min_in, max_in, min_out, max_out):
    """Scales linearly"""
    # Map [min_in, max_in] onto [min_out, max_out] ...
    scaled = ((max_out - min_out) * (x - min_in)) / (max_in - min_in) + min_out
    # ... and clamp so out-of-domain inputs saturate at the output bounds.
    return np.clip(scaled, min_out, max_out)
def ast_to_z3(inspected_function: dict):
    """
    Get the inspected object from the ast visit
    and call each mapper from extracted object to its generated Z3 conditions
    then, concatenate their results in a single string.

    :param inspected_function: dict generated from the AST Visit; reads the
        keys 'local_variables', 'formal_parameters',
        'accessed_state_variables', 'map_id_variable_name',
        'require_expression_map', 'candidate_for_overflow', 'require_nodes'.
    :return: single string of Z3 declarations, expressions and constraints
    """
    # First declaration entry of each local-variable record.
    local_variables = [x['declarations'][0] for x in inspected_function['local_variables']]
    formal_parameters = inspected_function['formal_parameters']
    # Each helper renders one facet of the function as Z3 source text.
    symbolic_state_variables = _add_symbolic_state_variables(inspected_function['accessed_state_variables'])
    symbolic_variable_decl_str = _add_symbolic_variables_declaration(local_variables, formal_parameters)
    symbolic_variable_constraints_str = _add_symbolic_constraints(local_variables, formal_parameters)
    symbolic_expressions = _add_symbolic_expressions(
        inspected_function['local_variables'], inspected_function['map_id_variable_name'])
    symbolic_require_expression = _add_symbolic_require_expressions(inspected_function['require_expression_map'])
    overflow_constraints = _add_overflow_constraints(inspected_function['candidate_for_overflow'])
    require_conditions = _requires_constraints(inspected_function['require_nodes'])
    require_conditions_overflow = _add_require_condition_with_overflow(
        require_conditions, inspected_function['candidate_for_overflow'])
    # Concatenate in dependency order: declarations first, then constraints.
    generate_str = symbolic_state_variables + symbolic_variable_decl_str
    generate_str = generate_str + symbolic_variable_constraints_str
    generate_str = generate_str + symbolic_expressions + symbolic_require_expression
    generate_str = generate_str + overflow_constraints + require_conditions_overflow
    return generate_str
def freight_june_2014():
    """Find the number of freight of the month"""
    # Scan the dataset for rows whose category (col 1) is "Freight" and whose
    # month (col 4) is "June"; column 6 holds the count.
    # NOTE(review): despite the name saying 2014, this reads
    # fetch_data_2013() — confirm which year is intended.
    # NOTE(review): if no row matches, num_0 is never bound and the return
    # raises NameError; if several match, the last one wins.
    for i in fetch_data_2013():
        if i[1] == "Freight" and i[4] == "June":
            num_0 = i[6]
    return int(num_0)
def add_license_creation_fields(license_mapping):
    """
    Return an updated ``license_mapping`` of license data adding license status
    fields needed for license creation.

    The mapping is modified in place and also returned for chaining.
    """
    # Newly created licenses always start out inactive and un-reviewed.
    creation_defaults = {
        "is_active": False,
        "reviewed": False,
        "license_status": "NotReviewed",
    }
    license_mapping.update(creation_defaults)
    return license_mapping
def filegroup(space, fname):
    """ filegroup - Gets file group """
    # Thin wrapper delegating to the underlying _filegroup implementation.
    return _filegroup(space, fname)
def convert_examples_to_features(examples, tokenizer, label_list=None, max_seq_length=128):
    """Loads a data file into a list of `InputBatch`s.

    Args:
        examples: iterable of raw text examples to tokenize.
        tokenizer: BERT-style tokenizer exposing ``tokenize`` and
            ``convert_tokens_to_ids``.
        label_list: unused; kept for signature compatibility with callers.
        max_seq_length: maximum wordpiece length including [CLS] and [SEP].

    Returns:
        list of InputFeatures, one per example, each padded to
        ``max_seq_length``.
    """
    # BUG FIX: the original contained a sentence-pair branch guarded by
    # `if None:` (never taken) and a matching `if tokens_b:` append block
    # (tokens_b was always None), plus an `if ex_index < 0:` debug logger
    # that could never fire.  All three dead paths are removed; only the
    # single-sequence path below ever executed.
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example)

        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[:(max_seq_length - 2)]

        # Single-sequence BERT convention:
        #   tokens:   [CLS] the dog is hairy . [SEP]
        #   type_ids:   0    0   0  0    0   0   0
        # The [CLS] vector is used as the "sentence vector"; this only makes
        # sense because the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        # mask to keep track of the beginning of each sub-word piece
        sub_word_masks = [0 if t.startswith('##') else 1 for t in tokens]

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length; sub-word masks pad with ones.
        pad_length = max_seq_length - len(input_ids)
        input_ids += [0] * pad_length
        input_mask += [0] * pad_length
        segment_ids += [0] * pad_length
        sub_word_masks += [1] * pad_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(sub_word_masks) == max_seq_length

        features.append(
            InputFeatures(
                guid=ex_index,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                sub_word_masks=sub_word_masks,
            ))
    return features
import numpy as np
def make_reference_bands_inline(wannier_bands, vasp_bands, efermi=None):
    """
    Compare bandstructure results from wannier and vasp.

    Takes two input array.bands nodes, stores them if they're not already
    stored. Takes the relevant bands from the vasp bandstructure and stores and outputs
    them in a node with linkname 'bandcmp'.
    Also returns a parameter data node with linkname 'bandinfo' containing
    fermi energy, bandgap etc of the reference bandstructure.

    :param wannier_bands: array.bands node from the wannier calculation
    :param efermi: optional Fermi energy; if falsy, an attempt is made to
        read it from the vasp_bands parent calculation
    :return: dict with 'bands' (array.bands) and 'info' (parameter) nodes
    """
    # Both inputs must be bands nodes defined on identical k-points/labels.
    assert isinstance(wannier_bands, BANDS_CLS)
    assert isinstance(vasp_bands, BANDS_CLS)
    assert hasattr(wannier_bands, 'labels')
    assert hasattr(vasp_bands, 'labels')
    if vasp_bands.labels:
        assert vasp_bands.labels == wannier_bands.labels
    kpcomp = vasp_bands.get_kpoints() == wannier_bands.get_kpoints()
    assert kpcomp.all(), 'kpoints may not differ'

    owindow = get_outer_window(wannier_bands)

    wbands = wannier_bands.get_bands()
    vbands, vocc = vasp_bands.get_bands(also_occupations=True)

    # throw away spin dimension if appropriate
    if vbands.ndim == 3:
        vbands = vbands[0]
        vocc = vocc[0]

    # grab the vbands within the outer_window
    # find wich bands within the window match
    # by searching for the best fit using the sum of square errors
    vbands_window = np.empty(wbands.shape)
    vocc_window = np.empty(wbands.shape)
    w_nbands = wbands.shape[1]
    ref_nbands = vbands.shape[1]
    for band_idx in range(w_nbands):
        # Pick the VASP band with minimal squared error vs this wannier band.
        errs = [band_error(wbands[:, band_idx], vbands[:, i]) for i in range(ref_nbands)]
        minerr = np.argmin(errs)
        vbands_window[:, band_idx] = vbands[:, minerr]
        vocc_window[:, band_idx] = vocc[:, minerr]

    # For the future:
    # * find each band's index (s, px, py, ...)
    # * store the legend with the comparison node

    # find fermi energy from vasp_bands parent or work without it
    if not efermi:
        try:
            efermi = vasp_bands.inp.bands.out.results.get_dict()['efermi']
        except Exception: # pylint: disable=broad-except
            pass

    ref_gap_info = band_gap(vbands_window, vocc_window, efermi)
    ref_info = DataFactory('parameter')()
    ref_info.update_dict({'bandgap': ref_gap_info})
    ref_info.update_dict({'efermi': efermi})
    ref_info.update_dict({'outer_window': owindow})

    ref_bands = DataFactory('array.bands')()
    ref_bands.set_kpointsdata(wannier_bands)
    ref_bands.set_bands(vbands_window, occupations=vocc_window)

    return {'bands': ref_bands, 'info': ref_info}
from matplotlib import colors
def streamline(ctx, a):
    """ Represents Streamline.vi.

    Reads the plot inputs from the argument reader ``a``, validates and
    filters them, draws a matplotlib streamplot on the selected plot, and
    exposes the drawn line collection on ``ctx.mappable`` for Colorbar.vi.
    """
    plotid = a.plotid()
    x = a.dbl_1d('x')  # X loc of arrows
    y = a.dbl_1d('y')  # Y loc of arrows
    u = a.dbl_2d('u')  # Arrow X component
    v = a.dbl_2d('v')  # Arrow Y component
    c = a.dbl_2d('c')  # Vals for colormap
    density = a.float('density')
    arrowsize = a.float('arrowsize')
    line = a.line()
    cmap = a.colormap()

    ctx.set(plotid)
    ctx.fail_if_polar()
    ctx.fail_if_log_symlog()

    # All dimensions must be greater than 5 or pyplot.streamplot breaks
    if any([(dim==2 or dim==4) for dim in (u.shape+v.shape)]):
        return

    # We need some data to plot; if not present, simply return (no error)
    # BUG FIX: np.product was removed in NumPy 2.0; np.prod is the
    # supported spelling.
    if np.prod(u.shape + v.shape) == 0:
        return

    # Apply default values for optional args
    if len(x) == 0:
        x = np.arange(u.shape[1])
    if len(y) == 0:
        y = np.arange(u.shape[0])

    # Check for MPL-required regularity
    def check_coords(arr):
        # True when arr is all-finite and uniformly spaced
        # (second differences within tolerance).
        if np.any(~np.isfinite(arr)):
            return False
        tol = 1e-5
        arr = arr[1:] - arr[0:-1] # get deltas (1st der)
        arr = arr[1:] - arr[0:-1] # deltas of deltas (2nd der)
        return not np.any(np.abs(arr) > tol)
    if not check_coords(x):
        raise errors.InputNotRegular("X Coordinate array elements must be monotonic, uniformly spaced, and contain only finite values")
    if not check_coords(y):
        raise errors.InputNotRegular("Y Coordinate array elements must be monotonic, uniformly spaced, and contain only finite values")

    # Clip all to same size and remove NaNs
    if len(c) != 0:
        x, y, u, v, c = filters.filter_2d(x, y, u, v, c)
    else:
        x, y, u, v = filters.filter_2d(x, y, u, v)

    # Data must be at least 2x2
    if u.shape[0] < 2 or u.shape[1] < 2:
        return

    # Ignore invalid values
    if density is not None and density < 0:
        density = None
    if density is not None and density == 0:
        return # MPL can't handle this
    if arrowsize is not None and arrowsize < 0:
        arrowsize = None
    # Bug in MPL
    if arrowsize is not None and arrowsize == 0:
        arrowsize = 0.00001

    # Color priority: (1) line.color, (2) 2D colormap array, (3) black
    if line.color is not None:
        color = line.color
    elif len(c) != 0:
        color = c
    else:
        color = 'k'

    # There's a bug in MPL which breaks colormapping with the default options
    # to Normalize. Only happens with streamplot. So we manually initialize
    # the Normalize range.
    if isinstance(color, np.ndarray):
        vmin = cmap.vmin if cmap.vmin is not None else color.min()
        vmax = cmap.vmax if cmap.vmax is not None else color.max()
        norm = colors.Normalize(vmin=vmin, vmax=vmax)
    else:
        norm = None

    # Better-looking defaults
    if density is None:
        density = 1.5

    k = { 'density': density,
          'arrowsize': arrowsize,
          'color': color,
          'linewidth': line.width,
          'cmap': cmap.map,
          'norm': norm }
    remove_none(k)

    out = plt.streamplot(x, y, u, v, **k)

    # For Colorbar.vi
    if isinstance(color, np.ndarray):
        # MPL's streamplot return object is bizarre and can't be used directly
        # as an argument to colorbar(). So use the .line attribute.
        ctx.mappable = out.lines
from typing import Optional
import contextlib
def check_reorgs_task(self) -> Optional[int]:
    """
    Detect and recover from chain reorganisations.

    :return: Number of oldest block with reorg detected. `None` if not reorg found
    """
    # Suppress LockError so an overlapping scheduled run exits quietly
    # instead of raising.
    with contextlib.suppress(LockError):
        # Ensure only one instance of this task runs at a time.
        with only_one_running_task(self):
            logger.info("Start checking of reorgs")
            reorg_service: ReorgService = ReorgServiceProvider()
            first_reorg_block_number = reorg_service.check_reorgs()
            if first_reorg_block_number:
                logger.warning(
                    "Reorg found for block-number=%d", first_reorg_block_number
                )
                # Stopping running tasks is not possible with gevent
                reorg_service.recover_from_reorg(first_reorg_block_number)
                return first_reorg_block_number
import sys
def InputChecking(str_inputFileName_genotype, str_inputFileName_phenotype):
    """
    To check the numbers of sample are consistent in genotype and phenotype data.

    Args:
        str_inputFileName_genotype (str): File name of input genotype data
        str_inputFileName_phenotype (str): File name of input phenotype data

    Returns:
        (tuple): tuple containing:

            - int_num_genotype (int): The line count of the genotype file
            - int_num_phenotype (int): The line count of the phenotype file

    Exits via ``sys.exit`` when an input file name is missing or the genotype
    sample count disagrees with the phenotype line count.
    """
    ### check file name exist
    if str_inputFileName_genotype is None:
        sys.exit("There is no input genotype file.")
    if str_inputFileName_phenotype is None:
        sys.exit("There is no input phenotype file.")

    ### count lines of input files
    ### (BUG FIX: the original `sum(1 for line in open(...))` never closed
    ### the file handles; context managers guarantee closure)
    with open(str_inputFileName_genotype) as file_genotype:
        int_num_genotype = sum(1 for _ in file_genotype)
    with open(str_inputFileName_phenotype) as file_phenotype:
        int_num_phenotype = sum(1 for _ in file_phenotype)

    ### count sample num. in genotype file: each line holds 5 metadata
    ### fields followed by 3 fields per sample
    with open(str_inputFileName_genotype, 'r') as file_inputFile:
        list_line = file_inputFile.readline().strip().split(" ")
        int_num_genotype_sample = (len(list_line) - 5) / 3
    if int_num_genotype_sample != int_num_phenotype:
        sys.exit("The number of samples in genotype file does not match the number of samples in phenotype file.")

    return int_num_genotype, int_num_phenotype
def is_all_gathered():
    """ Determines if all languages have had their download goals
        accomplished
    """
    # Every language must have collected at least NUM_TO_GATHER items.
    return all(len(items) >= NUM_TO_GATHER for items in gathered.values())
def greedy_eval_Q(Q: QTable, this_environment, nevaluations: int = 1):
    """
    Evaluate Q function greediely with epsilon=0

    :param Q: Q-table mapping states to per-action values
    :param this_environment: gym-style environment (reset/step/action_space)
    :param nevaluations: number of episodes to roll out
    :returns average cumulative reward, the expected reward after resetting the environment, episode length

    NOTE(review): expected_reward and episode_length come from the *last*
    rollout only, while the cumulative reward is averaged over all rollouts.
    """
    cumuls = []
    for _ in range(nevaluations):
        evaluation_state = this_environment.reset()
        episode_length, cummulative_reward = 0, 0
        # Best attainable action value from the reset state.
        expected_reward = np.max(Q[evaluation_state])
        # epsilon=0 makes the derived policy fully greedy.
        greedy = make_epsilon_greedy_policy(Q, 0, this_environment.action_space.n)
        while True:  # roll out episode
            evaluation_action = np.random.choice(list(range(this_environment.action_space.n)),
                                                 p=greedy(evaluation_state))
            s_, evaluation_reward, evaluation_done, _ = this_environment.step(evaluation_action)
            cummulative_reward += evaluation_reward
            episode_length += 1
            if evaluation_done:
                break
            evaluation_state = s_
        cumuls.append(cummulative_reward)
    return np.mean(cumuls), expected_reward, episode_length
def lowercase(obj):
    """Recursively lower-case every string in a nested structure.

    Dict keys and values, and the elements of lists/sets/tuples, are
    lower-cased in a new object of the same type; anything else is
    returned unchanged.
    """
    if isinstance(obj, str):
        return obj.lower()
    if isinstance(obj, dict):
        return {key.lower(): lowercase(value) for key, value in obj.items()}
    if isinstance(obj, (list, set, tuple)):
        # Rebuild with the same concrete container type.
        container = type(obj)
        return container(lowercase(element) for element in obj)
    return obj
def link_translate(course, html):
    """ return html string with ~/ and ~~/ links translated
        into the appropriate course and site urls """
    # The site course url already ends with '/'; other course urls need one.
    course_url_with_slash = course.url if course.url[-1] == '/' else course.url + '/'
    # Only translate the prefixes at the beginning of a markdown link target,
    # i.e. "(~~/" and "(~/", so shell text like "$ cd ~/foo" is untouched.
    # This must run before markdown2html so the markdown syntax is visible.
    # "~~/" goes first so its leading "~" is not consumed by the "~/" pass.
    html = html.replace('(~~/', '(/' + url_base + r'/')
    html = html.replace('(~/', '(' + course_url_with_slash)
    return html
def _workaround_for_datetime(obj):
    """Workaround for numpy#4983: buffer protocol doesn't support
    datetime64 or timedelta64.
    """
    # Reinterpret the underlying 64-bit values as plain int64 so the buffer
    # protocol can handle them; .view() copies no data.
    if _is_datetime_dtype(obj):
        obj = obj.view(np.int64)
    return obj
import argparse
import os
def getOptions():
    """Parse command-line options for the design-file group removal tool.

    Returns:
        argparse.Namespace: parsed arguments; ``input``, ``design`` and
        ``out`` are normalized to absolute paths and ``drops`` is split
        into a list of group names.
    """
    # BUG FIX: several user-facing help strings had typos produced by
    # missing spaces at line-continuation joins ("filebelonging",
    # "uniquedentifiers", ...); spaces and spellings are corrected below.
    parser = argparse.ArgumentParser(description="Removes samples from the design file " \
                        "belonging to the user-specified group(s).")
    # Standard Input
    standard = parser.add_argument_group(title="Standard input",
                        description="Standard input for SECIM tools.")
    standard.add_argument("-i", "--input", dest="input", action='store',
                        required=True, help="Input dataset in wide format.")
    standard.add_argument("-d", "--design", dest="design", action='store',
                        required=True, help="Design file.")
    standard.add_argument("-id", "--uniqID", dest="uniqID", action="store",
                        required=True, help="Name of the column with unique "
                        "identifiers.")
    standard.add_argument("-g", "--group", dest="group", action='store',
                        required=False, help="Name of column in design file "
                        "with Group/treatment information.")
    # Tool Specific
    tool = parser.add_argument_group(title="Tool specific input",
                        description="Input that is specific for this tool.")
    tool.add_argument("-dp", "--drops", dest="drops", action='store',
                        required=True, help="Name of the groups in your "
                        "group/treatment column that you want to remove from the design file.")
    # Output Paths
    output = parser.add_argument_group(title='Output paths',
                        description="Paths for the output files")
    output.add_argument("-o", "--out", dest="out", action="store",
                        required=True, help="Output path for the new design file")
    args = parser.parse_args()

    # Standardize paths so downstream code can rely on absolute locations.
    args.out = os.path.abspath(args.out)
    args.input = os.path.abspath(args.input)
    args.design = os.path.abspath(args.design)

    # Split the comma-separated groups/samples to drop into a list.
    args.drops = args.drops.split(",")

    return (args)
def sql_dynamic_row_count_redshift(schemas: list) -> str:
    """Generates an SQL statement that counts the number of rows in
    every table in a specific schema(s) in a Redshift database"""
    # Render the schema list as a quoted, comma-separated IN (...) payload.
    quoted_schemas = [f"'{schema}'" for schema in schemas]
    sql_schemas = ', '.join(quoted_schemas)
    # LISTAGG stitches together one UNIONed COUNT(*) query per table.
    return f"""
    WITH table_list AS (
        SELECT schemaname, tablename
        FROM pg_tables
        ,(SELECT top 1 1 FROM ppw_e2e_helper.dual)
        WHERE schemaname IN ({sql_schemas}))
    SELECT LISTAGG(
        'SELECT ''' || LOWER(tablename) || ''' tbl, COUNT(*) row_count FROM ' || schemaname || '."' || tablename || '"',
        ' UNION ') WITHIN GROUP ( ORDER BY tablename )
        || 'ORDER BY tbl'
    FROM table_list
    """
from datetime import datetime
def eow(date: datetime.date, offset: int = 0, weekday: str = "SUN") -> datetime.date:
    """
    Returns the end of the week, i.e. the first date on or after the given date
    whose weekday is equal to the the :code:`weekday` argument, and
    offset by a given number of weeks.

    Weekday must be one of :code:`'MON'`, :code:`'TUE'`, :code:`'WED'`, :code:`'THU'`,
    :code:`'FRI'`, :code:`'SAT'` or :code:`'SUN'`.

    Parameters
    ----------
    date : datetime.date
        the date from which to determine the end of the week
    offset : int, optional
        the number of weeks from which to offset the most recent end
        of the week (default is 0)
    weekday : str, optional
        the weekday which defines the end of the week (default is :code:`"SUN"`)

    Returns
    -------
    :code:`datetime.date`
        The end of the week

    Examples
    --------
    >>> today = datetime.date(2020, 1, 15) #Wednesday
    >>> eow(today)
    datetime.date(2020, 1, 19) #Sunday (by default)
    >>> eow(today, 1)
    datetime.date(2020, 1, 26) #following Sunday
    >>> eow(today, weekday="THU")
    datetime.date(2020, 1, 16) #first Thursday on or after today
    >>> eow(today, weekday="THU", offset=-1)
    datetime.date(2020, 1, 9) #most recent Thursday strictly before today
    """
    # BUG FIX: the docstring examples previously claimed Jan 20 / Jan 27 /
    # Jan 8 for the SUN and offset=-1 cases, which contradict the arithmetic
    # below (and the THU example): for Wed Jan 15 2020 the next Sunday is
    # Jan 19 and the previous Thursday is Jan 9.
    # NOTE(review): assumes constants.WEEKDAYS maps names to Python's
    # date.weekday() numbering (MON=0 ... SUN=6) — confirm.
    if isinstance(weekday, str):
        weekday = constants.WEEKDAYS[weekday]
    # Days until the requested weekday (0..6), then shift by whole weeks.
    return date + datetime.timedelta((weekday - date.weekday()) % 7 + offset * 7)
def dequote(s):
    """
    from: http://stackoverflow.com/questions/3085382/python-how-can-i-strip-first-and-last-double-quotes

    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.
    """
    # A matching pair needs at least two characters; this guard also fixes
    # the IndexError the original raised on an empty string and stops a
    # lone quote character from being stripped to "".
    if len(s) >= 2 and (s[0] == s[-1]) and s.startswith(("'", '"')):
        return s[1:-1]
    return s
def has_issue_tracker(function):
    """
    Decorator that checks if the current pagure project has the
    issue tracker active
    If not active returns a 404 page
    """

    @wraps(function)
    def check_issue_tracker(*args, **kwargs):
        repo = flask.g.repo
        # 404 when issues are disabled globally or for this repository.
        if not flask.g.issues_enabled or not repo.settings.get(
            "issue_tracker", True
        ):
            flask.abort(
                404, description="No issue tracker found for this project"
            )
        # forbid all POST requests if the issue tracker is made read-only
        if flask.request.method == "POST" and repo.settings.get(
            "issue_tracker_read_only", False
        ):
            flask.abort(
                401,
                description="The issue tracker for this project is read-only",
            )
        return function(*args, **kwargs)

    return check_issue_tracker
from typing import List
def find_deprecated_usages(
    schema: GraphQLSchema, ast: DocumentNode
) -> List[GraphQLError]:
    """Get a list of GraphQLError instances describing each deprecated use."""
    # TypeInfo tracks the current type context while the document is walked.
    type_info = TypeInfo(schema)
    visitor = FindDeprecatedUsages(type_info)
    # TypeInfoVisitor keeps type_info in sync as visit() traverses the AST.
    visit(ast, TypeInfoVisitor(type_info, visitor))
    return visitor.errors
import torch
def construct_embedding_mask(V):
    """
    Construct a mask for a batch of embeddings given node sizes.

    Parameters
    ----------
    V: (batch_size) actual number of nodes per set (tensor)

    Returns
    -------
    mask: (batch_size) x (n_nodes) binary mask (tensor)
    """
    # Width of the mask is the (truncated) largest node count in the batch.
    n_cols = int(torch.max(V).int())
    # Column j of row i is 1 exactly when j < V[i] (counts truncated to int).
    positions = torch.arange(n_cols, device=V.device)
    mask = (positions.unsqueeze(0) < V.int().unsqueeze(1)).float()
    return mask
def url_form(url):
    """Takes the SLWA photo url and returns the photo url. Note this
    function is heavily influenced by the format of the catalogue and could be
    easily broken if the Library switches to a different url structure.
    """
    # Catalogue records sometimes omit the image extension; default to .jpg.
    if url.endswith(('.png', '.jpg')):
        return url
    return url + '.jpg'
from datetime import datetime
def timedelta_from_now(delta):
    """
    Add a timedelta to now, plus a fudge factor of a few seconds.

    Most useful for chaining with Django's builtin filter "timeuntil",
    for producing a humanized timedelta.
    """
    # The few-second fudge keeps "timeuntil" from rounding down a full unit.
    fudge = datetime.timedelta(seconds=5)
    return datetime.datetime.utcnow() + delta + fudge
import os
def myglob(paths,include=('/**/*.ipynb',),exclude=None,exclude_default=\
    ('/**/*_tested.ipynb','**/testnotebooks.ipynb',\
    '**/*Template.ipynb')):
    """Find files that match some patterns, but exclude other patterns.

    For paths given by `paths`, find all file path names that match any pattern
    in `include`, but exclude all file path names that match any pattern in
    `exclude` or `exclude_default`.

    Parameters:
    -----------
    paths : str or tuple
        Either one (if string) or multiple (if tuple of strings) paths to scan
        for the desired file path names. `paths`, or the members of `paths`,
        can be either file name paths, or directories to search. Each directory
        will be searched recursively.
    include: str or tuple
        File path name pattern (if string) or tuple of patterns to match found
        file path names against. By default, all ''*ipynb' files will be found.
    exclude: str or tuple
        File path name pattern (if string) or tuple of patterns to be excluded
        from the final set of matched file path names. Default is None.
    exclude_default: str or tuple
        File path name pattern (if string) or tuple of patterns, to be excluded
        by default from the final set of matched file path names. This is in
        addition to any exclusion patterns given in `exclude`. By default,
        excluded patterns comprise: notebook files ending in '*_tested.ipynb',
        files named 'testnotebooks.ipynb', and notebook files ending in
        '*Template.ipynb'.

    Examples:
    ---------
    Find all ''*.ipynb' files in directories ''../a/b/dir1/'' and ./XYZ/dir2/'
    but exclude those ending in '_foobar.ipynb':

    ```python
    paths = ('../a/b/dir1/', './XYZ/dir2/')
    exclude = '**/*_foobar.ipynb'
    files = myglob(paths=paths,exclude=exclude)
    ```
    """
    if isinstance(paths,str):
        paths = (paths,)
    # Paths that are themselves files are always candidates; directories
    # will be searched for matches below.
    files = set([_ for _ in paths if os.path.isfile(_)])
    dirs = [_ for _ in paths if os.path.isdir(_)]
    # set of all files to be excluded
    files_exclude = set()
    if len(dirs) > 0:
        # set of all matched files
        files = files.union( set(glob_with_pattern(paths,patterns=include)) )
        if exclude is not None:
            aux = set(glob_with_pattern(dirs,patterns=exclude))
            files_exclude = files_exclude.union(aux)
        if exclude_default is not None:
            aux = set(glob_with_pattern(dirs,patterns=exclude_default))
            files_exclude = files_exclude.union(aux)
    # all-excluded
    return sorted(files - files_exclude)
import os
def _filepaths(directory, full_paths=True):
"""Get the filenames in the directory.
Args:
directory: Directory with the files
full_paths: Give full paths if True
Returns:
result: List of filenames
"""
# Initialize key variables
if bool(full_paths) is True:
result = [
os.path.join(directory, filename) for filename in os.listdir(
directory) if os.path.isfile(
os.path.join(directory, filename))]
else:
result = [filename for filename in os.listdir(
directory) if os.path.isfile(os.path.join(directory, filename))]
return result | 1a40cb2f3f940a911690f862fd8711d84d90fc94 | 3,634,359 |
def multivariate_gaussian(pos, mu, sigma):
    """
    Calculate the multivariate Gaussian distribution on array pos.
    Source: https://scipython.com/blog/visualizing-the-bivariate-gaussian-distribution/
    :param pos: numpy array, constructed by packing the meshed arrays of variables
        x1, x2, .. xk into its last dimension
    :param mu: numpy array (k,), means of gaussians
    :param sigma: (k, k) numpy array, covariance matrix of gaussians
    :return: multivariate gaussian density evaluated on the array `pos`
    """
    # Fix: the original reused the name `n` both for the dimensionality and
    # for the normalization constant; use distinct names for clarity.
    k = mu.shape[0]
    sigma_det = np.linalg.det(sigma)
    sigma_inv = np.linalg.inv(sigma)
    # Normalization constant sqrt((2*pi)^k * |Sigma|).
    norm_const = np.sqrt((2 * np.pi)**k * sigma_det)
    # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized way
    # across all the input variables.
    fac = np.einsum('...k,kl,...l->...', pos - mu, sigma_inv, pos - mu)
    return np.exp(-fac / 2.0) / norm_const
import subprocess


def du(path):
    """disk usage in human readable format (e.g. '2,1GB')"""
    # Shell out to `du -sh` and keep only the size column.
    raw = subprocess.check_output(['du', '-sh', path])
    return raw.split()[0].decode('utf-8')
def Rpivot(p, q, Mb):
    """
    Given an augmented matrix Mb, Mb = M|b, this gives the output of the
    pivot entry [i, j] in or below row p, and in or to the right of column q.

    :param p: starting row index (search never goes above it)
    :param q: starting column index (search never goes left of it)
    :param Mb: augmented matrix as a list of row lists
    :return: [i, j] candidate pivot position
    """
    # n is the number of columns of M, which is one less than that of Mb.
    m = len(Mb)
    n = len(Mb[0]) - 1
    # Initialize i, j to p, q, and we will not go above or leftwards of p, q.
    i = p
    j = q
    # Iterate through the columns of Mb to find its first nonzero column.
    for y in range(q, n):
        if [Mb[x][y] for x in range(p, m)] == [0] * (m - p):
            j = j + 1
        else:
            break
    # Iterate through the rows of M from p to n-1.
    # NOTE(review): this loop bound uses n (the column count) where the row
    # count m looks like the intended bound — TODO confirm.
    for x in range(p, n):
        # Adds one to row index i if column i is all zeros from column j
        # to column n.
        # NOTE(review): Mb[x][j:n] has n-j elements but is compared against
        # a list of n-j+1 zeros, so this comparison can never succeed —
        # suspected off-by-one (slice j:n+1?); confirm before changing.
        if Mb[x][j:n] == [0] * (n - j + 1):
            i = i + 1
        else:
            break
    return [i, j]
# NOTE(review): the leading "%s" in this docstring appears to be a
# placeholder that is substituted at runtime by vcs's documentation
# templating — left untouched on purpose.
def create3d_vector(name=None, source='default'):
    """%s
    :param name: The name of the created object
    :type name: `str`_
    :param source: The object to inherit from.
        Can be a 3d_vector, or a string name of a 3d_vector.
    :type source: `str`_ or :class:`vcs.dv3d.Gf3Dvector`
    :returns: A 3d_vector graphics method object
    :rtype: vcs.dv3d.Gf3Dvector
    """
    # Validate/resolve the name and source, then build the graphics method.
    name, source = check_name_source(name, source, '3d_vector')
    return dv3d.Gf3Dvector(name, source)
# NOTE: Python 2 code (print statements); depends on buildbot slave utils
# (gtest_slave_utils, slave_utils, chromium_utils) and a `config` module.
def _GenerateJSONForTestResults(options, log_processor):
  """Generates or updates a JSON file from the gtest results XML and upload the
  file to the archive server.
  The archived JSON file will be placed at:
  www-dir/DEST_DIR/buildname/testname/results.json
  on the archive server. NOTE: This will be deprecated.
  Args:
    options: command-line options that are supposed to have build_dir,
        results_directory, builder_name, build_name and test_output_xml values.
    log_processor: An instance of PerformanceLogProcessor or similar class.
  Returns:
    True upon success, False upon failure.
  """
  results_map = None
  # Step 1: build a results map, preferring the gtest XML output, falling
  # back to parsing the log output.
  try:
    if (os.path.exists(options.test_output_xml) and
        not _UsingGtestJson(options)):
      results_map = gtest_slave_utils.GetResultsMapFromXML(
          options.test_output_xml)
    else:
      if _UsingGtestJson(options):
        sys.stderr.write('using JSON summary output instead of gtest XML\n')
      else:
        sys.stderr.write(
            ('"%s" \\ "%s" doesn\'t exist: Unable to generate JSON from XML, '
             'using log output.\n') % (os.getcwd(), options.test_output_xml))
      # The file did not get generated. See if we can generate a results map
      # from the log output.
      results_map = gtest_slave_utils.GetResultsMap(log_processor)
  except Exception as e:
    # This error will be caught by the following 'not results_map' statement.
    print 'Error: ', e
  if not results_map:
    print 'No data was available to update the JSON results'
    # Consider this non-fatal.
    return True
  # Step 2: assemble the option object used by the JSON generator
  # (revisions, builder name, archive URL, results server).
  build_dir = os.path.abspath(options.build_dir)
  slave_name = options.builder_name or slave_utils.SlaveBuildName(build_dir)
  generate_json_options = copy.copy(options)
  generate_json_options.build_name = slave_name
  generate_json_options.input_results_xml = options.test_output_xml
  generate_json_options.builder_base_url = '%s/%s/%s/%s' % (
      config.Master.archive_url, DEST_DIR, slave_name, options.test_type)
  generate_json_options.master_name = options.master_class_name or _GetMaster()
  generate_json_options.test_results_server = config.Master.test_results_server
  print _GetMasterString(generate_json_options.master_name)
  generator = None
  # Step 3: resolve Chrome/WebKit revisions (from options or checkout) and
  # generate the JSON results file.
  try:
    if options.revision:
      generate_json_options.chrome_revision = options.revision
    else:
      chrome_dir = chromium_utils.FindUpwardParent(build_dir, 'third_party')
      generate_json_options.chrome_revision = _GetRevision(chrome_dir)
    if options.webkit_revision:
      generate_json_options.webkit_revision = options.webkit_revision
    else:
      webkit_dir = chromium_utils.FindUpward(
          build_dir, 'third_party', 'WebKit', 'Source')
      generate_json_options.webkit_revision = _GetRevision(webkit_dir)
    # Generate results JSON file and upload it to the appspot server.
    generator = gtest_slave_utils.GenerateJSONResults(
        results_map, generate_json_options)
  except Exception as e:
    print 'Unexpected error while generating JSON: %s' % e
    sys.excepthook(*sys.exc_info())
    return False
  # The code can throw all sorts of exceptions, including
  # slave.gtest.networktransaction.NetworkTimeout so just trap everything.
  # Earlier versions of this code ignored network errors, so until a
  # retry mechanism is added, continue to do so rather than reporting
  # an error.
  try:
    # Upload results JSON file to the appspot server.
    gtest_slave_utils.UploadJSONResults(generator)
  except Exception as e:
    # Consider this non-fatal for the moment.
    print 'Unexpected error while uploading JSON: %s' % e
    sys.excepthook(*sys.exc_info())
  return True
def unrank(n, rk):
    """Return the permutation of rank ``rk`` in Sn.

    The rank is decoded digit by digit in the factorial number system;
    previously-placed entries >= the new digit are shifted up to keep the
    result a permutation of range(n).

    :param n: size of the symmetric group.
    :param rk: rank of the permutation, 0 <= rk < n!.
    :return: list of n distinct ints in range(n).
    """
    P = [0] * n
    # Store (j+1)! for calculation.
    fac = 1
    # Ported from Python 2: xrange -> range, and `/` -> `//` (the original
    # relied on Python 2 integer division; `/` would yield floats here).
    for j in range(n - 1):
        fac *= (j + 1)
        d = (rk % (fac * (j + 2))) // fac
        rk -= d * fac
        P[n - j - 2] = d
        for i in range(n - j - 1, n):
            if P[i] > d - 1:
                P[i] += 1
    return P
def _compute_influence_kernel(iter, dqd, iter_scale,
                              radius,learning_rate):
    """Compute the neighborhood kernel for some iteration.
    Parameters
    ----------
    iter : int
      The iteration for which to compute the kernel.
    dqd : array (nrows x ncolumns)
      This is one quadrant of Euclidean distances between Kohonen unit
      locations.
    iter_scale: float
      Value of the iteration scaling for decaying function of lrate
    radius: integer
      Value of the max radius of the neighborhood function to decay
    learning_rate: float
      Value of the learning rate to decay with epochs
    Return:
    -------
    infl: ndarray
      The learning-rate-scaled Gaussian influence kernel, zeroed beyond the
      current radius.  (NOTE(review): an earlier docstring also listed
      curr_max_radius and curr_lrate as returns, but only `infl` is
      returned.)
    """
    # compute radius decay for this iteration
    # same for learning rate
    curr_max_radius = radius * np.exp(-1.0 * iter / iter_scale)
    curr_lrate = learning_rate * np.exp(-1.0 * iter / iter_scale)
    # compute Gaussian influence kernel
    # NOTE(review): the denominator `2 * curr_max_radius * iter` divides by
    # zero when iter == 0, and the usual Gaussian kernel uses
    # curr_max_radius ** 2 here — confirm the `* iter` factor is intended.
    infl = np.exp((-1.0 * dqd) / (2 * curr_max_radius * iter))
    infl *= curr_lrate
    # hard-limit kernel to max radius
    infl[dqd > curr_max_radius] = 0.
    return infl
from typing import Union
from typing import Dict


def get_as_dict(x: Union[Dict, Sentence]) -> Dict:
    """Coerce *x* into a dictionary of its attributes.

    Dicts pass through unchanged; NamedTuple-style objects are converted
    via ``_asdict``; anything else falls back to ``__dict__``.
    """
    if isinstance(x, dict):
        return x
    try:
        return x._asdict()
    except AttributeError:
        return x.__dict__
def get_user_sector_name(db_id):
    """
    Get the user defined sector for the given database id string.
    Raise an exception if not found.
    """
    found, sector = user_sector_finder("id", int(db_id))
    # Guard clause: bail out early when the lookup failed.
    if not found:
        raise NotFoundException(db_id, "User sector not found.")
    return make_sector_description(sector[0], True)
from fastapi import Path


# Bug fix: `Path` was previously imported from pathlib, but
# pathlib.Path does not accept FastAPI's `title=` / `ge=` validation
# keywords — the path-parameter declaration below needs fastapi.Path.
def delete_author_by_id(
        author_id: int = Path(...,
                              title="The Id of the author to be deleted", ge=0),
        sql: Session = Depends(db_session),
        current_user: UsersBase = Depends(get_current_active_user)
):
    """Delete a specific author by id.

    :param author_id: id of the author to delete (>= 0).
    :param sql: injected database session.
    :param current_user: injected authenticated user.
    :return: result of the ``delete_author`` helper.
    :raises HTTPException: 400 when the user is flagged inactive.
    """
    # NOTE(review): raises "Inactive user" when `current_user.Status` is
    # truthy — confirm the Status flag's polarity.
    if current_user.Status:
        raise HTTPException(status_code=400, detail="Inactive user")
    result = delete_author(sql, author_id=author_id)
    return result
import random


# NOTE(review): depends on module-level globals `our_tracks`, `projects`
# and `VIEWS_PER_PROJ` defined elsewhere; `projects[...]["num_judges"]` is
# mutated as a side effect.
def assign_judges_to_track(track, judges):
    """
    Assign judges to projects in a single, specified track with all the projects they will be looking at
    :param str track: name of the track we want judges for
    :param list judges: a list of the judges allocated for this track
    :return: a dictionary with the keys as judge names and values as an array of project numbers
    :rtype: dict
    """
    track_projects = our_tracks[track]
    # NOTE(review): `total_views` is computed but never used (hard-codes 3
    # where VIEWS_PER_PROJ is used below) — confirm before removing.
    total_views = len(track_projects) * 3
    assignments = {} # The final result, with the keys as judge names and values as an array of project numbers
    project_views = [] # The total pool of projects that can be assigned to judges
    judge_idx = 0 # used to loop through all the available judges as we assign projects
    count = 0 # reused throughout
    edge = False # if we hit the edge case where all the projects left to be assigned are already assigned to the last judge
    edge_idx = len(judges) - 1
    # Set up arrays
    for judge in judges:
        assignments[judge] = []
    for project in track_projects:
        # project_views[project] = 0
        # Each project appears VIEWS_PER_PROJ times in the pool.
        for i in range(VIEWS_PER_PROJ):
            project_views.append(project)
    # if there are less than 5 judges, there will be suffient overlap since each project will be seen with every other project at least once
    if len(judges) < 6:
        # Simple round-robin assignment; no randomness needed.
        for project in project_views:
            #print(assignments[judges[judge_idx]])
            #print(project)
            assignments[judges[judge_idx]].append(project)
            projects[project]["num_judges"][track] += 1
            judge_idx = (judge_idx + 1) % len(judges) # updates the judge index; reset once the last judge is reached
        return assignments
    # at least 6 judges; Add projects to a judge's array
    while len(project_views) > 0: # while there are still projects left to be assigned
        random_proj = random.choice(project_views)
        count += 1 # for edge test
        if random_proj in assignments[judges[judge_idx]]: # if this judge already has this project assigned to them
            if count > len(project_views) + 2: # if we have tried len(project_views)+1 number of random projects, we are probably at the edge case
                edge = True
                break # break to go to the edge case
            else:
                continue # if not at the edge, keep trying to get a new project
        # we now have a new project and can add it to this judge's assignments
        assignments[judges[judge_idx]].append(random_proj)
        projects[random_proj]["num_judges"][track] += 1
        project_views.remove(random_proj)
        count = 0
        judge_idx = (judge_idx + 1) % len(judges) # updates the judge index; reset once the last judge is reached
    # we can assume that all the projects left to be assigned have already been assigned to this last judge,
    # so start from the beginning and start assign a proj from judge 1 to the last judge, then judge 2 to second last
    if edge:
        count = 0 # used to sequentially step through the array of judges from index 0
        jLen = len(assignments[judges[count]]) # goal length of assignments
        while edge_idx != judge_idx-1: # keeps going until we get to the judge we terminated the previous loop at, edge_idx starts at the last judge
            random_proj = random.choice(assignments[judges[count]])
            if random_proj not in assignments[judges[edge_idx]]:
                # Swap the project from an early judge to a late judge.
                assignments[judges[edge_idx]].append(random_proj)
                assignments[judges[count]].remove(random_proj)
            count += 1 # next sequential judge
            edge_idx -= 1
        # if there are still unassigned projects left assign them to the first x judges
        if len(project_views) > 0:
            edge_idx = 0 # sequentially step through the first judges to add the unassigned ones to them
            for project in project_views:
                assignments[judges[edge_idx]].append(project)
                projects[project]["num_judges"][track] += 1
                edge_idx = (edge_idx + 1) % len(judges)
    return assignments
def get_current_user(current_user):
    """
    User route to get current user
    Parameters
    ----------
    Registered/Admin access; `current_user` is the decoded user row/tuple.
    Returns
    -------
    JSON payload with the user's data, or 404 when no row matches.
    """
    cur = mysql.connection.cursor()
    cur.execute(
        "SELECT * FROM diyup.users WHERE email_address=%s",
        (current_user[0],),
    )
    user = cur.fetchone()
    cur.close()
    if not user:
        return jsonify({'message' : 'No user found!'}), 404
    # Build the response from the already-decoded current_user tuple;
    # the password is intentionally masked.
    user_data = {
        'email_address': current_user[0],
        'username': current_user[1],
        'is_admin': current_user[3],
        'password': '****',
        'avatar': current_user[4],
    }
    return jsonify({'current user' : user_data}), 200
import json


def incident_exists(name, message, status):
    """
    Check if an incident with these attributes already exists
    """
    incidents = cachet.Incidents(endpoint=ENDPOINT)
    all_incidents = json.loads(incidents.get())
    wanted_message = message.strip()
    # Any registered incident matching name, status and (trimmed) message
    # counts as a duplicate.
    return any(
        incident['name'] == name
        and incident['status'] == status
        and incident['message'].strip() == wanted_message
        for incident in all_incidents['data']
    )
def compute_down(expr):
    """Compute the expression on the entire inputs.

    Inputs match up to leaves of the expression; the default behaviour
    is the identity — the expression is returned unchanged.
    """
    return expr
def NEV_to_HLA(survey, NEV, cov=True):
    """
    Transform from NEV to HLA coordinate system.
    Params:
        survey: (n,3) array of floats
            The [md, inc, azi] survey listing array.
        NEV: (d,3) or (3,3,d) array of floats
            The NEV coordinates or covariance matrices.
        cov: boolean
            If cov is True then a (3,3,d) array of covariance matrices
            is expecte, else a (d,3) array of coordinates.
    Returns:
        Either a transformed (n,3) array of HLA coordinates or an
        (3,3,n) array of HLA covariance matrices.
        NOTE(review): in the cov=False branch the value returned is a
        Python *list* of per-station arrays, not a stacked (n,3) array —
        confirm whether callers rely on that.
    """
    # Per-station rotation matrices computed from the survey (helper
    # defined elsewhere in this module).
    trans = get_transform(survey)
    if cov:
        # Congruence transform t @ C @ t.T for each station's covariance.
        # NEV.T turns the (3,3,d) input into d (3,3) matrices.
        HLAs = [
            np.dot(np.dot(t, NEV.T[i]), t.T) for i, t in enumerate(trans)
        ]
        HLAs = np.vstack(HLAs).reshape(-1,3,3).T
    else:
        NEV = NEV.reshape(-1,3)
        # Rotate each coordinate row into the HLA frame.
        HLAs = [
            np.dot(NEV[i], t.T) for i, t in enumerate(trans)
        ]
    return HLAs
def create_read_model_cmd() -> list:
    """Create TaiSEIA device model request protocol data (as a PDU)."""
    request = SAInfoRequestPacket.create(
        sa_info_type=SARegisterServiceIDEnum.READ_MODEL
    )
    return request.to_pdu()
def generate_mandatory_attributes(diagnostic_cubes, model_id_attr=None):
    """
    Generate mandatory attributes for a new diagnostic derived from
    several model diagnostics.  An attribute shared (with the same value)
    by every input cube is propagated; otherwise the default is kept.

    Args:
        diagnostic_cubes (list):
            List of diagnostic cubes used in calculating the new diagnostic
        model_id_attr (str or None):
            Name of attribute used to identify source model for blending,
            if required

    Returns:
        dict: Dictionary of mandatory attribute "key": "value" pairs.

    Raises:
        ValueError: if a required attribute is absent from, or differs
            between, the input cubes.
    """
    # Sentinel distinguishing "attribute absent" from any real value.
    _absent = object()
    attr_dicts = [cube.attributes for cube in diagnostic_cubes]
    required = [model_id_attr] if model_id_attr else []
    attributes = MANDATORY_ATTRIBUTE_DEFAULTS.copy()
    for attr in MANDATORY_ATTRIBUTES + required:
        unique_values = {d.get(attr, _absent) for d in attr_dicts}
        if len(unique_values) == 1 and _absent not in unique_values:
            (attributes[attr],) = unique_values
        elif attr in required:
            msg = (
                'Required attribute "{}" is missing or '
                "not the same on all input cubes"
            )
            raise ValueError(msg.format(attr))
    return attributes
def _convert_to_slices(indices, max_nslice_frac=0.1):
    """
    Convert list of indices to a list of slices.
    Parameters
    ----------
    indices : list
        A 1D list of integers for array indexing.
    max_nslice_frac : float
        A float from 0 -- 1. If the number of slices
        needed to represent input 'indices' divided by len(indices)
        exceeds this fraction, then we determine that we cannot
        easily represent 'indices' with a list of slices.
    Returns
    -------
    list
        list of slice objects used to represent indices
    bool
        If True, indices is easily represented by slices
        (max_nslice_frac condition met), otherwise False
    Notes
    -----
    Example:
        if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14]
        then: slices = [slice(1, 5, 1), slice(11, 15, 1)]
    """
    # check for integer index
    if isinstance(indices, (int, np.integer)):
        indices = [indices]
    # check for already a slice
    if isinstance(indices, slice):
        return [indices], True
    # assert indices is longer than 2, or return trivial solutions
    if len(indices) == 0:
        return [slice(0, 0, 0)], False
    elif len(indices) == 1:
        return [slice(indices[0], indices[0] + 1, 1)], True
    elif len(indices) == 2:
        # NOTE(review): with duplicate entries (e.g. [3, 3]) this builds a
        # zero-step slice, which is invalid for indexing — confirm inputs
        # are strictly increasing.
        return [slice(indices[0], indices[1] + 1, indices[1] - indices[0])], True
    # setup empty slices list
    Ninds = len(indices)
    slices = []
    # iterate over indices, emitting a slice each time the stride between
    # consecutive indices changes
    for i, ind in enumerate(indices):
        if i == 0:
            # start the first slice object
            start = ind
            last_step = indices[i + 1] - ind
            continue
        # calculate step from previous index
        step = ind - indices[i - 1]
        # if step != last_step, this ends the slice
        if step != last_step:
            # append to list
            slices.append(slice(start, indices[i - 1] + 1, last_step))
            # check if this is the last element
            if i == Ninds - 1:
                # append last element
                slices.append(slice(ind, ind + 1, 1))
                continue
            # setup next step
            start = ind
            last_step = indices[i + 1] - ind
        # check if this is the last element
        elif i == Ninds - 1:
            # end slice and append
            slices.append(slice(start, ind + 1, step))
    # determine whether slices are a reasonable representation
    Nslices = len(slices)
    passed = (float(Nslices) / len(indices)) < max_nslice_frac
    return slices, passed
def log2(x, dtype=None):
    """
    Base-2 logarithm of `x`, computed by change of base: ``log(x)/log(2)``.
    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`,
        `signature`, and `extobj` are not supported.
    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`.
            Overrides the dtype of the output Tensor.
    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 4, 8]).astype('float16')
        >>> output = np.log2(x)
        >>> print(output)
        [1. 2. 3.]
    """
    two = _make_tensor(2, x.dtype)

    def _log_base2(arr):
        # change of base: log2(a) = ln(a) / ln(2)
        return F.log(arr) / F.log(two)

    return _apply_tensor_op(_log_base2, x, dtype=dtype)
async def init_app():
    """
    Initialise the aiohttp application: create the asyncpg connection
    pool, register routes and static directories, attach the Jinja
    environment and the shutdown hook.
    :return: configured ``aiohttp.web.Application``
    """
    app = web.Application()
    try:
        app.db = await asyncpg.pool.create_pool(get_config())
    except asyncpg.exceptions.InvalidCatalogNameError:
        # Message (Russian, user-facing, left verbatim): "The DSN is
        # misconfigured or the DB was not initialised correctly".
        raise Exception('DSN сконфигурирован неверно '
                        'или БД не проинициализированна корректно')
    app.router.add_view('/', ImageView)
    app.jinja = env
    app.upload_path = './upload'
    # Serve uploaded images and static assets directly.
    app.add_routes([web.static('/upload', app.upload_path),
                    web.static('/static', './static')])
    app.on_shutdown.append(shutdown)
    return app
def pad_array(
    x: np.ndarray, to_multiple: int = None, to_size: int = None, axis: int = 0
):
    """Pad an array along `axis`, either to an exact length or to the
    nearest larger multiple of a given number.

    Parameters
    ----------
    x: np.ndarray
        The array to pad.
    to_multiple: int, optional
        If given, pad so the length along `axis` becomes the nearest
        larger multiple of `to_multiple`.
    to_size: int, optional
        If given, pad so the length along `axis` becomes exactly
        `to_size`.  Exactly one of `to_size` and `to_multiple` must be
        given.
    axis: int
        The axis along which to pad. Default is axis 0.

    Returns
    -------
    padded: np.ndarray
        The padded array (always a copy, even when no padding is needed).
    """
    if to_multiple is not None and to_size is not None:
        raise ValueError("Only one of `to_multiple` and `to_size` should be given.")
    current_len = x.shape[axis]
    if to_size is not None:
        n_pad = to_size - current_len
        if n_pad < 0:
            raise ValueError("The length of `x` is already greater than `to_size`.")
    elif to_multiple is not None:
        # Distance to the next multiple (0 when already aligned).
        n_pad = -current_len % to_multiple
    else:
        raise ValueError("One of `to_multiple` and `to_size` must be given.")
    if n_pad == 0:
        return x.copy()  # A copy is expected when padding
    widths = [(0, 0)] * x.ndim
    widths[axis] = (0, n_pad)
    return np.pad(x, widths)
def fill_defaults(value, default):
    """ #TODO: no longer needed, remove.

    Normalise `value` against `default`: None -> default; an int is
    broadcast to a 3-tuple of itself; falsy members of a tuple are
    replaced by the matching member of `default`; anything else passes
    through unchanged.
    """
    if value is None:
        return default
    if isinstance(value, int):
        return (value,) * 3
    if isinstance(value, tuple):
        return tuple(v if v else default[i] for i, v in enumerate(value))
    return value
from typing import Sequence


def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
    """
    Compute the two dimensional inverse FFT of a real spectrum.

    This is ``ihfftn`` with two-dimensional defaults; see ``ihfftn`` for
    details.

    Args:
        x(Tensor): Input tensor.
        s(Sequence[int], optional): Shape of the real input to the
            inverse FFT.
        axes(Sequence[int], optional): The axes over which to compute the
            inverse fft. Default is the last two axes.
        norm(str, optional): {"backward", "ortho", "forward"}. Default is
            "backward".
        name(str, optional): Normally there is no need for user to set
            this property; refer to :ref:`api_guide_Name`.

    Returns:
        out(Tensor): The result of the inverse hermitian 2-D FFT.
    """
    _check_at_least_ndim(x, 2)
    # Flattened validation: each of s/axes, when provided, must be a
    # sequence of exactly two integers.
    if s is not None and (not isinstance(s, Sequence) or len(s) != 2):
        raise ValueError(
            "Invalid FFT argument s ({}), it should be a sequence of 2 integers."
            .format(s))
    if axes is not None and (not isinstance(axes, Sequence) or len(axes) != 2):
        raise ValueError(
            "Invalid FFT argument axes ({}), it should be a sequence of 2 integers."
            .format(axes))
    return ihfftn(x, s, axes, norm, name)
def pp_ajaximg(context, nodelist, *args, **kwargs):
    """
    Modifies the request session data to prep it for AJAX requests to the Ajax image uploader.

    :param context: Django template context (pushed/popped around render).
    :param nodelist: tag body to render inside the pushed context.
    :return: rendered tag body.
    """
    context.push()
    # NOTE(review): `namespace` is never used below; get_namespace() may be
    # called for side effects — confirm before removing.
    namespace = get_namespace(context)
    request = kwargs.get('request', None)
    obj = kwargs.get('object', None)
    if obj is not None and request is not None:
        # Stash the target object's pk and content type in the session so
        # the AJAX upload view can associate the image with it.
        ctype = ContentType.objects.get_for_model(obj)
        request.session['object_pk'] = obj.pk
        request.session['content_pk'] = ctype.pk
    output = nodelist.render(context)
    # NOTE(review): pop() is not wrapped in try/finally, so an exception in
    # render() leaves the context pushed — confirm whether that matters.
    context.pop()
    return output
def bgr2rgb(x, dim=-3):
    """Reverses the channel dimension. See :func:`channel_flip`

    :param x: input whose channels are in BGR order.
    :param dim: index of the channel dimension; the default -3 assumes a
        (..., C, H, W) layout — TODO confirm against channel_flip's contract.
    """
    return channel_flip(x, dim)
def get_all_predictions():
    """
    Collect the predictions of every saved model on the MNIST test set.
    :return: a dictionary with predictions of all models (plus the true
        labels under key "y").
    """
    all_results = {}
    y_true = None
    for folder in ("ffnn_models", "dropout_models"):
        for size in (1000, 2500, 7000, 19000, 50000):
            path = folder + "/model_" + str(size)
            returned = get_int_predictions(path, load_MNIST)
            if y_true is None:
                # First model also yields the ground-truth labels, which
                # are reused for every subsequent accuracy report.
                y_true = returned[1]
                all_results["y"] = y_true
            all_results[path] = returned[0]
            print(f"Test-set accuracy = {np.mean(np.equal(all_results[path], y_true)):.4f} for model {path}")
    return all_results
def moving_avg(timeseries, days=7, dropna=False, win_type=None, params=None):
    """Takes a centred moving average of a time series over a window with user-defined width and shape. Note: when taking a N-day centred moving average, the first and last N//2 days won't return a value and are effectively lost, BUT they are returned as nans. Also note that the moving average applied over missing data will result in an effectively larger window - try to make sure there is no missing data in the range the data is averaged.
    Parameters
    ----------
    timeseries : pandas.DataFrame
        Pandas DataFrame with chronologically ordered datetime objects as indices and a single column with float values
    days : int
        Width of the averaging window in days. Resulting value is recorded in the centre of the window. 7 days by default. If the number is uneven, the window is asymmetric toward the past.
    dropna : boolean
        If True: only return dates that have an averaged value - i.e. drop some values at the start and the end.
    win_type : str
        Type of window, determining the weight of the values in the window. Choice between 'boxcar', 'triang', 'blackman', 'hamming', 'bartlett', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann', 'kaiser' (needs parameter: beta), 'gaussian' (needs parameter: std), 'general_gaussian' (needs parameters: power, width), 'slepian' (needs parameter: width), 'exponential' (needs parameter: tau). If win_type=None (default) all points are evenly weighted.
    params : float or list of floats
        Parameter values used in some window types. 'kaiser': beta, 'gaussian': std, 'general_gaussian': [power, width], 'slepian': width, 'exponential': tau
    Returns
    ------
    timeseries_avg : pandas.DataFrame
        Pandas DataFrame with chronologically ordered datetime objects as indices and single column with moving-averaged float values
    """
    col_name = timeseries.columns[0]
    ts_temp = pd.DataFrame(data=timeseries.values.copy())
    # Bug fix: normalise a scalar `params` up front.  The original code only
    # did `params = [params]` inside the no-parameter branch, so passing a
    # bare float for e.g. win_type='kaiser' raised a TypeError on params[0].
    if params is not None and not isinstance(params, (list, tuple)):
        params = [params]
    if win_type in [None, 'boxcar', 'triang', 'blackman', 'hamming', 'bartlett', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann']:
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean()
    elif win_type == 'kaiser':
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean(beta=params[0])
    elif win_type == 'gaussian':
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean(std=params[0])
    elif win_type == 'general_gaussian':
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean(power=params[0], width=params[1])
    elif win_type == 'slepian':
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean(width=params[0])
    elif win_type == 'exponential':
        ts_temp = ts_temp.rolling(window=days, center=True, win_type=win_type).mean(tau=params[0])
    # Re-attach the original datetime index and column name.
    timeseries_avg = pd.DataFrame(data=ts_temp.values.copy(), index=timeseries.index.copy(), columns=[col_name])
    if dropna:
        timeseries_avg.dropna(inplace=True)
    return timeseries_avg
def get_defined_lvls(inp_str):
    """ gets a list which specifies what levels have been defined

    Captures the token following each 'level' keyword in the input.
    """
    pattern = ('level' + one_or_more(SPACE) + capturing(one_or_more(NONSPACE)))
    return all_captures(pattern, inp_str)
from typing import Tuple


def requests_per_process(process_count: int, conf) -> Tuple[int, int]:
    """Divides how many requests each forked process will make.

    Returns a (concurrency, requests) pair, each the configured total
    divided by `process_count` and truncated to an int.
    """
    concurrency_share = int(conf.concurrency / process_count)
    request_share = int(conf.requests / process_count)
    return (concurrency_share, request_share)
# NOTE: Python 2 code (print statements); depends on pycollada, panda3d
# (GeomNode, NodePath, Mat4) and helpers defined elsewhere in this module.
def load_into_bamfile(meshdata, subfiles, model):
    """Uses pycollada and panda3d to load meshdata and subfiles and
    write out to a bam file on disk"""
    # Cached result: reuse an already-written bam file.
    if os.path.isfile(model.bam_file):
        print 'returning cached bam file'
        return model.bam_file
    mesh = load_mesh(meshdata, subfiles)
    model_name = model.model_json['full_path'].replace('/', '_')
    # For full progressive models, merge the progressive-mesh stream back
    # into the base mesh before conversion.
    if model.model_type == 'progressive' and model.model_subtype == 'full':
        progressive_stream = model.model_json['metadata']['types']['progressive'].get('progressive_stream')
        if progressive_stream is not None:
            print 'LOADING PROGRESSIVE STREAM'
            data = model.prog_data
            try:
                mesh = add_back_pm.add_back_pm(mesh, StringIO(data), 100)
                print '-----'
                print 'SUCCESSFULLY ADDED BACK PM'
                print '-----'
            except:
                # Leave an empty marker file so the failure is visible,
                # then propagate the error.
                f = open(model.bam_file, 'w')
                f.close()
                raise
    print 'loading into bamfile', model_name, mesh
    scene_members = pandacore.getSceneMembers(mesh)
    print 'got scene members', model_name, mesh
    # Rotate the scene so the collada up-axis maps onto panda3d's.
    rotateNode = GeomNode("rotater")
    rotatePath = NodePath(rotateNode)
    matrix = numpy.identity(4)
    if mesh.assetInfo.upaxis == collada.asset.UP_AXIS.X_UP:
        r = collada.scene.RotateTransform(0,1,0,90)
        matrix = r.matrix
    elif mesh.assetInfo.upaxis == collada.asset.UP_AXIS.Y_UP:
        r = collada.scene.RotateTransform(1,0,0,90)
        matrix = r.matrix
    rotatePath.setMat(Mat4(*matrix.T.flatten().tolist()))
    # Attach each geometry (with its render state and transform) under the
    # rotation node.
    for geom, renderstate, mat4 in scene_members:
        node = GeomNode("primitive")
        node.addGeom(geom)
        if renderstate is not None:
            node.setGeomState(0, renderstate)
        geomPath = rotatePath.attachNewNode(node)
        geomPath.setMat(mat4)
    print 'created np', model_name, mesh
    # Flattening merges geometry for speed; skipped for types whose node
    # structure must be preserved.
    if model.model_type != 'optimized_unflattened' and model.model_type != 'progressive':
        print 'ABOUT TO FLATTEN'
        rotatePath.flattenStrong()
        print 'DONE FLATTENING'
        print 'flattened', model_name, mesh
    wrappedNode = pandacore.centerAndScale(rotatePath)
    wrappedNode.setName(model_name)
    wrappedNode.writeBamFile(model.bam_file)
    print 'saved', model_name, mesh
    wrappedNode = None
    return model.bam_file
def gettree(number, count=3):
    """Build a directory-tree path for ``number``.

    The decimal representation of ``number`` is left-padded with zeros
    up to a multiple of ``count`` digits and then split into chunks of
    ``count`` digits, e.g. ``gettree(1234)`` -> ``['001', '234']``.
    """
    digits = str(number)
    # Pad to the next multiple of `count` digits.
    padding = (-len(digits)) % count
    digits = '0' * padding + digits
    return [digits[pos:pos + count] for pos in range(0, len(digits), count)]
def to_unserialized_json(obj):
    """Convert a wire encodeable object into structured Python objects
    that are JSON serializable.

    :param obj: An object that can be passed to ``wire_encode``.
    :return: Python object that can be JSON serialized.
    """
    serialized = _cached_dfs_serialize(obj)
    return serialized
import os
import yaml
def install_simulation(
    version,
    sim_dir,
    rel_name,
    run_dir,
    vel_mod_dir,
    srf_file,
    stoch_file,
    stat_file_path,
    vs30_file_path,
    vs30ref_file_path,
    check_vm,
    fault_yaml_path,
    root_yaml_path,
    v1d_full_path,
    cybershake_root,
    v1d_dir=platform_config[PLATFORM_CONFIG.VELOCITY_MODEL_DIR.name],
    site_specific=False,
    site_v1d_dir=None,
    hf_stat_vs_ref=None,
    sim_params_file=None,
    seed=HF_DEFAULT_SEED,
    logger: Logger = get_basic_logger(),
    extended_period=False,
    vm_perturbations=False,
    ignore_vm_perturbations=False,
    vm_qpqs_files=False,
    ignore_vm_qpqs_files=False,
    components=None,
):
    """Installs a single simulation.

    Creates the directory tree for one realisation, loads the gmsim
    version template, installs the broadband (BB) configuration and
    assembles the root, fault and simulation parameter dictionaries.

    Returns
    -------
    tuple
        (root_params_dict, fault_params_dict, sim_params_dict), or
        (None, None, None) when ``check_vm`` is set and the simulation
        duration is not a multiple of ``dt``.

    Raises
    ------
    ValueError
        If ``components`` contains unknown component names.
    FileNotFoundError
        If ``vm_perturbations`` or ``vm_qpqs_files`` is requested but the
        corresponding file(s) are missing.
    FileExistsError
        If perturbation or Qp/Qs files exist but were neither requested
        nor explicitly ignored.
    """
    run_name = simulation_structure.get_fault_from_realisation(rel_name)
    lf_sim_root_dir = simulation_structure.get_lf_dir(sim_dir)
    hf_dir = simulation_structure.get_hf_dir(sim_dir)
    bb_dir = simulation_structure.get_bb_dir(sim_dir)
    im_calc_dir = simulation_structure.get_im_calc_dir(sim_dir)
    dir_list = [sim_dir, lf_sim_root_dir, hf_dir, bb_dir, im_calc_dir]
    version = str(version)
    if not os.path.isdir(cybershake_root):
        dir_list.insert(0, cybershake_root)
    shared.verify_user_dirs(dir_list)
    # Root params come from the per-version gmsim template.
    template_path = os.path.join(
        platform_config[PLATFORM_CONFIG.TEMPLATES_DIR.name], "gmsim", version
    )
    root_params_dict = utils.load_yaml(
        os.path.join(template_path, ROOT_DEFAULTS_FILE_NAME)
    )
    root_params_dict["ims"][RootParams.extended_period.value] = extended_period
    root_params_dict[RootParams.version.value] = version
    root_params_dict[RootParams.stat_file.value] = stat_file_path
    root_params_dict[RootParams.stat_vs_est.value] = vs30_file_path
    root_params_dict[RootParams.stat_vs_ref.value] = vs30ref_file_path
    root_params_dict["hf"][RootParams.seed.value] = seed
    if components is not None:
        if not set(components).issubset(set(Components.iterate_str_values())):
            message = f"{components} are not all in {Components}"
            logger.critical(message)
            raise ValueError(message)
        root_params_dict["ims"][RootParams.component.value] = components
    # Fault params
    fault_params_dict = {
        FaultParams.root_yaml_path.value: root_yaml_path,
        FaultParams.vel_mod_dir.value: vel_mod_dir,
    }
    fault_params_dict["hf"] = {
        FaultParams.site_specific.value: site_specific,
        FaultParams.site_v1d_dir.value: site_v1d_dir,
    }
    # VM params
    vm_params_path = simulation_structure.get_vm_params_yaml(vel_mod_dir)
    # Sim Params
    sim_params_dict = {
        SimParams.fault_yaml_path.value: fault_yaml_path,
        SimParams.run_name.value: rel_name,
        SimParams.user_root.value: cybershake_root,
        SimParams.run_dir.value: run_dir,
        SimParams.sim_dir.value: sim_dir,
        SimParams.srf_file.value: srf_file,
        SimParams.vm_params.value: vm_params_path,
    }
    if stat_file_path is not None:
        sim_params_dict[SimParams.stat_file.value] = stat_file_path
    if check_vm:
        # BB requires the simulation duration to be an exact multiple of
        # dt; bail out early (without raising) if it is not.
        vm_params_dict = utils.load_yaml(vm_params_path)
        dt = root_params_dict["dt"]
        sim_duration = vm_params_dict["sim_duration"]
        nt = float(sim_duration) / dt
        if not isclose(nt, round(nt)):
            logger.critical(
                "Simulation dt does not match sim duration. This will result in errors during BB. Simulation duration"
                " must be a multiple of dt. Ignoring fault. Simulation_duration: {}. dt: {}.".format(
                    sim_duration, dt
                )
            )
            return None, None, None
    sim_params_dict["emod3d"] = {}
    vm_pert_file = simulation_structure.get_realisation_VM_pert_file(
        cybershake_root, rel_name
    )
    if vm_perturbations:
        # We want to use the perturbation file
        if os.path.exists(vm_pert_file):
            # The perturbation file exists, use it
            root_params_dict["emod3d"]["model_style"] = 3
            sim_params_dict["emod3d"]["pertbfile"] = vm_pert_file
        else:
            # The perturbation file does not exist. Raise an exception
            message = f"The expected perturbation file {vm_pert_file} does not exist. Generate or move this file to the given location."
            logger.error(message)
            raise FileNotFoundError(message)
    elif not ignore_vm_perturbations and os.path.exists(vm_pert_file):
        # We haven't used either flag and the perturbation file exists. Raise an error and make the user deal with it
        message = f"The perturbation file {vm_pert_file} exists. Reset and run installation with the --ignore_vm_perturbations flag if you do not wish to use it."
        logger.error(message)
        raise FileExistsError(message)
    else:
        # The perturbation file doesn't exist or we are explicitly ignoring it. Keep going
        pass
    qsfile = simulation_structure.get_fault_qs_file(cybershake_root, rel_name)
    qpfile = simulation_structure.get_fault_qp_file(cybershake_root, rel_name)
    if vm_qpqs_files:
        # We want to use the Qp/Qs files
        if os.path.exists(qsfile) and os.path.exists(qpfile):
            # The Qp/Qs files exist, use them
            root_params_dict["emod3d"]["useqsqp"] = 1
            sim_params_dict["emod3d"]["qsfile"] = qsfile
            sim_params_dict["emod3d"]["qpfile"] = qpfile
        else:
            # At least one of the Qp/Qs files do not exist. Raise an exception
            message = f"The expected Qp/Qs files {qpfile} and/or {qsfile} do not exist. Generate or move these files to the given location."
            logger.error(message)
            # Missing files: FileNotFoundError (was FileExistsError),
            # matching the perturbation-file branch above.
            raise FileNotFoundError(message)
    elif not ignore_vm_qpqs_files and (
        os.path.exists(qsfile) or os.path.exists(qpfile)
    ):
        # We haven't used either flag but the Qp/Qs files exist. Raise an error and make the user deal with it
        message = f"The Qp/Qs files {qpfile}, {qsfile} exist. Reset and run installation with the --ignore_vm_qpqs_files flag if you do not wish to use them."
        logger.error(message)
        raise FileExistsError(message)
    else:
        # Either the Qp/Qs files don't exist, or we are explicitly ignoring them. Keep going
        pass
    sim_params_dict["hf"] = {SimParams.slip.value: stoch_file}
    sim_params_dict["bb"] = {}
    shared.show_horizontal_line(c="*")
    logger.info("installing bb")
    install_bb(
        stat_file_path,
        root_params_dict,
        v1d_dir=v1d_dir,
        v1d_full_path=v1d_full_path,
        site_v1d_dir=site_v1d_dir,
        hf_stat_vs_ref=hf_stat_vs_ref,
        logger=logger,
    )
    logger.info("installing bb finished")
    if sim_params_file is not None and os.path.isfile(sim_params_file):
        with open(sim_params_file) as spf:
            extra_sims_params = yaml.safe_load(spf)
        for key, value in extra_sims_params.items():
            # If the key exists in both dictionaries and maps to a dictionary in both, then merge them
            if (
                isinstance(value, dict)
                and key in sim_params_dict
                and isinstance(sim_params_dict[key], dict)
            ):
                sim_params_dict[key].update(value)
            else:
                sim_params_dict.update({key: value})
    return root_params_dict, fault_params_dict, sim_params_dict
def get_negatives(all_contexts, vocab, counter, K):
    """Return noise words for negative sampling.

    For every context window, draw ``K`` noise words per context word,
    skipping candidates that appear in the window itself.
    """
    # Indices start at 1 (index 0 is the excluded unknown token).
    sampling_weights = [
        counter[vocab.to_tokens(idx)]**0.75 for idx in range(1, len(vocab))
    ]
    generator = RandomGenerator(sampling_weights)
    all_negatives = []
    for contexts in all_contexts:
        negatives = []
        while len(negatives) < len(contexts) * K:
            candidate = generator.draw()
            # Noise words must not be context words.
            if candidate not in contexts:
                negatives.append(candidate)
        all_negatives.append(negatives)
    return all_negatives
def _get_msvc_vars(repository_ctx, paths):
    """Get the variables we need to populate the MSVC toolchains."""
    msvc_vars = dict()
    # Locate the Visual C++ installation; if absent, generate a batch
    # file that prints an installation error when invoked.
    vc_path = find_vc_path(repository_ctx)
    missing_tools = None
    if not vc_path:
        repository_ctx.template(
            "vc_installation_error.bat",
            paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
            {"%{vc_error_message}": ""},
        )
    else:
        # VC is installed, but individual tools may still be missing;
        # if so, embed the list of missing tools in the error script.
        missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
        if missing_tools:
            message = "\r\n".join([
                "echo. 1>&2",
                "echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
                "echo But Bazel can't find the following tools: 1>&2",
                "echo %s 1>&2" % ", ".join(missing_tools),
                "echo. 1>&2",
            ])
            repository_ctx.template(
                "vc_installation_error.bat",
                paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
                {"%{vc_error_message}": message},
            )
    if not vc_path or missing_tools:
        # No usable toolchain: point every tool at the error script so
        # builds fail with a clear message instead of a cryptic one.
        write_builtin_include_directory_paths(repository_ctx, "msvc", [], file_suffix = "_msvc")
        msvc_vars = {
            "%{msvc_env_tmp}": "msvc_not_found",
            "%{msvc_env_path}": "msvc_not_found",
            "%{msvc_env_include}": "msvc_not_found",
            "%{msvc_env_lib}": "msvc_not_found",
            "%{msvc_cl_path}": "vc_installation_error.bat",
            "%{msvc_ml_path}": "vc_installation_error.bat",
            "%{msvc_link_path}": "vc_installation_error.bat",
            "%{msvc_lib_path}": "vc_installation_error.bat",
            "%{dbg_mode_debug_flag}": "/DEBUG",
            "%{fastbuild_mode_debug_flag}": "/DEBUG",
            "%{msvc_cxx_builtin_include_directories}": "",
        }
        return msvc_vars
    # Capture the environment vcvars would set (PATH/INCLUDE/LIB/TMP).
    env = setup_vc_env_vars(repository_ctx, vc_path)
    escaped_paths = escape_string(env["PATH"])
    escaped_include_paths = escape_string(env["INCLUDE"])
    escaped_lib_paths = escape_string(env["LIB"])
    escaped_tmp_dir = escape_string(_get_temp_env(repository_ctx).replace("\\", "\\\\"))
    llvm_path = ""
    if _use_clang_cl(repository_ctx):
        # clang-cl mode: use LLVM's tools, falling back to the MSVC
        # linker/librarian when the LLVM equivalents are missing.
        llvm_path = find_llvm_path(repository_ctx)
        if not llvm_path:
            auto_configure_fail("\nUSE_CLANG_CL is set to 1, but Bazel cannot find Clang installation on your system.\n" +
                                "Please install Clang via http://releases.llvm.org/download.html\n")
        cl_path = find_llvm_tool(repository_ctx, llvm_path, "clang-cl.exe")
        link_path = find_llvm_tool(repository_ctx, llvm_path, "lld-link.exe")
        if not link_path:
            link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe")
        lib_path = find_llvm_tool(repository_ctx, llvm_path, "llvm-lib.exe")
        if not lib_path:
            lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe")
    else:
        cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe")
        link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe")
        lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe")
    msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe")
    escaped_cxx_include_directories = []
    for path in escaped_include_paths.split(";"):
        if path:
            escaped_cxx_include_directories.append("\"%s\"" % path)
    if llvm_path:
        # Add Clang's own builtin headers and runtime library directory.
        clang_version = _get_clang_version(repository_ctx, cl_path)
        clang_dir = llvm_path + "\\lib\\clang\\" + clang_version
        clang_include_path = (clang_dir + "\\include").replace("\\", "\\\\")
        escaped_cxx_include_directories.append("\"%s\"" % clang_include_path)
        clang_lib_path = (clang_dir + "\\lib\\windows").replace("\\", "\\\\")
        escaped_lib_paths = escaped_lib_paths + ";" + clang_lib_path
    # /DEBUG:FASTLINK is only valid on linkers that support it.
    support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, link_path)
    write_builtin_include_directory_paths(repository_ctx, "msvc", escaped_cxx_include_directories, file_suffix = "_msvc")
    msvc_vars = {
        "%{msvc_env_tmp}": escaped_tmp_dir,
        "%{msvc_env_path}": escaped_paths,
        "%{msvc_env_include}": escaped_include_paths,
        "%{msvc_env_lib}": escaped_lib_paths,
        "%{msvc_cl_path}": cl_path,
        "%{msvc_ml_path}": msvc_ml_path,
        "%{msvc_link_path}": link_path,
        "%{msvc_lib_path}": lib_path,
        "%{dbg_mode_debug_flag}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
        "%{fastbuild_mode_debug_flag}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
        "%{msvc_cxx_builtin_include_directories}": "        " + ",\n        ".join(escaped_cxx_include_directories),
    }
    return msvc_vars
def edmonds(V, E, root):
    """Recursive application of Edmonds' algorithm according
    to Wikipedia's description [0].
    [0] https://en.wikipedia.org/wiki/Edmonds'_algorithm#Description
    :param V: set of vertices
    :type V: [int, ...]
    :param E: a set of edges
    :type E: [(int, int, float), ...]
    :param root: root vertex
    :type root: int
    :return: a subset of `E` representing a min-weight arborescence
    :rtype: [(int, int, float), ...]
    """
    print(("c computing spanning arborescence of minimum weight "
           " for {} with root={}").format(E, root))
    # No edge may point back into the root of an arborescence.
    E = list(filter(lambda e: e[1] != root, E))
    E = remove_multiedges(E)
    # P: for every non-root vertex, the cheapest incoming edge.
    P = cheapest_edges(root, E)
    print("c P = {}".format(P))
    C = find_cycle(P)
    if not C:
        # P is acyclic, hence already a min-weight arborescence.
        print("c found no cycle, returning {}".format(P))
        return P
    else:
        print("c found a cycle: {}".format(C))
        # Contract the cycle C into a single fresh vertex v_c.
        C_V = set(e[0] for e in C).union(set(e[1] for e in C))
        v_c = max(V) + 1
        E_prime = []
        # Maps each edge of the contracted graph back to the original
        # edge it was derived from.
        correspondence = {}
        for (s, d, w) in E:
            if s not in C_V and d in C_V:
                # Edge into the cycle: reweight by subtracting the
                # weight of d's current cheapest incoming edge.
                fe = filter(lambda e: e[0] == pi(d, E) and e[1] == d, E)
                incoming_weight = list(map(lambda e: e[2], fe))[0]
                correspondence[s, v_c, w - incoming_weight] = (s, d, w)
                E_prime.append((s, v_c, w - incoming_weight))
            elif s in C_V and d not in C_V:
                # Edge out of the cycle: keep weight, reroute from v_c.
                correspondence[v_c, d, w] = (s, d, w)
                E_prime.append((v_c, d, w))
            elif s not in C_V and d not in C_V:
                # Edge untouched by the contraction.
                correspondence[s, d, w] = (s, d, w)
                E_prime.append((s, d, w))
        D_prime = (V + [v_c], E_prime)
        # Recurse on the contracted graph.
        A_prime = edmonds(D_prime[0], D_prime[1], root)
        # Expand: the unique edge into v_c selects which cycle edge to
        # drop when unrolling C back into the solution.
        u, v, w = correspondence[unique_edge(v_c, A_prime)]
        assert v in C_V
        A_prime_corr = list(map(lambda e: correspondence[e], A_prime))
        # All of C except the edge into v (whose place (u, v, w) takes).
        C_wo_pi_v = list(filter(lambda e: e[0] != pi(v, E) and e[1] != v, C))
        print("c returning arborescence {}".format(A_prime_corr + C_wo_pi_v))
        return A_prime_corr + C_wo_pi_v
import pymultinest
def run_multinest(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  eff, seed=-1):
    """Run MultiNest.
    See https://arxiv.org/abs/0809.3437 for more detail
    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:
        - theta: numpy.array
            physical parameters, `shape=(nDims,)`
        returning a log-likelihood (float)
    prior: :obj:`callable`
        tranformation function taking a single parameter
        - cube: numpy.array
            hypercube parameters, `shape=(nDims,)`
        returning physical parameters (`numpy.array`)
    dumper: :obj:`callable`
        access function called every nlive iterations giving a window onto
        current live points. Single parameter, no return:
        - live:
            `numpy.array` live parameters and loglikelihoods,
            `shape=(nlive,nDims+1)`
    nDims: int
        Dimensionality of sampling space
    nlive: int
        Number of live points
    root: str
        base name for output files
    ndump: int
        How many iterations between dumper function calls
    eff: float
        Efficiency of MultiNest
    seed: int
        Seed for sampler. Optional, no default seed.
    """
    # Adapt the user-facing callables to MultiNest's C-style callbacks,
    # which receive a raw cube plus (ndim, nparams) counts.
    def _prior_callback(cube, ndim, nparams):
        physical = prior(as_array(cube, shape=(nparams,)))
        # Write the transformed parameters back into the cube in place.
        for idx, value in enumerate(physical):
            cube[idx] = value

    def _loglike_callback(cube, ndim, nparams):
        return loglikelihood(as_array(cube, shape=(nparams,)))

    def _dump_callback(nSamples, nlive, nPar,
                       physLive, posterior, paramConstr,
                       maxLogLike, logZ, logZerr, nullcontext):
        # Split live points / posterior into (parameters, logL) pairs.
        dumper(physLive[:, :-1], physLive[:, -1],
               posterior[:, :-2], posterior[:, -2])

    pymultinest.run(_loglike_callback, _prior_callback, nDims,
                    resume=False, verbose=True, dump_callback=_dump_callback,
                    n_iter_before_update=ndump//10, n_live_points=nlive,
                    outputfiles_basename=root, sampling_efficiency=eff,
                    evidence_tolerance=0.01, seed=seed)
def tema(close, timeperiod=30):
    """Triple Exponential Moving Average (三重指数移动平均线).

    TEMA smooths price fluctuations to make trends easier to identify
    without the lag of a traditional moving average: it takes multiple
    exponential moving averages (EMA) of the original EMA and subtracts
    out some of the lag.

    :param close: series of closing prices
    :param timeperiod: averaging window length
    :return: the TEMA series
    """
    result = TEMA(close, timeperiod)
    return result
import re
def load_single_model(save_path_stem, load_results_df=True):
    """Load the model, loss df, results df and model-history df found at
    ``save_path_stem``, sending the model to the appropriate device.

    Parameters
    ----------
    save_path_stem : str
        Path stem shared by the ``_results_df.csv``, ``_loss.csv`` and
        ``_model_history.csv`` files and the saved model.
    load_results_df : bool
        If True, load the full results dataframe (raising if it is
        missing). If False, only read its header row and tolerate a
        missing file (``results_df`` is then None).

    Returns
    -------
    tuple
        (model, loss_df, results_df, model_history_df)
    """
    try:
        if load_results_df:
            results_df = pd.read_csv(save_path_stem + '_results_df.csv')
        else:
            # Only the columns are needed; one row keeps this cheap.
            results_df = pd.read_csv(save_path_stem + '_results_df.csv', nrows=1)
    except FileNotFoundError:
        if load_results_df:
            raise
        results_df = None
    loss_df = pd.read_csv(save_path_stem + '_loss.csv')
    model_history_df = pd.read_csv(save_path_stem + "_model_history.csv")
    if 'test_subset' not in loss_df.columns or 'test_subset' not in model_history_df.columns:
        # unclear why this happens, it's really strange
        # Raw string so \d is a regex digit class rather than an invalid
        # string escape (SyntaxWarning/SyntaxError on modern Python).
        test_subset = re.findall(r'_(c[\d,]+)_', save_path_stem)[0]
        if not test_subset.startswith('c'):
            raise Exception("Can't grab test_subset from path %s, found %s!" %
                            (save_path_stem, test_subset))
        # this will give it the same spacing as the original version
        test_subset = ', '.join(test_subset[1:].split(','))
        if "test_subset" not in loss_df.columns:
            loss_df['test_subset'] = test_subset
        if "test_subset" not in model_history_df.columns:
            model_history_df['test_subset'] = test_subset
    model = load_LogGaussianDonut(save_path_stem)
    return model, loss_df, results_df, model_history_df
def gray2jet(img):
    """Map a [0, 1] grayscale image to a [0, 255] RGB image using the
    matplotlib "jet" colormap (alpha channel dropped)."""
    colormap = plt.get_cmap("jet")
    rgba = colormap(img)
    return np.uint8(255.0 * rgba[:, :, 0:3])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.