| content | id |
|---|---|
def update_resnet(model, debug=False):
"""
Update a ResNet model to use :class:`EltwiseSum` for the skip
connection.
Args:
model (:class:`torchvision.models.resnet.ResNet`): ResNet model.
debug (bool): If True, print debug statements.
Returns:
model (:class:`torchvision.models.resnet.ResNet`): ResNet model
that uses :class:`EltwiseSum` for the skip connections. The forward
functions of :class:`torchvision.models.resnet.BasicBlock` and
:class:`torchvision.models.resnet.Bottleneck` are modified.
"""
assert isinstance(model, ResNet)
def bottleneck_forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip(out, identity)
out = self.relu(out)
return out
def basicblock_forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip(out, identity)
out = self.relu(out)
return out
for module_name, module in model.named_modules():
if isinstance(module, Bottleneck):
module.skip = EltwiseSum()
module.forward = bottleneck_forward.__get__(module)
elif isinstance(module, BasicBlock):
module.skip = EltwiseSum()
module.forward = basicblock_forward.__get__(module)
else:
continue
if debug:
print('Adding EltwiseSum as skip connection in {}.'.format(
module_name))
return model
| 16,600
|
def docker(cmd='--help'):
"""
Wrapper for docker
"""
set_env()
template = 'docker {cmd}'.format(cmd=cmd)
run(template)
| 16,601
|
def sameSize(arguments) -> bool:
"""Checks whether given vectors are the same size or not"""
sameLength = True
initialSize = len(vectors[arguments[0]])
for vector in arguments:
if len(vectors[vector]) != initialSize:
sameLength = False
return sameLength
| 16,602
|
def write_bed_file(
simulated_individuals,
bim_SNP_names,
output_name,
bim_SNP_complete_pos,
bim_SNP_nucleotides,
population_ID,
test_functionality,
):
"""
Purpose
-------
to write the simulated data into realistic (bed, bim, fam) filesets. Does not include phenotypes at this point.
Parameters
----------
simulated_individuals: an Nx(B+1)xS numpy array containing N sets of B whole chromosomes, each of which has S SNPs.
Each row in the ith (B+1)xS subset will contribute one genomic segment. Those (B+1) genomic
segments will be concatenated to comprise the ith simulated individual.
bim_SNP_names: a list of SNP's rsIDs from the input bed file.
output_name: name of the output bed file, which annotates the chromosome that it belongs to.
bim_SNP_complete_pos: Sx3 numpy array. Columns comprise the first, third, and fourth columns from the input bim file.
bim_SNP_nucleotides: Sx2 numpy array. Columns comprise the fifth and sixth columns from the input bim file (i.e. major and minor alleles).
CAUTION: minor alleles with frequencies near 50% may become the major allele after the simulation
because the simulated allele frequency always deviates from the real allele frequency by a small amount.
This makes plink flip the sign of r values for simulated SNP pairs relative to real SNP pairs
if plink's --keep-allele-order flag is not used when computing the r values with plink.
population_ID: An input argument that is concatenated to each sample's row index to comprise columns 1 and 2 for the output fam file.
If no input argument is selected, then it includes the population ID from the 1000 genomes input plink fileset. If
the input plink files are custom, then it includes an empty string as the population_ID.
Returns
-------
It returns nothing. It only writes the simulated data into plink files.
"""
simulated_IDs = np.array(
[population_ID + "_" + str(i) for i in range(1, len(simulated_individuals) + 1)]
)
metadata = {
"fid": simulated_IDs,
"iid": simulated_IDs,
"sex": np.array([2] * len(simulated_IDs)),
"pheno": np.array([-9] * len(simulated_IDs)),
"chromosome": bim_SNP_complete_pos.T[0],
"sid": bim_SNP_names,
"cm_position": bim_SNP_complete_pos.T[1],
"bp_position": bim_SNP_complete_pos.T[2],
"allele_1": bim_SNP_nucleotides.T[0],
"allele_2": bim_SNP_nucleotides.T[1],
}
to_bed(output_name, simulated_individuals, properties=metadata, count_A1=True)
if test_functionality == "test_units":
bed_reader = open_bed(output_name, count_A1=True, num_threads=1)
output_bed_file = bed_reader.read(dtype="int8")
output_bim_file = (
pd.read_csv(
output_name[:-4] + ".bim", delimiter="\t", header=None, dtype=str
)
.to_numpy()
.astype("str")
)
output_fam_file = (
pd.read_csv(
output_name[:-4] + ".fam", delimiter=" ", header=None, dtype=str
)
.to_numpy()
.astype("str")
)
unit_tester(output_bed_file, "correct_write_bed_file_output.bed", None)
unit_tester(output_bim_file, "correct_write_bed_file_output.bim", None)
unit_tester(output_fam_file, "correct_write_bed_file_output.fam", None)
| 16,603
|
def db_read(src_path, read_type=set, read_int=False):
"""Read string data from a file into a variable of given type.
Read from the file at 'src_path', line by line, skipping certain lines and
removing trailing whitespace.
If 'read_int' is True, convert the resulting string to int.
Return read data as an object of the desired type specified by 'read_type'.
"""
def skip(s):
"""Bool func. for skipping a line. "#%# " is chosen as a comment
indicator. """
return s == "\n" or s.startswith("#%# ")
if read_type is list:
result = list()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.append(int(i.strip()) if read_int else i.strip())
elif read_type is set:
result = set()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.add(int(i.strip()) if read_int else i.strip())
elif read_type is dict:
# Process the lines in pairs: First the key, then the corresponding
# value, and then the next key... and so on.
result = dict()
with open(src_path, "r") as f:
key_temp = ""
for i in f.readlines():
if not skip(i):
if key_temp:
result[key_temp] = (
int(i.strip()) if read_int else i.strip()
)
key_temp = ""
else:
key_temp = (int(i.strip()) if read_int else i.strip())
elif read_type is str:
# Only read the first line of the file, strip and return it:
with open(src_path, "r") as f:
result = f.readline().rstrip()
else:
logger.error("db_read: read_type is not list, str, set or dict.")
return None
return result
| 16,604
|
def get_config_keys():
"""Parses Keys.java to extract keys to be used in configuration files
Args: None
Returns:
list: A list of dict containing the following keys -
'key': A dot separated name of the config key
'description': A list of str
"""
desc_re = re.compile(r"(/\*\*\n|\s+\*/|\s+\*)")
key_match_re = re.compile(r"\(\n(.+)\);", re.DOTALL)
key_split_re = re.compile(r",\s+", re.DOTALL)
keys = []
with open(_KEYS_FILE, "r") as f:
config = re.findall(
r"(/\*\*.*?\*/)\n\s+(public static final Config.*?;)", f.read(), re.DOTALL
)
for i in config:
try:
key_match = key_match_re.search(i[1])
if key_match:
terms = [x.strip() for x in key_split_re.split(key_match.group(1))]
key = terms[0].replace('"', "")
description = [
x.strip().replace("\n", "")
for x in desc_re.sub("\n", i[0]).strip().split("\n\n")
]
if len(terms) == 3:
description.append(f"Default: {terms[2]}")
keys.append(
{
"key": key,
"description": description,
}
)
except IndexError:
# will continue if key_match.group(1) or terms[0] does not exist
# for some reason
pass
return keys
| 16,605
|
def tick_update_position(tick, tickxs, tickys, labelpos):
"""Update tick line and label position and style."""
tick.label1.set_position(labelpos)
tick.label2.set_position(labelpos)
tick.tick1line.set_visible(True)
tick.tick2line.set_visible(False)
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
| 16,606
|
def new():
"""Create a new community."""
return render_template('invenio_communities/new.html')
| 16,607
|
def interpolate(results_t, results_tp1, dt, K, c2w, img_wh):
"""
Interpolate between two results t and t+1 to produce t+dt, dt in (0, 1).
For each sample on the ray (the sample points lie on the same distances, so they
actually form planes), compute the optical flow on this plane, then use softsplat
to splat the flows. Finally use MPI technique to compute the composite image.
Used in test time only.
Inputs:
results_t, results_tp1: dictionaries of the @render_rays function.
dt: float in (0, 1)
K: (3, 3) intrinsics matrix (MUST BE THE SAME for results_t and results_tp1!)
c2w: (3, 4) current pose (MUST BE THE SAME for results_t and results_tp1!)
img_wh: image width and height
Outputs:
(img_wh[1], img_wh[0], 3) rgb interpolation result
(img_wh[1], img_wh[0]) depth of the interpolation (in NDC)
"""
device = results_t['xyzs_fine'].device
N_rays, N_samples = results_t['xyzs_fine'].shape[:2]
w, h = img_wh
rgba = torch.zeros((h, w, 4), device=device)
depth = torch.zeros((h, w), device=device)
c2w_ = torch.eye(4)
c2w_[:3] = c2w
w2c = torch.inverse(c2w_)[:3]
w2c[1:] *= -1 # "right up back" to "right down forward" for cam projection
P = K @ w2c # (3, 4) projection matrix
grid = create_meshgrid(h, w, False, device) # (1, h, w, 2)
xyzs = results_t['xyzs_fine'] # equals results_tp1['xyzs_fine']
zs = rearrange(results_t['zs_fine'], '(h w) n2 -> h w n2', w=w, h=h)
# static buffers
static_rgb = rearrange(results_t['static_rgbs_fine'],
'(h w) n2 c -> h w n2 c', w=w, h=h, c=3)
static_a = rearrange(results_t['static_alphas_fine'], '(h w) n2 -> h w n2 1', w=w, h=h)
# compute forward buffers
xyzs_w = ray_utils.ndc2world(rearrange(xyzs, 'n1 n2 c -> (n1 n2) c'), K)
xyzs_fw_w = ray_utils.ndc2world(
rearrange(xyzs+results_t['transient_flows_fw'],
'n1 n2 c -> (n1 n2) c'), K) # fw points with full flow
xyzs_fw_w = xyzs_w + dt*(xyzs_fw_w-xyzs_w) # scale the flow with dt
uvds_fw = P[:3, :3] @ rearrange(xyzs_fw_w, 'n c -> c n') + P[:3, 3:]
uvs_fw = uvds_fw[:2] / uvds_fw[2]
uvs_fw = rearrange(uvs_fw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_fw = rearrange(uvs_fw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_fw = rearrange(uvs_fw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_t = rearrange(results_t['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_t = rearrange(results_t['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_t = torch.cat([transient_rgb_t, transient_a_t], 1)
# compute backward buffers
xyzs_bw_w = ray_utils.ndc2world(
rearrange(xyzs+results_tp1['transient_flows_bw'],
'n1 n2 c -> (n1 n2) c'), K) # bw points with full flow
xyzs_bw_w = xyzs_w + (1-dt)*(xyzs_bw_w-xyzs_w) # scale the flow with 1-dt
uvds_bw = P[:3, :3] @ rearrange(xyzs_bw_w, 'n c -> c n') + P[:3, 3:]
uvs_bw = uvds_bw[:2] / uvds_bw[2]
uvs_bw = rearrange(uvs_bw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_bw = rearrange(uvs_bw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_bw = rearrange(uvs_bw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_tp1 = rearrange(results_tp1['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_tp1 = rearrange(results_tp1['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_tp1 = torch.cat([transient_rgb_tp1, transient_a_tp1], 1)
for s in range(N_samples): # compute MPI planes (front to back composition)
transient_rgba_fw = FunctionSoftsplat(tenInput=transient_rgba_t[s:s+1].cuda(),
tenFlow=of_fw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_fw = rearrange(transient_rgba_fw, '1 c h w -> h w c')
transient_rgba_bw = FunctionSoftsplat(tenInput=transient_rgba_tp1[s:s+1].cuda(),
tenFlow=of_bw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_bw = rearrange(transient_rgba_bw, '1 c h w -> h w c')
composed_rgb = transient_rgba_fw[..., :3]*transient_rgba_fw[..., 3:]*(1-dt) + \
transient_rgba_bw[..., :3]*transient_rgba_bw[..., 3:]*dt + \
static_rgb[:, :, s]*static_a[:, :, s]
composed_a = 1 - (1-(transient_rgba_fw[..., 3:]*(1-dt)+
transient_rgba_bw[..., 3:]*dt)) * \
(1-static_a[:, :, s])
rgba[..., :3] += (1-rgba[..., 3:])*composed_rgb
depth += (1-rgba[..., 3])*composed_a[..., 0]*zs[..., s]
rgba[..., 3:] += (1-rgba[..., 3:])*composed_a
return rgba[..., :3], depth
| 16,608
|
def divisor(baudrate):
"""Calculate the divisor for generating a given baudrate"""
CLOCK_HZ = 50e6
return round(CLOCK_HZ / baudrate)
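# Usage sketch: with the 50 MHz clock assumed above, a 115200 baud target
# needs a divisor of round(50e6 / 115200).
assert divisor(115200) == 434    # 50e6 / 115200 ~= 434.03
assert divisor(9600) == 5208     # 50e6 / 9600 ~= 5208.33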
| 16,609
|
def test_Preprocess_undefined_variable():
"""Test that an undefined variable does not cause an error."""
assert (
clang.Preprocess(
"""
int main(int argc, char** argv) { return UNDEFINED_VARIABLE; }
""",
[],
)
== """
int main(int argc, char** argv) { return UNDEFINED_VARIABLE; }
"""
)
| 16,610
|
def scm_get_active_branch(*args, **kwargs):
"""
Get the active named branch of an existing SCM repository.
:param str path: Path on the file system where the repository resides. If not specified, it defaults to the
current work directory.
:return: Name of the active branch
:rtype: str
"""
if not _scm_handler:
_load_scm_handler()
return _scm_handler.get_active_branch(*args, **kwargs)
| 16,611
|
def search_evaluations(campus, **kwargs):
"""
year (required)
term_name (required): Winter|Spring|Summer|Autumn
curriculum_abbreviation
course_number
section_id
student_id (student number)
"""
url = "%s?%s" % (IAS_PREFIX, urlencode(kwargs))
data = get_resource_by_campus(url, campus)
evaluations = _json_to_evaluation(data)
return evaluations
| 16,612
|
async def get_user(username: str, session: AsyncSession) -> Optional[User]:
"""
Returns a user with the given username
"""
return (
(await session.execute(select(User).where(User.name == username)))
.scalars()
.first()
)
| 16,613
|
def convert_modtime_to_date(path):
"""
Formats last modification date of a file into m/d/y form.
Params:
path (file path): the file to be documented
Example:
convert_modtime_to_date('/users/.../last_minute_submission.pdf')
"""
fileStatsObj = os.stat(path)
modificationTime = time.ctime(fileStatsObj[stat.ST_MTIME])
return datetime.datetime.strptime(modificationTime,'%a %b %d %H:%M:%S %Y').strftime('%m/%d/%y')
| 16,614
|
def compute_balances(flows):
"""
Balances by currency.
:param flows:
:return:
"""
flows = flows.set_index('date')
flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)
balances = flows_by_asset.fillna(0).cumsum()
return balances
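# Illustrative example (the column names 'date', 'asset' and 'amount' follow the code above):
import pandas
example_flows = pandas.DataFrame({
    'date': ['2021-01-01', '2021-01-02', '2021-01-03'],
    'asset': ['BTC', 'BTC', 'ETH'],
    'amount': ['1.0', '-0.4', '2.0'],
})
print(compute_balances(example_flows))
# BTC runs 1.0 -> 0.6 -> 0.6, while ETH stays at 0.0 until the 2.0 inflow on the last day.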
| 16,615
|
def unjsonify(json_data):
"""
Converts the inputted JSON data to Python format.
:param json_data | <variant>
"""
return json.loads(json_data, object_hook=json2py)
| 16,616
|
def comp_state_dist(table: np.ndarray) -> Tuple[np.ndarray, List[str]]:
"""Compute the distribution of distinct states/diagnoses from a table of
individual diagnoses detailing the patterns of lymphatic progression per
patient.
Args:
table: Rows of patients and columns of LNLs, reporting which LNL was
involved for which patient.
Returns:
A histogram of unique states and a list of the corresponding state
labels.
Note:
This function cannot deal with parts of the diagnose being unknown. So
if, e.g., one level isn't reported for a patient, that row will just be
ignored.
"""
_, num_cols = table.shape
table = table.astype(float)
state_dist = np.zeros(shape=2**num_cols, dtype=int)
for row in table:
if not np.any(np.isnan(row)):
idx = int(np.sum([n * 2**i for i,n in enumerate(row[::-1])]))
state_dist[idx] += 1
state_labels = []
for i in range(2**num_cols):
state_labels.append(change_base(i, 2, length=num_cols))
return state_dist, state_labels
| 16,617
|
def polyConvert(coeffs, trans=(0, 1), backward=False):
"""
Converts polynomial coeffs for x (P = a0 + a1*x + a2*x**2 + ...) in
polynomial coeffs for x~:=a+b*x (P~ = a0~ + a1~*x~ + a2~*x~**2 +
...). Therefore, (a,b)=(0,1) makes nothing. If backward, makes the
opposite transformation.
Note: backward transformation could be done using more general
polynomial composition `polyval`, but forward transformation is a
long standing issue in the general case (look for functional
decomposition of univariate polynomial).
"""
a, b = trans
if not backward:
a = -float(a) / float(b)
b = 1 / float(b)
return N.dot(polyConvMatrix(len(coeffs), (a, b)), coeffs)
| 16,618
|
def uniform(_data, weights):
"""
Randomly initialize the weights with values between 0 and 1.
Parameters
----------
_data: ndarray
Data to pick to initialize weights.
weights: ndarray
Previous weight values.
Returns
-------
weights: ndarray
New weight values
"""
return random.rand(*weights.shape)
| 16,619
|
def ingresar_datos():
"""Ingresa los datos de las secciones"""
datos = {}
while True:
codigo = int_input('Ingrese el código de la sección: ')
if codigo < 0:
break
cantidad = int_input(
'Ingrese la cantidad de alumnos: ', min=MIN, max=MAX
)
datos[codigo] = cantidad
return datos
| 16,620
|
def fetch_credentials() -> Credentials:
"""Produces a Credentials object based on the contents of the
CONFIG_FILE or, alternatively, interactively.
"""
if CONFIG_FILE_EXISTS:
return parse_config_file(CONFIG_FILE)
else:
return get_credentials_interactively()
| 16,621
|
def pool_adjacency_mat_reference_wrapper(
adj: sparse.spmatrix, kernel_size=4, stride=2, padding=1
) -> sparse.spmatrix:
"""Wraps `pool_adjacency_mat_reference` to provide the same API as `pool_adjacency_mat`"""
adj = Variable(to_sparse_tensor(adj).to_dense())
adj_conv = pool_adjacency_mat_reference(adj, kernel_size, stride, padding)
return sparse.coo_matrix(adj_conv.data.numpy(), dtype=np.int16)
| 16,622
|
def send(socket, obj, flags=0, protocol=-1):
"""stringify an object, and then send it"""
s = str(obj)
return socket.send_string(s)
| 16,623
|
def arraystr(A: Array) -> str:
"""Pretty print array"""
B = np.asarray(A).ravel()
if len(B) <= 3:
return " ".join([itemstr(v) for v in B])
return " ".join([itemstr(B[0]), itemstr(B[1]), "...", itemstr(B[-1])])
| 16,624
|
def dist2_test(v1, v2, idx1, idx2, len2):
"""Square of distance equal"""
return (v1-v2).mag2() == len2
| 16,625
|
def extract_grid_cells(browser, grid_id):
"""
Given the ID of a legistar table, returns a list of dictionaries
for each row mapping column headers to td elements.
"""
table = browser.find_element_by_id(grid_id)
header_cells = table.find_elements_by_css_selector(
'thead:nth-child(2) > tr:nth-child(2) > th'
)
headers = [extract_text(cell) for cell in header_cells]
tbody = table.find_element_by_css_selector('tbody:nth-child(4)')
rows = tbody.find_elements_by_tag_name('tr')
result_rows = []
for row in rows:
cells = {}
td_elements = row.find_elements_by_tag_name('td')
for header, cell in zip(headers, td_elements):
cells[header] = cell
result_rows.append(cells)
return (headers, result_rows)
| 16,626
|
def gatherAllParameters(a, keep_orig=True):
"""Gather all parameters in the tree. Names are returned along
with their original names (which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
allIds |= gatherAllVariables(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.arg:
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
allIds |= set([(node.arg, origName)])
return allIds
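# Usage sketch: collect parameter names from a parsed function definition.
# Without an 'originalId' attribute on the arg nodes, the second tuple element is None.
import ast
tree = ast.parse("def f(a, b=1, *args, **kwargs): pass")
print(gatherAllParameters(tree))
# -> a set containing ('a', None), ('b', None), ('args', None), ('kwargs', None)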
| 16,627
|
def average_link_euclidian(X,verbose=0):
"""
Average link clustering based on data matrix.
Parameters
----------
X array of shape (nbitem,dim): data matrix
from which an Euclidian distance matrix is computed
verbose=0, verbosity level
Returns
-------
t a weightForest structure that represents the dendrogram of the data
Note
----
this method has not been optimized
"""
if X.shape[0]==np.size(X):
X = np.reshape(X,(np.size(X),1))
if np.size(X)<10000:
D = Euclidian_distance(X)
else:
raise ValueError("The distance matrix is too large")
t = average_link_distance(D,verbose)
return t
| 16,628
|
def cached(func):
"""Decorator cached makes the function to cache its result and return it in duplicate calls."""
prop_name = '__cached_' + func.__name__
@functools.wraps(func)
def _cached_func(self):
try:
return getattr(self, prop_name)
except AttributeError:
val = func(self)
setattr(self, prop_name, val)
return val
return _cached_func
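# Usage sketch: the decorated method runs once per instance; subsequent calls
# return the value cached on the instance attribute.
import functools  # required by the decorator above if not already imported
class _Report:
    def __init__(self):
        self.calls = 0

    @cached
    def total(self):
        self.calls += 1
        return 42

_r = _Report()
assert _r.total() == 42 and _r.total() == 42
assert _r.calls == 1  # the body only executed once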
| 16,629
|
def SX_inf(*args):
"""
create a matrix with all inf
inf(int nrow, int ncol) -> SX
inf((int,int) rc) -> SX
inf(Sparsity sp) -> SX
"""
return _casadi.SX_inf(*args)
| 16,630
|
def test_book_id_put_missing_auth_gets_400_status_code(testapp, testapp_session, one_user):
"""Test that PUT to book-id route gets 400 status code for missing auth."""
res = testapp.put('/books/1', status=400)
assert res.status_code == 400
| 16,631
|
def many_body_collide(
MAX_TIMER=600, VB=1000, TSTEP=0.0001, EP=0.01, recalculate=True, algorithm="vv"
):
"""
Runs a multi body collision to look check other functions.
"""
particles = []
sub = str(MAX_TIMER) + "t_" + str(EP) + "e"
co = scl.Controls(
MAXTIMER=MAX_TIMER,
TSTEP=TSTEP,
vb=VB,
halo=False,
EPS=EP,
OUT="./MBP_TEST/" + sub,
calculate_am=True,
algorithm=algorithm,
)
sy = scl.System(co, particles=particles)
name = "MBP" + sub
co.name = name
p = [
[0.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, -5.0, 0.0],
[5.0, 0.0, 0.0],
[-5.0, 0.0, 0.0],
]
v = [
[0.0, 0.0, 0.0],
[0.5, -0.2, 0.0],
[-0.5, 0.2, 0.0],
[-0.2, 0.5, 0.0],
[-0.2, 0.5, 0.0],
]
m = [1, 1, 1, 1, 1]
if recalculate:
particles = arbitrary_particle_adder(
co, particles, pos_list=p, vel_list=v, mass_list=m
)
sy = scl.System(co, particles=particles)
middle_manager(co, sy, name, particles=particles, recalculate=recalculate)
| 16,632
|
async def live_pusher(bot: Bot, config: Config):
"""直播推送"""
uids = config.get_live_uid_list()
if not uids:
return
log.debug(f'爬取直播列表,目前开播{sum(status.values())}人,总共{len(uids)}人')
br = BiliReq()
res = await br.get_live_list(uids)
if not res:
return
for uid, info in res.items():
new_status = 0 if info['live_status'] == 2 else info['live_status']
if uid not in status:
status[uid] = new_status
continue
old_status = status[uid]
if new_status != old_status:
if new_status == 0:
status[uid] = new_status
continue
room_id = info['short_id'] if info['short_id'] else info['room_id']
url = 'https://live.bilibili.com/' + str(room_id)
name = info['uname']
title = info['title']
cover = (info['cover_from_user'] if info['cover_from_user'] else info['keyframe'])
log.info(f"检测到开播:{name}({uid})")
live_msg = Card([
Header(f'{name} 正在直播:'),
Section(Kmarkdown(f'**{title}**')),
Container([Image(cover)]),
Section(Kmarkdown(f'[网页链接]({url})'))
])
guild = await bot.fetch_guild(config.khl_server_id)
channels = await guild.fetch_channel_list()
for i in channels:
if i.id in config.khl_channel:
await i.send([live_msg.build()])
status[uid] = new_status
| 16,633
|
def aes_encrypt(mode, aes_key, aes_iv, *data):
"""
Encrypt data with AES in specified mode.
:param aes_key: aes_key to use
:param aes_iv: initialization vector
"""
encryptor = Cipher(algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()).encryptor()
result = b""
for value in data:
    result += encryptor.update(value)
result += encryptor.finalize()
return result, None if not hasattr(encryptor, 'tag') else encryptor.tag
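# Usage sketch (the all-zero key and nonce below are placeholders for demonstration only;
# Cipher, algorithms and default_backend are assumed to be imported by the snippet above).
# CTR is not an AEAD mode, so no authentication tag is produced.
from cryptography.hazmat.primitives.ciphers import modes
demo_key = bytes(16)
demo_nonce = bytes(16)
ciphertext, tag = aes_encrypt(modes.CTR, demo_key, demo_nonce, b"hello world")
print(len(ciphertext), tag)  # 11 None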
| 16,634
|
def _ebpm_gamma_update_a(init, b, plm, step=1, c=0.5, tau=0.5, max_iters=30):
"""Backtracking line search to select step size for Newton-Raphson update of
a"""
def loss(a):
return -(a * np.log(b) + a * plm - sp.gammaln(a)).sum()
obj = loss(init)
d = (np.log(b) - sp.digamma(init) + plm).mean() / sp.polygamma(1, init)
update = loss(init + step * d)
while (not np.isfinite(update) or update > obj + c * step * d) and max_iters > 0:
step *= tau
update = loss(init + step * d)
max_iters -= 1
if max_iters == 0:
# Step size is small enough that update can be skipped
return init
else:
return init + step * d
| 16,635
|
def edge_distance_mapping(graph : Graph,
iterations : int,
lrgen : LearningRateGen,
verbose : bool = True,
reset_locations : bool = True):
"""
Stochastic Gradient Descent algorithm for performing graph vertex layout
optimization using the path distances as target distances in the layout.
The algorithm is adapted from the paper https://arxiv.org/pdf/1710.04626.pdf
Args:
graph : The graph to arrange
iterations : number of iteration rounds
lrgen : learning rate function that takes iteration round as input
verbose : boolean, set True to print progress status information
Returns:
Vertex location stress value list that contains one summary stress
value per iteration.
"""
# Create temporary lists of vertex list indices
n_vertex = graph.vertex_count
vertex_idx_list_a = np.arange(n_vertex)
vertex_idx_list_b = np.arange(n_vertex)
stress_list = []
# Calculate distance look-up table
dist_arr, keys = __edge_distance_lut(graph)
if reset_locations:
__reset_locations(graph)
# Main iteration loop
for iter_round in range(iterations):
stress = 0
lr = lrgen.get_lr(iter_round)
if verbose:
progress_print = ProgressPrint(n_vertex)
a_loop = 0
np.random.shuffle(vertex_idx_list_a)
for idx_a in vertex_idx_list_a:
np.random.shuffle(vertex_idx_list_b)
for idx_b in vertex_idx_list_b:
if idx_a == idx_b:
continue
# Get path distance from vertex a to b.
# Value -1 means there is no path.
dist_target = dist_arr[idx_a, idx_b]
if dist_target == np.inf:
continue
# Update the locations and get stress for the path
key_a = keys[idx_a]
key_b = keys[idx_b]
edge_stress = __coord_update(graph, key_a, key_b, dist_target, lr)
stress += edge_stress
# Progress monitoring
if verbose:
a_loop += 1
progress_print.print_update(iter_round, a_loop, stress)
stress_list.append(stress)
return stress_list
| 16,636
|
def prec_solvefn(t, y, r, z, gamma, delta, lr):
"""Solve the preconditioning system I - gamma * J except
for a constant factor."""
z[0] = r[0] + gamma * r[1]
z[1] = - gamma*k/m * r[0] + r[1]
| 16,637
|
def plot_n_images(image_array, n):
""" plot first n images
n has to be a square number
"""
first_n_images = image_array[:n, :]
grid_size = int(np.sqrt(n))
fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size, sharex=True, sharey=True, figsize=(8, 8))
for r in range(grid_size):
for c in range(grid_size):
ax_array[r, c].imshow(first_n_images[grid_size * r + c].astype(np.uint8))
plt.xticks(np.array([]))
plt.yticks(np.array([]))
| 16,638
|
def strip_extension(name: str) -> str:
"""
Remove a single extension from a file name, if present.
"""
last_dot = name.rfind(".")
if last_dot > -1:
return name[:last_dot]
else:
return name
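# Usage sketch:
assert strip_extension("report.pdf") == "report"
assert strip_extension("archive.tar.gz") == "archive.tar"  # only the last extension is dropped
assert strip_extension("README") == "README"               # no dot, name returned unchanged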
| 16,639
|
def next_joystick_device():
"""Finds the next available js device name."""
for i in range(100):
dev = "/dev/input/js{0}".format(i)
if not os.path.exists(dev):
return dev
| 16,640
|
def test_notimplemented():
"""
Function testing raising not implemented error for higher order
"""
x = fwd.Variable()
y = fwd.Variable()
with pytest.raises(NotImplementedError):
f = x * y
f.derivative_at(x, {x:0.5, y:0.5}, order=3)
with pytest.raises(NotImplementedError):
f = x / y
f.derivative_at(x, {x:0.5, y:0.5}, order=3)
with pytest.raises(NotImplementedError):
f = fwd.cotan(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.sec(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.csc(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.sinh(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.cosh(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.tanh(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.csch(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.sech(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.coth(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.arcsin(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.arccos(x)
f.derivative_at(x, {x:0.5}, order=2)
with pytest.raises(NotImplementedError):
f = fwd.arctan(x)
f.derivative_at(x, {x:0.5}, order=2)
| 16,641
|
def validatePullRequest(data):
"""Validate pull request by action."""
if 'action' not in data:
raise BadRequest('no event supplied')
if 'pull_request' not in data or 'html_url' not in data.get('pull_request'):
raise BadRequest('payload.pull_request.html_url missing')
return True
| 16,642
|
def assert_json_roundtrip_works(obj, text_should_be=None, resolvers=None):
"""Tests that the given object can serialized and de-serialized
Args:
obj: The object to test round-tripping for.
text_should_be: An optional argument to assert the JSON serialized
output.
resolvers: Any resolvers if testing those other than the default.
Raises:
AssertionError: The given object can not be round-tripped according to
the given arguments.
"""
buffer = io.StringIO()
cirq.protocols.to_json(obj, buffer)
if text_should_be is not None:
buffer.seek(0)
text = buffer.read()
assert text == text_should_be, text
buffer.seek(0)
restored_obj = cirq.protocols.read_json(buffer, resolvers=resolvers)
if isinstance(obj, np.ndarray):
np.testing.assert_equal(restored_obj, obj)
elif isinstance(obj, pd.DataFrame):
pd.testing.assert_frame_equal(restored_obj, obj)
elif isinstance(obj, pd.Index):
pd.testing.assert_index_equal(restored_obj, obj)
else:
assert restored_obj == obj
| 16,643
|
def _test(fonts, phase, extra_styles):
"""Build name info from font_paths and dump the names for them."""
family_to_name_info = create_family_to_name_info(fonts, phase, extra_styles)
print(write_family_name_info(family_to_name_info, pretty=True))
_dump_family_names(fonts, family_to_name_info, phase)
| 16,644
|
def __adjust_data_for_log_scale(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
This will clean and adjust some of the data so that Altair can plot it using a logarithmic scale. Altair does not
allow zero values on the Y axis when plotting with a logarithmic scale, as log(0) is undefined.
Args:
dataframe: The data to plot on the chart.
Returns: A new data frame with the appropriate adjustments for plotting on a log scale.
"""
return dataframe.replace(0, float('nan'))
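# Illustrative example: zeros become NaN, which Altair drops instead of failing on log(0).
import pandas as pd
_df = pd.DataFrame({'cases': [0, 5, 12]})
print(__adjust_data_for_log_scale(_df)['cases'].tolist())  # [nan, 5.0, 12.0]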
| 16,645
|
def predict_sentence(model,vocab,sentence):
"""Predicts the section value of a given sentence
INPUT: Trained model, Model vocab, Sentence to predict
OUTPUT: Assigned section to the sentence"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
nlp=spacy.load('en_core_sci_md')
model=model.to(device)
tokens=[t.text for t in nlp.tokenizer(sentence)]
indexed = [vocab[t] for t in tokens]
tensor_to_predict=torch.LongTensor(indexed).to(device)
tensor_to_predict=tensor_to_predict.unsqueeze(1).T
length_tensor= torch.LongTensor([len(indexed)]).to(device)
prediction=model(tensor_to_predict,length_tensor)
return prediction.argmax(1).item()
| 16,646
|
def ne_offsets_by_sent(
text_nest_list=[],
model='de_core_news_sm',
):
""" extracts offsets of NEs and the NE-type grouped by sents
:param text_nest_list: A list of list with following structure:\
[{"text": "Wien ist schön", "ner_dicts": [{"text": "Wien", "ne_type": "LOC"}]}]
:param model: The name of the spacy model which should be used for sentence splitting.
:return: A list of spacy-like NER Tuples [('some text'), entities{[(15, 19, 'place')]}]
"""
import spacy
nlp = spacy.load(model)
text_nes = text_nest_list
results = []
for entry in text_nes:
ner_dicts = entry['ner_dicts']
in_text = entry['text']
doc = nlp(in_text)
for sent in doc.sents:
entities = []
if sent.text != "":
plain_text = sent.text
for x in ner_dicts:
for m in re.finditer(x['text'], plain_text):
entities.append([m.start(), m.end(), x['ne_type']])
entities = [item for item in set(tuple(row) for row in entities)]
entities = sorted(entities, key=lambda x: x[0])
ents = []
next_item_index = 1
for x in entities:
cur_start = x[0]
try:
next_start = entities[next_item_index][0]
except IndexError:
next_start = 9999999999999999999999
if cur_start == next_start:
pass
else:
ents.append(x)
next_item_index = next_item_index + 1
train_data = (
plain_text,
{
"entities": ents
}
)
results.append(train_data)
return results
| 16,647
|
def write_build_info(filename, is_config_cuda, is_config_rocm, key_value_list):
"""Writes a Python that describes the build.
Args:
filename: filename to write to.
is_config_cuda: "True" if this build is configured for CUDA, "False" otherwise.
is_config_rocm: "True" if this build is configured for ROCm, "False" otherwise.
key_value_list: A list of "key=value" strings that will be added to the
module as additional fields.
Raises:
ValueError: If `key_value_list` includes the key "is_cuda_build", which
would clash with one of the default fields.
"""
module_docstring = "\"\"\"Generates a Python module containing information "
module_docstring += "about the build.\"\"\""
build_config_rocm_bool = "False"
build_config_cuda_bool = "False"
if is_config_rocm == "True":
build_config_rocm_bool = "True"
elif is_config_cuda == "True":
build_config_cuda_bool = "True"
key_value_pair_stmts = []
if key_value_list:
for arg in key_value_list:
key, value = arg.split("=")
if key == "is_cuda_build":
raise ValueError("The key \"is_cuda_build\" cannot be passed as one of "
"the --key_value arguments.")
if key == "is_rocm_build":
raise ValueError("The key \"is_rocm_build\" cannot be passed as one of "
"the --key_value arguments.")
key_value_pair_stmts.append("%s = %r" % (key, value))
key_value_pair_content = "\n".join(key_value_pair_stmts)
contents = """
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
%s
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
is_rocm_build = %s
is_cuda_build = %s
%s
""" % (module_docstring, build_config_rocm_bool, build_config_cuda_bool,
key_value_pair_content)
open(filename, "w").write(contents)
| 16,648
|
def clone_repo(
url: str,
path: str,
branch: Optional[str] = None,
) -> bool:
"""Clone repo from URL (at branch if specified) to given path."""
cmd = ['git', 'clone', url, path]
if branch:
cmd += ['--branch', branch]
return run(cmd)[0].returncode == 0
| 16,649
|
def get_inputtype(name, object_type):
"""Get an input type based on the object type"""
if object_type in _input_registry:
return _input_registry[object_type]
inputtype = type(
name,
(graphene.InputObjectType,),
_get_input_attrs(object_type),
)
_input_registry[object_type] = inputtype
return inputtype
| 16,650
|
def normalize_df_(df_train, *, other_dfs=None, skip_column=None):
"""
Normalizes all columns of `df_train` to zero mean unit variance in place.
Optionally performs same transformation to `other_dfs`
# Parameters
other_dfs [pd.DataFrame]: List of other DataFrames to apply transformation to
skip_column (str, int): Column to omit, for example categorical targets.
"""
if skip_column is None:
skip_columns = set()
else:
skip_columns = {skip_column}
# Skip where standard deviation is zero or close to zero
low_std_columns = df_train.columns[df_train.std() < 1e-6]
df_train.loc[:, low_std_columns] = 0
if other_dfs is not None:
for df in other_dfs:
df.loc[:, low_std_columns] = 0
skip_columns.update(set(low_std_columns))
columns = list(set(df_train.columns) - skip_columns)
mean = df_train[columns].mean(axis=0)
std = df_train[columns].std(axis=0)
df_train.loc[:, columns] -= mean
df_train.loc[:, columns] /= std
if other_dfs is not None:
for df in other_dfs:
df.loc[:, columns] -= mean
df.loc[:, columns] /= std
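# Usage sketch: normalization happens in place, near-constant columns are zeroed,
# and a categorical target can be excluded via skip_column.
import pandas as pd
_df_train = pd.DataFrame({'x': [1.0, 2.0, 3.0],
                          'y': [10.0, 20.0, 30.0],
                          'const': [5.0, 5.0, 5.0],
                          'label': [0, 1, 0]})
normalize_df_(_df_train, skip_column='label')
print(_df_train['x'].mean(), _df_train['x'].std())  # approximately 0.0 and 1.0
print(_df_train['const'].tolist())                  # zero-variance column set to 0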
| 16,651
|
def res_ex_response(e, original=False):
"""异常响应,结果必须可以作json序列化
参数: e # 原始错误信息
original # 是否返回原始错误信息,默认flase
"""
from src.common import lang
if os.getenv('ENV') != 'UnitTest':
current_app.logger.error(e)
msg = lang.resp('L_OPER_FAILED')
if original:
msg = str(e)
return jsonify({"msg":msg, "code":4001, "status":False}), 200
| 16,652
|
def transform_mappings(queryables, typename, reverse=False):
"""transform metadata model mappings"""
if reverse: # from csw:Record
for qbl in queryables.keys():
if qbl in typename.values():
tmp = [k for k, v in typename.items() if v == qbl][0]
val = queryables[tmp]
queryables[qbl] = {}
queryables[qbl]['xpath'] = val['xpath']
queryables[qbl]['dbcol'] = val['dbcol']
else: # to csw:Record
for qbl in queryables.keys():
if qbl in typename.keys():
queryables[qbl] = queryables[qbl]
| 16,653
|
def get_engine_status(engine=None):
"""Return a report of the current engine status"""
if engine is None:
engine = crawler.engine
global_tests = [
"time()-engine.start_time",
"engine.is_idle()",
"engine.has_capacity()",
"engine.scheduler.is_idle()",
"len(engine.scheduler.pending_requests)",
"engine.downloader.is_idle()",
"len(engine.downloader.sites)",
"engine.scraper.is_idle()",
"len(engine.scraper.sites)",
]
spider_tests = [
"engine.spider_is_idle(spider)",
"engine.closing.get(spider)",
"engine.scheduler.spider_has_pending_requests(spider)",
"len(engine.scheduler.pending_requests[spider])",
"len(engine.downloader.sites[spider].queue)",
"len(engine.downloader.sites[spider].active)",
"len(engine.downloader.sites[spider].transferring)",
"engine.downloader.sites[spider].closing",
"engine.downloader.sites[spider].lastseen",
"len(engine.scraper.sites[spider].queue)",
"len(engine.scraper.sites[spider].active)",
"engine.scraper.sites[spider].active_size",
"engine.scraper.sites[spider].itemproc_size",
"engine.scraper.sites[spider].needs_backout()",
]
status = {'global': {}, 'spiders': {}}
for test in global_tests:
try:
status['global'][test] = eval(test)
except Exception as e:
status['global'][test] = "%s (exception)" % type(e).__name__
for spider in engine.downloader.sites:
x = {}
for test in spider_tests:
try:
x[test] = eval(test)
except Exception as e:
x[test] = "%s (exception)" % type(e).__name__
status['spiders'][spider] = x
return status
| 16,654
|
def stock_em_jgdy_tj():
"""
Eastmoney (东方财富网) - Data Center - Featured Data - Institutional Research - Institutional Research Statistics
http://data.eastmoney.com/jgdy/tj.html
:return: pandas.DataFrame
"""
url = "http://data.eastmoney.com/DataCenter_V3/jgdy/gsjsdy.ashx"
page_num = _get_page_num_tj()
temp_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1)):
params = {
"pagesize": "5000",
"page": str(page),
"js": "var sGrabtEb",
"param": "",
"sortRule": "-1",
"sortType": "0",
"rt": "52581365",
}
res = requests.get(url, params=params)
data_json = json.loads(res.text[res.text.find("={")+1:])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
return temp_df
| 16,655
|
def storage():
"""Create managed bare-database repository storage."""
with tempfile.TemporaryDirectory(prefix="representations-") as temporary:
yield RepoStorage(directory=temporary)
| 16,656
|
def hpdi(x, prob=0.90, axis=0):
"""
Computes "highest posterior density interval" (HPDI) which is the narrowest
interval with probability mass ``prob``.
:param numpy.ndarray x: the input array.
:param float prob: the probability mass of samples within the interval.
:param int axis: the dimension to calculate hpdi.
:return: quantiles of ``x`` at ``(1 - prob) / 2`` and
``(1 + prob) / 2``.
:rtype: numpy.ndarray
"""
x = np.swapaxes(x, axis, 0)
sorted_x = np.sort(x, axis=0)
mass = x.shape[0]
index_length = int(prob * mass)
intervals_left = sorted_x[:(mass - index_length)]
intervals_right = sorted_x[index_length:]
intervals_length = intervals_right - intervals_left
index_start = intervals_length.argmin(axis=0)
index_end = index_start + index_length
hpd_left = np.take_along_axis(sorted_x, index_start[None, ...], axis=0)
hpd_left = np.swapaxes(hpd_left, axis, 0)
hpd_right = np.take_along_axis(sorted_x, index_end[None, ...], axis=0)
hpd_right = np.swapaxes(hpd_right, axis, 0)
return np.concatenate([hpd_left, hpd_right], axis=axis)
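# Usage sketch: for a standard normal sample, the 90% HPDI should roughly
# bracket [-1.64, 1.64].
import numpy as np
_samples = np.random.default_rng(0).normal(size=10_000)
lo, hi = hpdi(_samples, prob=0.90)
print(lo, hi)  # approximately -1.6 and 1.6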
| 16,657
|
def a(n, k):
"""calculates maximum power of p(n) needed
>>> a(0, 20)
4
>>> a(1, 20)
2
>>> a(2, 20)
1
"""
return floor(log(k) / log(p(n)))
| 16,658
|
def do_train(
train_path: typing.Union[str, Path],
model_path: typing.Union[str, Path],
max_iterations: int = 100,
**kwargs,
):
"""CLI method for train_model"""
train_model(train_path, model_path, max_iterations=max_iterations)
| 16,659
|
def is_windows():
""" détermine si le système actuel est windows """
return platform.system().lower() == "windows"
| 16,660
|
def WTfilt_1d(sig):
"""
Filter a single-lead ECG signal using the discrete wavelet transform.
Reference: Martis R J, Acharya U R, Min L C. ECG beat classification using PCA, LDA, ICA and discrete
wavelet transform[J]. Biomedical Signal Processing and Control, 2013, 8(5): 437-448.
:param sig: 1-D numpy array, single-lead ECG
:return: 1-D numpy array, filtered signal
"""
coeffs = pywt.wavedec(sig, 'db6', level=9)
coeffs[-1] = np.zeros(len(coeffs[-1]))
coeffs[-2] = np.zeros(len(coeffs[-2]))
coeffs[0] = np.zeros(len(coeffs[0]))
sig_filt = pywt.waverec(coeffs, 'db6')
return sig_filt
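# Usage sketch on a synthetic trace (the signal length and noise level are arbitrary
# choices for the demo; level=9 needs a reasonably long signal):
import numpy as np
import pywt
_t = np.linspace(0, 10, 10000)
_sig = np.sin(2 * np.pi * 1.2 * _t) + 0.3 * np.random.randn(_t.size) + 0.5
_filtered = WTfilt_1d(_sig)
print(_sig.shape, _filtered.shape)  # lengths may differ by one sample after waverec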
| 16,661
|
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
gamma = 0.99
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
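# Worked example: a single terminal reward of 1.0 propagates backwards with
# gamma = 0.99, so step t receives 0.99**(T - t) of it.
import numpy as np
print(discount_rewards(np.array([0.0, 0.0, 1.0])))  # [0.9801  0.99  1.]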
| 16,662
|
def mvn(tensor):
"""Per row mean-variance normalization."""
epsilon = 1e-6
mean = K.mean(tensor, axis=1, keepdims=True)
std = K.std(tensor, axis=1, keepdims=True)
mvn = (tensor - mean) / (std + epsilon)
return mvn
| 16,663
|
def grapheme_to_phoneme(text, g2p, lexicon=None):
"""Converts grapheme to phoneme"""
phones = []
words = filter(None, re.split(r"(['(),:;.\-\?\!\s+])", text))
for w in words:
if lexicon is not None and w.lower() in lexicon:
phones += lexicon[w.lower()]
else:
phones += list(filter(lambda p: p != " ", g2p(w)))
return phones
| 16,664
|
def mean_log_cosh_error(pred, target):
"""
Determine mean log cosh error.
f(y_t, y) = sum(log(cosh(y_t-y)))/n
where, y_t = predicted value
y = target value
n = number of values
:param pred: {array}, shape(n_samples,)
predicted values.
:param target: {array}, shape(n_samples,)
target values.
:return: mean log cosh error.
"""
error = pred - target
return np.mean(np.log(np.cosh(error)))
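# Worked example: identical predictions give zero error; a constant offset of
# 1.0 gives log(cosh(1)) ~= 0.4338 for every element.
import numpy as np
_target = np.array([1.0, 2.0, 3.0])
assert mean_log_cosh_error(_target, _target) == 0.0
print(mean_log_cosh_error(_target + 1.0, _target))  # ~0.4338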
| 16,665
|
def plot_abctraces(pools, surveypath=''):
""" Input: a list of pools in the abc format
Generates trace plots of the thetas,eps and metrics """
sns.set_style("white")
matplotlib.rc("font", size=30)
""" Plot Metric-Distances """
distances = np.array([pool.dists for pool in pools])
print(distances.shape)
f, ax = plt.subplots()
for ii in range(distances.shape[2]):
ax.errorbar(np.arange(len(distances)), np.mean(distances, axis=1)[:, ii], np.std(distances, axis=1)[:, ii], label='$\\Delta_%i$' % (ii+1))
# sns.distplot(np.asarray(distances)[:, ii], axlabel="distances", label='M%i' % (ii))
#plt.title("Development of Metric Distances")
plt.xlabel('Iteration')
plt.ylabel('Distance $\Delta$ in metric')
plt.legend()
plt.savefig('%s/Metrics.png' % (surveypath))
""" Plot Variables """
thetas = np.array([pool.thetas for pool in pools])
print(thetas.shape)
f, ax = plt.subplots()
for ii in range(thetas.shape[2]):
ax.errorbar(np.arange(len(thetas)), np.mean(thetas, axis=1)[:, ii], np.std(thetas, axis=1)[:, ii], label='$\\theta_%i$' % (ii+1))
#plt.title("Development of Parameters")
plt.xlabel('Iteration')
plt.ylabel('$\\theta_i$')
plt.legend()
plt.savefig('%s/Thetas.png' % (surveypath))
""" Plot Variables """
#TODO: Fix bug ... you need to call pools or pool?
for ii, pool, in enumerate(pools):
thetas = pool.thetas
figure = corner.corner(thetas)
plt.savefig('%s/CornerThetas_%02i.png' % (surveypath,ii))
"""
corner.corner(distances)
plots the various distances over each other to show if they are uncorrelated.
This is not super important, you could also use correlated distances with this approach. On the other hand it is interesting to see
if both metrics are independent, often this is a sign that they are good features!
"""
""" Plot Epsilon"""
fig, ax = plt.subplots()
eps_values = np.array([pool.eps for pool in pools])
for ii in range(distances.shape[2]):
ax.plot(eps_values[:, ii], label='$\epsilon_%i$' % (ii))
ax.set_xlabel("Iteration")
ax.set_ylabel(r"$\epsilon$", fontsize=15)
ax.legend(loc="best")
#ax.set_title("Thresholds $\epsilon$")
plt.savefig('%s/Thresholds.png' % (surveypath))
""" Violin Plots """
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
# generate some random test data
all_data = [pool.thetas[:,0] for pool in pools]
len_pools = [pool.thetas.shape[0]/pool.ratio for pool in pools]
print('len_pools:', len_pools, sum(len_pools))
mod_data = [np.concatenate((pool.thetas[:,0]+0.2, pool.thetas[:,0]), axis=0) for pool in pools]
# plot violin plot
#background = axes.violinplot(mod_data,
# showmeans=False,
# showmedians=False, showextrema=False)
axes.violinplot(all_data,
showmeans=False,
showmedians=True)
#for pc in background['bodies']:
# pc.set_facecolor('grey')
# pc.set_edgecolor('black')
# pc.set_alpha(0.4)
#for pc in foreground['bodies']:
# pc.set_facecolor('cornflowerblue')
# pc.set_edgecolor('black')
# pc.set_alpha(1)
# axes.set_title('violin plot')
# adding horizontal grid lines
axes.yaxis.grid(True)
axes.set_xticks([y+1 for y in range(len(all_data))])
axes.set_xlabel('Iteration')
axes.set_ylabel('$\\log_{10}(\\xi_e)$')
axes.set_ylabel('$\\log_{10}(\\xi_e)$')
# add x-tick labels
plt.setp(axes, xticks=[y+1 for y in range(len(all_data))])
plt.savefig('%s/Violin.png' % (surveypath))
plt.savefig('%s/Violin.pdf' % (surveypath))
plt.clf()
""" Plot Parameters
pools[ii].thetas[:, 0] is a numpy array
"""
for ii, nouse in enumerate(pools):
if thetas.shape[1] > 1:
jg = sns.jointplot(pools[ii].thetas[:, 0],
pools[ii].thetas[:, 1],
#kind="kde", # BUG: creates an error
)
jg.ax_joint.set_xlabel('var1')
jg.ax_joint.set_ylabel('var2')
plt.savefig('%s/FirstThetas_%i.png' % (surveypath, ii))
return 0
| 16,666
|
def initialize_runtime_commands():
"""
Creates all runtimeCommands that are depended to the Maya GUI.
"""
main_category = 'FG-Tools'
category = main_category + '.Display'
fg_tools.maya_runtime_command.create_runtime_command(command_name='fgToggleSmoothShaded',
annotation=toggle_smooth_shaded.__doc__,
command=('import fg_tools.ui\n'
'fg_tools.ui.toggle_smooth_shaded()'),
category=category)
fg_tools.maya_runtime_command.create_runtime_command(command_name='fgToggleWireframe',
annotation=toggle_wireframe.__doc__,
command=('import fg_tools.ui\n'
'fg_tools.ui.toggle_wireframe()'),
category=category)
fg_tools.maya_runtime_command.create_runtime_command(command_name='fgSaveSnapshot',
annotation='Create a snapshot of the viewport and save it in '
'the render folder.',
command=('import fg_tools.ui\n'
'fg_tools.ui.save_snapshot()'),
category=category)
| 16,667
|
def set_cookie(cookiejar, kaka):
"""
Place a cookie (an http_cookiejar.Cookie built from a set-cookie header line) in the cookie jar.
Always choose the shortest expires time.
:param cookiejar:
:param kaka: Cookie
"""
# default rfc2109=False
# max-age, httponly
for cookie_name, morsel in kaka.items():
std_attr = ATTRS.copy()
std_attr["name"] = cookie_name
_tmp = morsel.coded_value
if _tmp.startswith('"') and _tmp.endswith('"'):
std_attr["value"] = _tmp[1:-1]
else:
std_attr["value"] = _tmp
std_attr["version"] = 0
attr = ""
# copy attributes that have values
try:
for attr in morsel.keys():
if attr in ATTRS:
if morsel[attr]:
if attr == "expires":
std_attr[attr] = http2time(morsel[attr])
else:
std_attr[attr] = morsel[attr]
elif attr == "max-age":
if morsel[attr]:
std_attr["expires"] = http2time(morsel[attr])
except TimeFormatError:
# Ignore cookie
logger.info(
"Time format error on %s parameter in received cookie"
% (sanitize(attr),)
)
continue
for att, spec in PAIRS.items():
if std_attr[att]:
std_attr[spec] = True
if std_attr["domain"] and std_attr["domain"].startswith("."):
std_attr["domain_initial_dot"] = True
if morsel["max-age"] == 0:
try:
cookiejar.clear(
domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"],
)
except ValueError:
pass
else:
# Fix for Microsoft cookie error
if "version" in std_attr:
try:
std_attr["version"] = std_attr["version"].split(",")[0]
except (TypeError, AttributeError):
pass
new_cookie = http_cookiejar.Cookie(**std_attr) # type: ignore
cookiejar.set_cookie(new_cookie)
| 16,668
|
def init_configuration(config_file):
"""
config_file -- configuration file of SFT module.
raises ConfigError in case of problems.
"""
sft_globals.config = configuration.Config(config_file)
| 16,669
|
def main():
"""Run experiments"""
parser = argparse.ArgumentParser(
description="Run experiments for WAFR 2016 paper")
parser.add_argument('jobs', help='job file to run')
args = parser.parse_args()
with open(args.jobs, 'r') as job_file:
jobs = yaml.safe_load(job_file)
print("Starting {} jobs".format(len(jobs)))
callback = Callback(len(jobs))
# Setting maxtasksperchild forces the child process to restart
# after each task. This will force C libraries like numpy to
# clean up, which they otherwise wouldn't do. That isn't a major
# problem for me here, but the fix doesn't hurt anything either.
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1,
maxtasksperchild=1)
for name, job in jobs.items():
pool.apply_async(run, (name, job,), callback=callback)
pool.close()
pool.join()
print "Done"
| 16,670
|
def context(profile, mock_admin_connection, message):
"""RequestContext fixture."""
context = RequestContext(profile)
context.connection_record = mock_admin_connection
context.connection_ready = True
context.message = message
yield context
| 16,671
|
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height)
and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***22**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*4***'])
False
"""
return check_uniqueness_in_rows(get_board_columns(board), False) and \
check_horizontal_visibility(get_board_columns(board), False)
| 16,672
|
def extract_header(mjds, path, keywords, dtypes=None, split_dbs=False, is_range=False):
"""Returns a `~pandas.DataFrame` with header information.
For a list or range of MJDs, collects a series of header keywords for
database files and organises them in a `~pandas.DataFrame` sorted by
MJD and frame number.
Parameters
----------
mjds : list
The list of MJDs to extract. If the length of ``mjds`` is two and
``is_range=True``, all the MJDs between both values will be extracted.
path : str
The path to the database file.
keywords : list
A list of strings with the header keywords to extract.
dtypes : list, optional
A list of types to cast the keyword values.
split_dbs : bool, optional
If True, assumes that the DB is split into multiple files, one for each
MJD. In that case, the path for each file is assumed to be ``path``
with the ``_{MJD}`` suffix.
is_range : bool, optional
If True, assumes that ``mjds`` are the extremes of a range of MJDs.
"""
mjds = numpy.atleast_1d(mjds)
path = pathlib.Path(path)
keywords = [key.lower() for key in keywords]
if dtypes:
assert len(dtypes) == len(keywords), 'inconsistent lengths of keywords and dtypes'
assert mjds.ndim == 1, 'invalid number of dimensions in mjds'
if is_range:
assert len(mjds) == 2, 'when is_range=True, mjds must be a list of length 2'
mjds = numpy.arange(mjds[0], mjds[1] + 1)
if not split_dbs:
assert path.exists()
database.init(str(path))
assert database.connect(), 'cannot connect to database'
dataframes = []
with tqdm.trange(len(mjds)) as tt:
for mjd in mjds:
tt.set_description(str(mjd))
if split_dbs:
suffix = path.suffix
database_mjd = str(path).replace(suffix, f'_{mjd}{suffix}')
if not pathlib.Path(database_mjd).exists():
tt.update()
continue
database.init(str(database_mjd))
assert database.connect(), 'cannot connect to database'
Header = playhouse.reflection.Introspector.from_database(
database).generate_models()['header']
fields = [Frame.mjd, Frame.frame]
failed = any([not hasattr(Header, keyword) for keyword in keywords])
if failed:
tt.update()
continue
for keyword in keywords:
fields.append(getattr(Header, keyword))
data = Header.select(*fields).join(Frame, on=(Frame.pk == Header.frame_pk)).tuples()
dataframes.append(pandas.DataFrame(list(data), columns=(['mjd', 'frame'] + keywords)))
tt.update()
dataframe = pandas.concat(dataframes)
if dtypes:
failed = False
for ii, key in enumerate(keywords):
try:
dataframe[key] = dataframe[key].astype(dtypes[ii])
except ValueError as ee:
warnings.warn(f'failed to apply astype: {ee!r}', exceptions.GuiderQAUserWarning)
failed = True
if not failed:
dataframe = dataframe[dataframe > -999.]
dataframe = dataframe.set_index(['mjd', 'frame'])
dataframe.sort_index(inplace=True)
return dataframe
| 16,673
|
async def get_time():
"""获取服务器时间
"""
tz = pytz.timezone('Asia/Shanghai')
return {
'nowtime': datetime.now(),
'utctime': datetime.utcnow(),
'localtime': datetime.now(tz)
}
| 16,674
|
def health_check():
"""Attempt to ping the database and respond with a status code 200.
This endpoint is verify that the server is running and that the database is
accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response
| 16,675
|
def find_ad_adapter(bus):
"""Find the advertising manager interface.
:param bus: D-Bus bus object that is searched.
"""
remote_om = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.items():
if constants.LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None
| 16,676
|
def leaky_relu(x, slope=0.2):
"""Leaky Rectified Linear Unit function.
This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
is a configurable slope value.
Args:
x (~chainer.Variable): Input variable.
slope (float): Slope value :math:`a`.
Returns:
~chainer.Variable: Output variable.
"""
return LeakyReLU(slope)(x)
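# Numerical illustration of the formula above using NumPy only (not the chainer op):
import numpy as np
_x = np.array([-2.0, -0.5, 0.0, 3.0])
print(np.maximum(_x, 0.2 * _x))  # [-0.4 -0.1  0.   3. ]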
| 16,677
|
def test_check_whitespace_ink():
""" Whitespace glyphs have ink? """
check = CheckTester(universal_profile,
"com.google.fonts/check/whitespace_ink")
test_font = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
assert_PASS(check(test_font))
test_font["cmap"].tables[0].cmap[0x1680] = "a"
assert_PASS(check(test_font),
'because Ogham space mark does have ink.')
test_font["cmap"].tables[0].cmap[0x0020] = "uni1E17"
assert_results_contain(check(test_font),
FAIL, 'has-ink',
'for whitespace character having composites (with ink).')
test_font["cmap"].tables[0].cmap[0x0020] = "scedilla"
assert_results_contain(check(test_font),
FAIL, 'has-ink',
'for whitespace character having outlines (with ink).')
import fontTools.pens.ttGlyphPen
pen = fontTools.pens.ttGlyphPen.TTGlyphPen(test_font.getGlyphSet())
pen.addComponent("space", (1, 0, 0, 1, 0, 0))
test_font["glyf"].glyphs["uni200B"] = pen.glyph()
assert_results_contain(check(test_font),
FAIL, 'has-ink', # should we give is a separate keyword? This looks wrong.
'for whitespace character having composites (without ink).')
| 16,678
|
def GetConfig(user_config):
"""Decide number of vms needed to run oldisim."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves
+ NUM_DRIVERS + NUM_ROOTS)
return config
| 16,679
|
def build_tstuser_dir(username):
"""
Create a directory with files and return its structure
in a list.
:param username: str
:return: tuple
"""
# md5("foo") = "acbd18db4cc2f85cedef654fccc4a4d8"
# md5("bar") = "37b51d194a7513e45b56f6524f2d51f2"
# md5("spam") = "e09f6a7593f8ae3994ea57e1117f67ec"
file_contents = [
('spamfile', 'spam', 'e09f6a7593f8ae3994ea57e1117f67ec'),
(os.path.join('subdir', 'foofile.txt'), 'foo', 'acbd18db4cc2f85cedef654fccc4a4d8'),
(os.path.join('subdir', 'barfile.md'), 'bar', '37b51d194a7513e45b56f6524f2d51f2'),
]
user_root = userpath2serverpath(username)
# If directory already exists, destroy it
if os.path.isdir(user_root):
shutil.rmtree(user_root)
os.mkdir(user_root)
expected_timestamp = None
expected_snapshot = {}
for user_filepath, content, md5 in file_contents:
expected_timestamp = int(_create_file(username, user_filepath, content))
expected_snapshot[user_filepath] = [expected_timestamp, unicode(md5)]
return expected_timestamp, expected_snapshot
| 16,680
|
def gen_answer(question, passages):
"""由于是MLM模型,所以可以直接argmax解码。
"""
all_p_token_ids, token_ids, segment_ids = [], [], []
for passage in passages:
passage = re.sub(u' |、|;|,', ',', passage)
p_token_ids, _ = tokenizer.encode(passage, maxlen=max_p_len + 1)
q_token_ids, _ = tokenizer.encode(question, maxlen=max_q_len + 1)
all_p_token_ids.append(p_token_ids[1:])
token_ids.append([tokenizer._token_start_id])
token_ids[-1] += ([tokenizer._token_mask_id] * max_a_len)
token_ids[-1] += [tokenizer._token_end_id]
token_ids[-1] += (q_token_ids[1:] + p_token_ids[1:])
segment_ids.append([0] * len(token_ids[-1]))
token_ids = sequence_padding(token_ids)
segment_ids = sequence_padding(segment_ids)
probas = model.predict([token_ids, segment_ids])
results = {}
for t, p in zip(all_p_token_ids, probas):
a, score = tuple(), 0.
for i in range(max_a_len):
idxs = list(get_ngram_set(t, i + 1)[a])
if tokenizer._token_end_id not in idxs:
idxs.append(tokenizer._token_end_id)
            # pi zeroes out the probabilities of tokens outside the passage
pi = np.zeros_like(p[i])
pi[idxs] = p[i, idxs]
a = a + (pi.argmax(),)
score += pi.max()
if a[-1] == tokenizer._token_end_id:
break
score = score / (i + 1)
a = tokenizer.decode(a)
if a:
results[a] = results.get(a, []) + [score]
results = {
k: (np.array(v)**2).sum() / (sum(v) + 1)
for k, v in results.items()
}
return results
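# A hedged usage sketch, not part of the original source: assumes the globals
# used above (tokenizer, model, max_a_len, ...) are configured; the inputs are
# hypothetical placeholders. `gen_answer` returns {answer: score}, so the best
# answer is a single max over the dictionary.
scores = gen_answer(u'Example question?', [u'Example passage.'])
best_answer = max(scores, key=scores.get) if scores else ''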
| 16,681
|
def compile(function_or_sdfg, *args, **kwargs):
""" Obtain a runnable binary from a Python (@dace.program) function. """
if isinstance(function_or_sdfg, dace.frontend.python.parser.DaceProgram):
sdfg = dace.frontend.python.parser.parse_from_function(
function_or_sdfg, *args, **kwargs)
elif isinstance(function_or_sdfg, SDFG):
sdfg = function_or_sdfg
else:
raise TypeError("Unsupported function type")
return sdfg.compile(**kwargs)
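# A hedged usage sketch, not part of the original source: the tiny
# @dace.program below is a hypothetical illustration; the compiled object is
# callable like the original function.
@dace.program
def scale(A: dace.float64[10]):
    A[:] = A * 2.0

csdfg = compile(scale)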
| 16,682
|
def show_bgr(bgr, caption="image"):
"""指定BGRイメージをウィンドウへ表示する.
# Args:
bgr: BGRイメージ.
"""
cv2.imshow(caption, bgr)
cv2.waitKey(0)
| 16,683
|
def is_untweeable(html):
"""
    I'm not sure at the moment what constitutes untweeable HTML, but if we
    can't find the storeArea div in the TiddlyWiki HTML, that is a blocker.
"""
# the same regex used in tiddlywiki
divs_re = re.compile(
r'<div id="storeArea"(.*)</html>',
re.DOTALL
)
return bool(divs_re.search(html))
| 16,684
|
def create_1m_cnn_model(only_digits: bool = False, seed: Optional[int] = 0):
"""A CNN model with slightly under 2^20 (roughly 1 million) params.
A simple CNN model for the EMNIST character recognition task that is very
similar to the default recommended model from `create_conv_dropout_model`
but has slightly under 2^20 parameters. This is useful if the downstream task
involves randomized Hadamard transform, which requires the model weights /
  gradients / deltas concatenated as a single vector to be padded to the
nearest power-of-2 dimensions.
This model is used in https://arxiv.org/abs/2102.06387.
When `only_digits=False`, the returned model has 1,018,174 trainable
parameters. For `only_digits=True`, the last dense layer is slightly smaller.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
seed: A random seed governing the model initialization and layer randomness.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
initializer = tf.keras.initializers.GlorotUniform(seed=seed)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1),
kernel_initializer=initializer),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
kernel_initializer=initializer),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
128, activation='relu', kernel_initializer=initializer),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62,
activation=tf.nn.softmax,
kernel_initializer=initializer),
])
return model
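# A hedged usage sketch, not part of the original source: builds the model and
# checks that its parameter count fits under the next power of two (2**20),
# the padding target mentioned in the docstring.
import numpy as np
model = create_1m_cnn_model(only_digits=False, seed=0)
n_params = model.count_params()                    # 1,018,174 per the docstring
padded_dim = 2 ** int(np.ceil(np.log2(n_params)))  # 1,048,576 == 2**20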
| 16,685
|
def _check_index(target_expr, index_expr):
"""
helper function for making sure that an index is valid
:param target_expr: the target tensor
:param index_expr: the index
:return: the index, wrapped as an expression if necessary
"""
if issubclass(index_expr.__class__, _Expression):
index = index_expr
else:
index = _ConstScalar(index_expr)
if index.proto_expr.dtype is lang.UNDEFINED_TYPE:
raise TypeError('Can only index with a scalar.')
if type(index) is _ConstScalar:
if target_expr.size <= index.value() or index.value() < 0:
raise IndexError('Index out of bounds.')
return index
| 16,686
|
def create_access_token(user: UserModel, expires_delta: timedelta = None) -> str:
"""
Create an access token for a user
:param user: CTSUser -> The user
:param expires_delta: timedelta -> The expiration of the token. If not given a default will be used
:return: str -> A token
"""
load_all_config()
to_encode = user.dict()
if not expires_delta:
expires_delta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
return __generate_jwt_token(to_encode, expires_delta)
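# A hedged usage sketch, not part of the original source: `user` is assumed to
# be a UserModel instance; omitting expires_delta falls back to
# ACCESS_TOKEN_EXPIRE_MINUTES.
token = create_access_token(user, expires_delta=timedelta(hours=1))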
| 16,687
|
def is_async_mode():
"""Tests if we're in the async part of the code or not."""
async def f():
"""Unasync transforms async functions in sync functions."""
return None
obj = f()
if obj is None:
return False
obj.close() # prevent unawaited coroutine warning
return True
| 16,688
|
def remove_arm(frame):
"""
Removes the human arm portion from the image.
"""
##print("Removing arm...")
# Cropping 15 pixels from the bottom.
height, width = frame.shape[:2]
frame = frame[:height - 15, :]
##print("Done!")
return frame
| 16,689
|
def _DeleteCheckout(path, dry_run):
"""Deletes the checkout in |path|. Only actually deletes the checkout if
|dry_run| is False.
"""
_LOGGER.info('Deleting checkout directory: %s', path)
if dry_run:
return
_Shell('rmdir', '/S', '/Q', path, dry_run=False)
| 16,690
|
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar')
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
| 16,691
|
def cumulative_spread(array, x):
"""
>>> import numpy as np
>>> a = np.array([1., 2., 3., 4.])
>>> cumulative_spread(a, 0.)
array([0., 0., 0., 0.])
>>> cumulative_spread(a, 5.)
array([1., 2., 2., 0.])
>>> cumulative_spread(a, 6.)
array([1., 2., 3., 0.])
>>> cumulative_spread(a, 12.)
array([1., 2., 3., 4.])
"""
# This is probably inefficient.
cumulative_effect = np.cumsum(array) - array
b = x - cumulative_effect
return np.fmin(array, np.fmax(0, b))
| 16,692
|
def GetIdentifierStart(token):
"""Returns the first token in an identifier.
Given a token which is part of an identifier, returns the token at the start
of the identifier.
Args:
token: A token which is part of an identifier.
Returns:
The token at the start of the identifier or None if the identifier was not
of the form 'a.b.c' (e.g. "['a']['b'].c").
"""
start_token = token
previous_code_token = GetPreviousCodeToken(token)
while (previous_code_token and (
previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
IsDot(previous_code_token))):
start_token = previous_code_token
previous_code_token = GetPreviousCodeToken(previous_code_token)
if IsDot(start_token):
return None
return start_token
| 16,693
|
def search_Language_Binding_Spec(stmt, node, config, gentype=None):
""" Identifying a name in Language_Binding_Spec node"""
# No need to resolve exteranl c library routines
pass
| 16,694
|
def load_spec(filename):
"""
loads the IDL spec from the given file object or filename, returning a
Service object
"""
service = Service.from_file(filename)
service.resolve()
return service
| 16,695
|
def connect(db_config_name):
"""
Check the current environment to determine which database
parameters to use, then connect to the target database on the
specified host.
:return: A database connection object.
"""
config_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'config'
)
property_file = os.environ.get('DB_PARAM_FILE')
if property_file is None:
logger.info("No environmental variable set; using 'default.ini'.")
property_file = 'default.ini'
else:
logger.info("property file set: '{}'".format(property_file))
config = configparser.ConfigParser()
property_path = os.path.join(config_path, property_file)
with open(property_path) as f:
config.read_file(f)
db_host = config.get(db_config_name, 'db_host')
db_name = config.get(db_config_name, 'db_name')
logger.info("Connecting to database '{}' on host '{}'."
.format(db_name, db_host))
client = pymongo.MongoClient(db_host, 27017)
try:
logger.info("Authenticating database '{}'.".format(db_name))
client[db_name].authenticate(config.get(db_config_name, 'user'),
config.get(db_config_name, 'password'))
except configparser.NoOptionError:
logger.info("No username/password provided; "
"attempting to connect anyway.")
return client[db_name]
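# A hedged usage sketch, not part of the original source: the section name and
# the ini contents below are hypothetical examples of what the function expects
# to find under the config/ directory (or in the file named by DB_PARAM_FILE).
#
# [staging]
# db_host = mongo.example.org
# db_name = mydata
# user = reader
# password = secret
#
# db = connect('staging')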
| 16,696
|
def load_model(file_path, *, epoch, model, likelihood, mll, optimizer, loss):
"""モデルの保存関数
Parameters
----------
file_path : str
モデルの保存先のパスとファイル名
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
Returns
-------
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
"""
temp = torch.load(file_path)
epoch = temp['epoch']
model.load_state_dict(temp['model'])
likelihood.load_state_dict(temp['likelihood'])
mll.load_state_dict(temp['mll'])
optimizer.load_state_dict(temp['optimizer'])
loss = temp['loss']
return epoch, model, likelihood, mll, optimizer, loss
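# A hedged sketch of the matching save step, not part of the original source:
# the checkpoint is assumed to be a dict with exactly the keys read above.
torch.save({
    'epoch': epoch,
    'model': model.state_dict(),
    'likelihood': likelihood.state_dict(),
    'mll': mll.state_dict(),
    'optimizer': optimizer.state_dict(),
    'loss': loss,
}, file_path)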
| 16,697
|
def valid_account_id(log, account_id):
"""Validate account Id is a 12 digit string"""
if not isinstance(account_id, str):
log.error("supplied account id {} is not a string".format(account_id))
return False
id_re = re.compile(r'^\d{12}$')
if not id_re.match(account_id):
log.error("supplied account id '{}' must be a 12 digit number".format(account_id))
return False
return True
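# A hedged usage sketch, not part of the original source: assumes a standard
# logging.Logger instance.
import logging
log = logging.getLogger(__name__)
valid_account_id(log, '123456789012')  # True
valid_account_id(log, '12345')         # False, and an error is logged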
| 16,698
|
def R_2(opc):
"""2 ops: src1/src2"""
# REG_OPC(opc)
M3(opc)
| 16,699
|