#!/usr/bin/env python -tt
from collections import Counter
import unicodedata
words = []
with open("./all-verbs.txt", "rb") as fh:
words = fh.readlines()
words = [unicodedata.normalize("NFC", item.decode("utf-8").rstrip()) for item in words]
cnt = Counter()
for word in words:
cnt[word] += 1
for item in cnt.most_common(None):
print("{}\t{}".format(item[1], item[0]))
|
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
import re
import os
import torch
import torchvision.transforms as transforms
import os.path as osp
from torch.utils.data import Dataset
from os import listdir
from os.path import isfile, join
#from Bio.PDB import PDBParser
from src.dataloader_utils import AA_DICT, MASK_DICT, DSSP_DICT, NUM_DIMENSIONS, AA_PAD_VALUE, MASK_PAD_VALUE, \
DSSP_PAD_VALUE, SeqFlip, PSSM_PAD_VALUE, ENTROPY_PAD_VALUE, COORDS_PAD_VALUE, ListToNumpy
# Routines to read the file
def separate_coords(full_coords, pos): # pos can be either 0(n_term), 1(calpha), 2(cterm)
res = []
for i in range(len(full_coords[0])):
if i % 3 == pos:
res.append([full_coords[j][i] for j in range(3)])
return res
class switch(object):
"""Switch statement for Python, based on recipe from Python Cookbook."""
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5
self.fall = True
return True
else:
return False
def letter_to_num(string, dict_):
""" Convert string of letters to list of ints """
patt = re.compile('[' + ''.join(dict_.keys()) + ']')
num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)
num = [int(i) for i in num_string.split()]
return num
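# Illustrative example (hypothetical mapping; the dictionary values must be numeric strings so the
# concatenation with ' ' above works):
#   letter_to_num("ACA", {"A": "0", "C": "1"}) -> [0, 1, 0]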
def flip_multidimensional_list(list_in): # transposes a list of lists (swaps rows and columns)
list_out = []
ld = len(list_in)
for i in range(len(list_in[0])):
list_out.append([list_in[j][i] for j in range(ld)])
return list_out
class ListToNumpy(object):
def __init__(self):
pass
def __call__(self, *args):
args_array = ()
for arg in args:
args_array += (np.asarray(arg),)
return args_array
def __repr__(self):
return self.__class__.__name__ + '()'
def read_record(file_, num_evo_entries):
""" Read all protein records from pnet file. """
id = []
seq = []
pssm = []
entropy = []
dssp = []
coord = []
mask = []
while True:
next_line = file_.readline()
for case in switch(next_line):
if case('[ID]' + '\n'):
id.append(file_.readline()[:-1])
elif case('[PRIMARY]' + '\n'):
seq.append(letter_to_num(file_.readline()[:-1], AA_DICT))
elif case('[EVOLUTIONARY]' + '\n'):
evolutionary = []
for residue in range(num_evo_entries):
evolutionary.append([float(step) for step in file_.readline().split()])
pssm.append(evolutionary)
entropy.append([float(step) for step in file_.readline().split()])
elif case('[SECONDARY]' + '\n'):
dssp.append(letter_to_num(file_.readline()[:-1], DSSP_DICT))
elif case('[TERTIARY]' + '\n'):
tertiary = []
for axis in range(NUM_DIMENSIONS): tertiary.append([float(coord) for coord in file_.readline().split()])
coord.append(tertiary)
elif case('[MASK]' + '\n'):
mask.append(letter_to_num(file_.readline()[:-1], MASK_DICT))
elif case(''):
return id,seq,pssm,entropy,dssp,coord,mask
def parse_pnet(file):
with open(file, 'r') as f:
id, seq, pssm, entropy, dssp, coords, mask = read_record(f, 20)
r1 = []
r2 = []
r3 = []
pssm2 = []
for i in range(len(pssm)): #We transform each of these, since they are inconveniently stored
pssm2.append(flip_multidimensional_list(pssm[i]))
r1.append(separate_coords(coords[i], 0))
r2.append(separate_coords(coords[i], 1))
r3.append(separate_coords(coords[i], 2))
return id, seq, pssm2, entropy, dssp, r1,r2,r3, mask
# Processing the data to maps
def ang2plain(v1,v2,v3,v4):
nA = torch.cross(v1,v2)
nA = nA/torch.norm(nA)
nB = torch.cross(v3,v4)
nB = nB/torch.norm(nB)
cosPsi = torch.dot(nA,nB)
#Psi = torch.acos(cosPsi)
return cosPsi
def convertCoordToDistAngles(rN, rCa, rCb, mask=None):
'''
data should be coordinate data in pnet format, meaning that each amino acid is characterized
by a 3x3 matrix whose rows are the coordinates of r1, r2, r3 = N, Calpha, Cbeta.
This lite version only computes the angles along the sequence
(the upper one-off diagonal of the angle matrices in the full version).
'''
seq_len = rN.shape[0]
# Initialize distances and angles
d = torch.zeros([seq_len, seq_len])
phi = torch.zeros([seq_len,seq_len])
omega = torch.zeros([seq_len,seq_len])
theta = torch.zeros([seq_len,seq_len])
for i in range(seq_len):
for j in range(i+1,seq_len):
if mask is not None and (mask[i] == 0 or mask[j] == 0):
continue
r1i = rN[i, :] # N1 atom
r2i = rCa[i, :] # Ca1 atom
r3i = rCb[i, :] # Cb1 atom
r1j = rN[j, :] # N2 atom
r2j = rCa[j, :] # Ca2 atom
r3j = rCb[j, :] # Cb2 atom
# Compute distance Cb-Cb
vbb = r3j - r3i
d[i, j] = torch.norm(vbb)
d[j, i] = d[i,j]
# Compute phi
v1 = r2i - r3i # Ca1 - Cb1
v2 = r3i - r3j # Cb1 - Cb2
#phi[i,j] = torch.acos(torch.dot(v1,v2)/torch.norm(v1)/torch.norm(v2))
phi[i, j] = torch.dot(v1, v2) / torch.norm(v1) / torch.norm(v2)
v1 = r2j - r3j # Ca2 - Cb2
v2 = r3j - r3i # Cb2 -Cb1
#phi[j, i] = torch.acos(torch.dot(v1,v2)/torch.norm(v1)/torch.norm(v2))
phi[j, i] = torch.dot(v1, v2) / torch.norm(v1) / torch.norm(v2)
# Thetas
v1 = r1i - r2i # N1 - Ca1
v2 = r2i - r3i # Ca1 - Cb1
v3 = r3i - r3j # Cb1 - Cb2
theta[i,j] = ang2plain(v1, v2, v2, v3)
v1 = r1j - r2j # N2 - Ca2
v2 = r2j - r3j # Ca2 - Cb2
v3 = r3j - r3i # Cb2 - Cb1
theta[j,i] = ang2plain(v1, v2, v2, v3)
# Omega
v1 = r2i - r3i # Ca1 - Cb1
v2 = r3i - r3j # Cb1 - Cb2
v3 = r3j - r2j # Cb2 - Ca2
omega[i,j] = ang2plain(v1,v2,v2,v3)
omega[j,i] = omega[i,j]
return d, omega, phi, theta
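# Minimal usage sketch (illustrative: random coordinates instead of a real pnet record):
#   seq_len = 5
#   rN, rCa, rCb = torch.randn(seq_len, 3), torch.randn(seq_len, 3), torch.randn(seq_len, 3)
#   d, omega, phi, theta = convertCoordToDistAngles(rN, rCa, rCb, mask=torch.ones(seq_len))
# d is the symmetric seq_len x seq_len Cb-Cb distance matrix, omega is symmetric, while
# phi and theta are not symmetrized because they depend on the residue order (i, j) vs (j, i).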
def crossProdMat(V1,V2):
Vcp = torch.zeros(V1.shape)
Vcp[:,:,0] = V1[:,:,1]*V2[:,:,2] - V1[:,:,2]*V2[:,:,1]
Vcp[:,:,1] = -V1[:,:,0]*V2[:,:,2] + V1[:,:,2]*V2[:,:,0];
Vcp[:,:,2] = V1[:,:,0]*V2[:,:,1] - V1[:,:,1]*V2[:,:,0];
return Vcp
def ang2plainMat(v1,v2,v3,v4):
nA = crossProdMat(v1,v2)
nB = crossProdMat(v3, v4)
nA = nA/(torch.sqrt(torch.sum(nA**2,axis=2)).unsqueeze(2))
nB = nB/(torch.sqrt(torch.sum(nB**2,axis=2)).unsqueeze(2))
cosPsi = torch.sum(nA*nB,axis=2)
#Psi = torch.acos(cosPsi)
return cosPsi
def convertCoordToDistAnglesVec(rN, rCa, rCb, mask=None):
# Vectorized
# Get D
D = torch.sum(rCb ** 2, dim=1).unsqueeze(1) + torch.sum(rCb ** 2, dim=1).unsqueeze(0) - 2 * (rCb @ rCb.t())
M = mask.unsqueeze(1) @ mask.unsqueeze(0) if mask is not None else torch.ones(rCb.shape[0], rCb.shape[0]) # pairwise mask: 1 where both residues are observed
D = torch.sqrt(torch.relu(M*D))
# Get Upper Phi
# TODO clean Phi to be the same as OMEGA
V1x = rCa[:, 0].unsqueeze(1) - rCb[:, 0].unsqueeze(1)
V1y = rCa[:, 1].unsqueeze(1) - rCb[:, 1].unsqueeze(1)
V1z = rCa[:, 2].unsqueeze(1) - rCb[:, 2].unsqueeze(1)
V2x = rCb[:, 0].unsqueeze(1) - rCb[:, 0].unsqueeze(1).t()
V2y = rCb[:, 1].unsqueeze(1) - rCb[:, 1].unsqueeze(1).t()
V2z = rCb[:, 2].unsqueeze(1) - rCb[:, 2].unsqueeze(1).t()
# Normalize them
V1n = torch.sqrt(V1x**2 + V1y**2 + V1z**2)
V1x = V1x/V1n
V1y = V1y/V1n
V1z = V1z/V1n
V2n = torch.sqrt(V2x**2 + V2y**2 + V2z**2)
V2x = V2x/V2n
V2y = V2y/V2n
V2z = V2z/V2n
# go for it
PHI = M*(V1x * V2x + V1y * V2y + V1z * V2z)
indnan = torch.isnan(PHI)
PHI[indnan] = 0.0
# Omega
nat = rCa.shape[0]
V1 = torch.zeros(nat, nat, 3)
V2 = torch.zeros(nat, nat, 3)
V3 = torch.zeros(nat, nat, 3)
# Ca1 - Cb1
V1[:,:,0] = (rCa[:,0].unsqueeze(1) - rCb[:,0].unsqueeze(1)).repeat((1,nat))
V1[:,:,1] = (rCa[:,1].unsqueeze(1) - rCb[:,1].unsqueeze(1)).repeat((1, nat))
V1[:,:,2] = (rCa[:,2].unsqueeze(1) - rCb[:,2].unsqueeze(1)).repeat((1, nat))
# Cb1 - Cb2
V2[:,:,0] = rCb[:,0].unsqueeze(1) - rCb[:,0].unsqueeze(1).t()
V2[:,:,1] = rCb[:,1].unsqueeze(1) - rCb[:,1].unsqueeze(1).t()
V2[:,:,2] = rCb[:,2].unsqueeze(1) - rCb[:,2].unsqueeze(1).t()
# Cb2 - Ca2
V3[:,:,0] = (rCb[:,0].unsqueeze(0) - rCa[:,0].unsqueeze(0)).repeat((nat,1))
V3[:,:,1] = (rCb[:,1].unsqueeze(0) - rCa[:,1].unsqueeze(0)).repeat((nat,1))
V3[:,:,2] = (rCb[:,2].unsqueeze(0) - rCa[:,2].unsqueeze(0)).repeat((nat,1))
OMEGA = M*ang2plainMat(V1, V2, V2, V3)
indnan = torch.isnan(OMEGA)
OMEGA[indnan] = 0.0
# Theta
V1 = torch.zeros(nat, nat, 3)
V2 = torch.zeros(nat, nat, 3)
V3 = torch.zeros(nat, nat, 3)
# N - Ca
V1[:,:,0] = (rN[:,0].unsqueeze(1) - rCa[:,0].unsqueeze(1)).repeat((1,nat))
V1[:,:,1] = (rN[:,1].unsqueeze(1) - rCa[:,1].unsqueeze(1)).repeat((1, nat))
V1[:,:,2] = (rN[:,2].unsqueeze(1) - rCa[:,2].unsqueeze(1)).repeat((1, nat))
# Ca - Cb # TODO - repeated computation
V2[:,:,0] = (rCa[:,0].unsqueeze(1) - rCb[:,0].unsqueeze(1)).repeat((1,nat))
V2[:,:,1] = (rCa[:,1].unsqueeze(1) - rCb[:,1].unsqueeze(1)).repeat((1, nat))
V2[:,:,2] = (rCa[:,2].unsqueeze(1) - rCb[:,2].unsqueeze(1)).repeat((1, nat))
# Cb1 - Cb2 # TODO - repeated computation
V3[:,:,0] = rCb[:,0].unsqueeze(1) - rCb[:,0].unsqueeze(1).t()
V3[:,:,1] = rCb[:,1].unsqueeze(1) - rCb[:,1].unsqueeze(1).t()
V3[:,:,2] = rCb[:,2].unsqueeze(1) - rCb[:,2].unsqueeze(1).t()
THETA = M*ang2plainMat(V1, V2, V2, V3)
indnan = torch.isnan(THETA)
THETA[indnan] = 0.0
return D, OMEGA, PHI, THETA
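# Consistency-check sketch (illustrative): the vectorized version should agree with the loop
# version on the distance map, e.g.:
#   seq_len = 5
#   rN, rCa, rCb = torch.randn(seq_len, 3), torch.randn(seq_len, 3), torch.randn(seq_len, 3)
#   mask = torch.ones(seq_len)
#   D, OMEGA, PHI, THETA = convertCoordToDistAnglesVec(rN, rCa, rCb, mask=mask)
#   d, omega, phi, theta = convertCoordToDistAngles(rN, rCa, rCb, mask=mask)
#   assert torch.allclose(D, d, atol=1e-5)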
|
"""Methods for saving/loading checkpoints"""
import logging
import typing
from dataclasses import dataclass
from pathlib import Path
import torch
import torch.optim
from vits_train import setup_discriminator, setup_model
from vits_train.config import TrainingConfig
from vits_train.models import MultiPeriodDiscriminator, SynthesizerTrn
_LOGGER = logging.getLogger("vits_train.checkpoint")
# -----------------------------------------------------------------------------
@dataclass
class Checkpoint:
model_g: SynthesizerTrn
global_step: int
epoch: int
version: int
best_loss: typing.Optional[float] = None
model_d: typing.Optional[MultiPeriodDiscriminator] = None
optimizer_g: typing.Optional[torch.optim.Optimizer] = None
optimizer_d: typing.Optional[torch.optim.Optimizer] = None
scheduler_g: typing.Optional[torch.optim.lr_scheduler._LRScheduler] = None
scheduler_d: typing.Optional[torch.optim.lr_scheduler._LRScheduler] = None
def save_checkpoint(checkpoint: Checkpoint, checkpoint_path: Path):
"""Save model/optimizer/training state to a Torch checkpoint"""
checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
model_g = checkpoint.model_g
if hasattr(model_g, "module"):
state_dict_g = model_g.module.state_dict() # type: ignore
else:
state_dict_g = model_g.state_dict()
checkpoint_dict = {
"model_g": state_dict_g,
"global_step": checkpoint.global_step,
"epoch": checkpoint.epoch,
"version": checkpoint.version,
"best_loss": checkpoint.best_loss,
}
model_d = checkpoint.model_d
if model_d is not None:
if hasattr(model_d, "module"):
state_dict_d = model_d.module.state_dict() # type: ignore
else:
state_dict_d = model_d.state_dict()
checkpoint_dict["model_d"] = state_dict_d
optimizer_g = checkpoint.optimizer_g
if optimizer_g is not None:
checkpoint_dict["optimizer_g"] = optimizer_g.state_dict()
optimizer_d = checkpoint.optimizer_d
if optimizer_d is not None:
checkpoint_dict["optimizer_d"] = optimizer_d.state_dict()
scheduler_g = checkpoint.scheduler_g
if scheduler_g is not None:
checkpoint_dict["scheduler_g"] = scheduler_g.state_dict()
scheduler_d = checkpoint.scheduler_d
if scheduler_d is not None:
checkpoint_dict["scheduler_d"] = scheduler_d.state_dict()
torch.save(checkpoint_dict, checkpoint_path)
def load_checkpoint(
checkpoint_path: Path,
config: TrainingConfig,
model_g: typing.Optional[SynthesizerTrn] = None,
model_d: typing.Optional[MultiPeriodDiscriminator] = None,
load_discriminator: bool = True,
optimizer_g: typing.Optional[torch.optim.Optimizer] = None,
optimizer_d: typing.Optional[torch.optim.Optimizer] = None,
scheduler_g: typing.Optional[torch.optim.lr_scheduler._LRScheduler] = None,
scheduler_d: typing.Optional[torch.optim.lr_scheduler._LRScheduler] = None,
load_optimizers: bool = True,
load_schedulers: bool = True,
use_cuda: bool = True,
) -> Checkpoint:
"""Load model/optimizer/training state from a Torch checkpoint"""
checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
version = int(checkpoint_dict.get("version", 1))
global_step = int(checkpoint_dict.get("global_step", 1))
epoch = int(checkpoint_dict.get("epoch", 1))
best_loss = checkpoint_dict.get("best_loss")
if best_loss is not None:
best_loss = float(best_loss)
# Create generator if necessary
if model_g is None:
model_g = setup_model(config, use_cuda=use_cuda)
_load_state_dict(model_g, checkpoint_dict, "model_g")
if load_discriminator:
if model_d is None:
model_d = setup_discriminator(config, use_cuda=use_cuda)
_load_state_dict(model_d, checkpoint_dict, "model_d")
# Load optimizer states
if load_optimizers:
if optimizer_g is not None:
optimizer_g.load_state_dict(checkpoint_dict["optimizer_g"])
if optimizer_d is not None:
optimizer_d.load_state_dict(checkpoint_dict["optimizer_d"])
# Load scheduler states
if load_schedulers:
if scheduler_g is not None:
scheduler_g.load_state_dict(checkpoint_dict["scheduler_g"])
if scheduler_d is not None:
scheduler_d.load_state_dict(checkpoint_dict["scheduler_d"])
return Checkpoint(
model_g=model_g,
model_d=model_d,
optimizer_g=optimizer_g,
optimizer_d=optimizer_d,
scheduler_g=scheduler_g,
scheduler_d=scheduler_d,
global_step=global_step,
epoch=epoch,
version=version,
best_loss=best_loss,
)
def _load_state_dict(model, checkpoint_dict, key):
saved_state_dict_g = checkpoint_dict[key]
if hasattr(model, "module"):
state_dict_g = model.module.state_dict() # type: ignore
else:
state_dict_g = model.state_dict()
new_state_dict_g = {}
for k, v in state_dict_g.items():
if k in saved_state_dict_g:
# Use saved value
new_state_dict_g[k] = saved_state_dict_g[k]
else:
# Use initialized value
_LOGGER.warning("%s is not in the checkpoint for %s", k, key)
new_state_dict_g[k] = v
if hasattr(model, "module"):
model.module.load_state_dict(new_state_dict_g) # type: ignore
else:
model.load_state_dict(new_state_dict_g)
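# Usage sketch (illustrative; assumes a populated TrainingConfig and an existing checkpoint file,
# the path "checkpoints/latest.pth" is hypothetical):
#   config = TrainingConfig()
#   ckpt = load_checkpoint(Path("checkpoints/latest.pth"), config, use_cuda=False)
#   ckpt = Checkpoint(model_g=ckpt.model_g, model_d=ckpt.model_d, global_step=ckpt.global_step + 1,
#                     epoch=ckpt.epoch, version=ckpt.version, best_loss=ckpt.best_loss)
#   save_checkpoint(ckpt, Path("checkpoints/next.pth"))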
|
import numpy
from shadow4.syned.shape import Rectangle
from shadow4.syned.element_coordinates import ElementCoordinates
from shadow4.syned.refractors.interface import Interface
from shadow4.physical_models.prerefl.prerefl import PreRefl
from shadow4.beamline.s4_beamline_element import S4BeamlineElement
class S4Interface(Interface):
def __init__(self,
name="Undefined",
boundary_shape=None,
surface_shape=None,
material_object=None, # just a name, not used
material_image=None, # just a name, not used
f_r_ind = 0,
r_ind_obj = 1.0,
r_ind_ima = 1.0,
r_attenuation_obj = 0.0,
r_attenuation_ima = 0.0,
file_r_ind_obj = "",
file_r_ind_ima = "",
):
"""
f_r_ind: source of optical constants, from
constant value or PREREFL preprocessor (file):
(0) constant value in both object and image spaces
(1) file in object space, constant value in image space
(2) constant value in object space, file in image space
(3) file in both object and image space
r_ind_obj (for f_r_ind=0,2): index of refraction in object space.
r_ind_ima (for f_r_ind=0,1): index of refraction in image space.
r_attenuation_obj (for f_r_ind=0,2): attenuation coefficient in object space. Units of UserUnitLength^(-1)
r_attenuation_ima (for f_r_ind=0,1): attenuation coefficient in image space. Units of UserUnitLength^(-1)
file_r_ind_obj (for f_r_ind=1,3): file generated by PREREFL
file_r_ind_ima (for f_r_ind=2,3): file generated by PREREFL
"""
Interface.__init__(self,
name=name,
surface_shape=surface_shape,
boundary_shape=boundary_shape,
material_object=material_object,
material_image=material_image,
)
self._f_r_ind = f_r_ind
self._r_ind_obj = r_ind_obj
self._r_ind_ima = r_ind_ima
self._r_attenuation_obj = r_attenuation_obj
self._r_attenuation_ima = r_attenuation_ima
self._file_r_ind_obj = file_r_ind_obj
self._file_r_ind_ima = file_r_ind_ima
def get_refraction_indices(self):
if self._f_r_ind == 0:
refraction_index_object = self._r_ind_obj
refraction_index_image = self._r_ind_ima
elif self._f_r_ind == 1:
raise NotImplementedError()
elif self._f_r_ind == 2:
raise NotImplementedError()
elif self._f_r_ind == 3:
raise NotImplementedError()
else:
raise NotImplementedError()
return refraction_index_object, refraction_index_image
class S4InterfaceElement(S4BeamlineElement):
def __init__(self, optical_element=None, coordinates=None):
super().__init__(optical_element if optical_element is not None else S4Interface(),
coordinates if coordinates is not None else ElementCoordinates())
def trace_beam(self, beam_in, flag_lost_value=-1):
p = self.get_coordinates().p()
q = self.get_coordinates().q()
theta_grazing1 = numpy.pi / 2 - self.get_coordinates().angle_radial()
theta_grazing2 = numpy.pi / 2 - self.get_coordinates().angle_radial_out()
alpha1 = self.get_coordinates().angle_azimuthal()
#
beam = beam_in.duplicate()
#
# put beam in mirror reference system
#
beam.rotate(alpha1, axis=2)
beam.rotate(theta_grazing1, axis=1)
beam.translation([0.0, -p * numpy.cos(theta_grazing1), p * numpy.sin(theta_grazing1)])
#
# reflect beam in the mirror surface
#
soe = self.get_optical_element() #._optical_element_syned
# print(">>> CCC", soe.get_surface_shape().get_conic_coefficients())
# TODO: no check for total reflection is done...
# TODO: implement correctly in shadow4 via Fresnel equations for the transmitted beam
if not isinstance(soe, Interface): # undefined
raise Exception("Undefined refractive interface")
else:
beam_mirr, normal = self.apply_local_refraction(beam)
#
# apply mirror boundaries
#
beam_mirr.apply_boundaries_syned(soe.get_boundary_shape(), flag_lost_value=flag_lost_value)
#
# TODO" apply lens absorption
#
#
# from element reference system to image plane
#
beam_out = beam_mirr.duplicate()
n1, n2 = self.get_optical_element().get_refraction_indices()
beam_out.change_to_image_reference_system(theta_grazing2, q, refraction_index=n2)
return beam_out, beam_mirr
def apply_local_refraction(self, beam):
raise NotImplementedError()
if __name__ == "__main__":
pass
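# Illustrative sketch (constant-index case only, since the PREREFL file branches above are not
# implemented; all values are hypothetical):
#   itf = S4Interface(name="demo", f_r_ind=0, r_ind_obj=1.0, r_ind_ima=1.5)
#   print(itf.get_refraction_indices())  # -> (1.0, 1.5)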
|
from time import sleep
import struct
import hid
FRAME_HEADER = 0x55
CMD_SERVO_MOVE = 0x03
CMD_MULT_SERVO_UNLOAD = 0x14
CMD_MULT_SERVO_POS_READ = 0x15
device = hid.device()
device.open(0x0483, 0x5750) # LOBOT VendorID/ProductID
print(f"Manufacturer: {device.get_manufacturer_string()}")
print(f"Product: {device.get_product_string()}")
print(f"Serial No: {device.get_serial_number_string()}")
def move_servo(servo_id: int, position: int, time: int) -> None:
if not time > 0:
raise ValueError("time must be greater than 0")
position = struct.pack("<H", position)
_time = struct.pack("<H", time)
buf = bytearray(11)
buf[0] = 0x00 # Hid id
buf[1] = FRAME_HEADER
buf[2] = FRAME_HEADER
buf[3] = 8 # Length
buf[4] = CMD_SERVO_MOVE
buf[5] = 1 # Number of servo
buf[6] = _time[0]
buf[7] = _time[1]
buf[8] = servo_id
buf[9] = position[0]
buf[10] = position[1]
device.write(buf)
sleep((time + 50) / 1000)
def move_servos(servos_id: tuple, positions: tuple, time: int) -> None:
if not time > 0:
raise ValueError("time must be greater than 0")
length = (len(servos_id) * 3) + 5
_time = struct.pack("<H", time)
buf = bytearray(length + 3) # HID report id and the two header bytes don't count towards length
buf[0] = 0x00 # Hid id
buf[1] = FRAME_HEADER
buf[2] = FRAME_HEADER
buf[3] = length
buf[4] = CMD_SERVO_MOVE
buf[5] = len(servos_id) # Number of servo
buf[6] = _time[0]
buf[7] = _time[1]
for pos, index in enumerate(range(0, len(servos_id) * 3, 3)):
position = struct.pack("<H", positions[pos])
buf[8 + index] = servos_id[pos]
buf[9 + index] = position[0]
buf[10 + index] = position[1]
device.write(buf)
sleep((time + 50) / 1000)
def unload_servos(servos_id: tuple) -> None:
buf = bytearray(6 + len(servos_id))
buf[0] = 0x00 # Hid id
buf[1] = FRAME_HEADER
buf[2] = FRAME_HEADER
buf[3] = 3 + len(servos_id)
buf[4] = CMD_MULT_SERVO_UNLOAD
buf[5] = len(servos_id)
for index, servo in enumerate(servos_id):
buf[6 + index] = servo
device.write(buf)
def get_servos_position(servos_id: tuple) -> tuple:
buf = bytearray(6 + len(servos_id))
buf[0] = 0x00 # Hid id
buf[1] = FRAME_HEADER
buf[2] = FRAME_HEADER
buf[3] = 3 + len(servos_id)
buf[4] = CMD_MULT_SERVO_POS_READ
buf[5] = len(servos_id)
for index, servo in enumerate(servos_id):
buf[6 + index] = servo
device.write(buf)
sleep(0.2)
data = bytearray(device.read(64))
if data is None or data[:2] != b"\x55\x55":
raise ValueError("received data does not match the expected frame header")
positions = list()
for i in range(len(servos_id)):
pos = data[5 + (i * 3):8 + (i * 3)]
pos = struct.unpack("<H", pos[1:])
positions.append(pos[0])
return tuple(positions)
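# Illustrative usage sketch (assumes a LOBOT servo controller is attached and servos 1 and 2 exist):
#   move_servo(1, 500, 1000)            # move servo 1 to position 500 over 1000 ms
#   print(get_servos_position((1,)))    # read back the current position, e.g. (500,)
#   move_servos((1, 2), (300, 700), 1500)
#   unload_servos((1, 2))               # power the servos off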
|
from abc import ABCMeta, abstractmethod
from typing import Optional
from categories.models import Category
from features.models import SourceType, Tag
class FeatureImporterBase(metaclass=ABCMeta):
@property
@abstractmethod
def source_system(self):
pass
@property
@abstractmethod
def source_type(self):
pass
def get_source_type(self):
st, created = SourceType.objects.get_or_create(
system=self.source_system, type=self.source_type
)
return st
@abstractmethod
def import_features(self):
"""This method should result in data being imported from a source into Features.
- Creates a features.models.SourceType if one doesn't exist.
- Creates or updates features.models.Feature instances.
"""
class MapperBase:
"""Base for implementing a mapper with configuration.
This base class handles processing the mapping configuration into a structure which
is faster to search from. Whitelisting configuration can be enabled by a subclass
with `whitelist` class variable. External strings in whitelisting and mapping rules
should be treated as case insensitive.
Example configuration:
{
"rules": [
{
# External strings which are mapped into an internal object
"mapped_names": ["Swimming", "Beach"],
"id": "beach", # Identifier internal object
"name": "Beach", # Name of the internal object
},
...
],
# If whitelisting is enabled
"whitelist": ["Island", "Sauna"],
}
"""
whitelist = False
def __init__(self, config: dict):
self.config = {
"rules": {},
}
if self.whitelist:
self.config["whitelist"] = [
item.lower() for item in config.get("whitelist", [])
]
for rule in config.get("rules", []):
for mapped_name in rule["mapped_names"]:
self.config[mapped_name.lower()] = rule
class TagMapper(MapperBase):
"""Maps external tags into Tag instances in the system.
External tags present in imported sources are either whitelisted,
mapped into internal tags or ignored. Whitelisted tags are imported
with their information. Mapped tags are created as internal tags.
Only tags defined in the configuration will be considered.
"""
whitelist = True
def get_tag(self, tag: dict) -> Optional[Tag]:
"""Return a Tag instance for the given input.
Tag instance is created and returned if the given input
matches a Tag recognised by this mapper.
Expected format for the tag input: {"id": str, "name": str}
"""
# Whitelisted tags
if tag["name"].lower() in self.config["whitelist"]:
tag, created = Tag.objects.language("fi").update_or_create(
id=tag["id"], defaults={"name": tag["name"]},
)
return tag
# Mapped tags
mapping = self.config.get(tag["name"].lower())
if mapping:
tag, created = Tag.objects.language("fi").update_or_create(
id=mapping["id"], defaults={"name": mapping["name"]},
)
return tag
return None
class CategoryMapper(MapperBase):
"""Maps external categories into Category instances in the system.
External categories present in imported sources are either: mapped
into internal categories or ignored. Mapped categories are created
as internal categories. Only categories defined in the configuration
will be considered.
"""
def get_category(self, category: dict) -> Optional[Category]:
"""Return a Category instance for the given input.
Category instance is created and returned if the given input
matches a Category recognised by this mapper.
Expected format for the category input: {"id": str, "name": str}
"""
mapping = self.config.get(category["name"].lower())
if mapping:
category, created = Category.objects.language("fi").update_or_create(
id=mapping["id"], defaults={"name": mapping["name"]},
)
return category
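# Illustrative sketch with a hypothetical configuration; Tag objects are created/updated via
# update_or_create with Finnish ("fi") translations:
#   tag_mapper = TagMapper({
#       "rules": [{"mapped_names": ["Swimming", "Beach"], "id": "beach", "name": "Beach"}],
#       "whitelist": ["Island", "Sauna"],
#   })
#   tag_mapper.get_tag({"id": "ext-1", "name": "swimming"})  # -> Tag(id="beach", name="Beach")
#   tag_mapper.get_tag({"id": "ext-2", "name": "Opera"})     # -> None (neither whitelisted nor mapped)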
|
import argparse
import ConfigParser
import cPickle
import mysql.connector
import time
import sys, os, re
from context import diana
import diana.classes.drug as diana_drug
def main():
options = parse_user_arguments()
design_experiment_dcdb(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Generate the profiles of the input drug",
epilog = "@oliva's lab 2017")
parser.add_argument('-cr','--crossings_file',dest='crossings_file',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace/crossings_file.txt'),
help = """Define the file where the drug crossings to be explored will be written""")
parser.add_argument('-min','--minimum_targets',dest='minimum_targets',action = 'store',default=3,
help = """Define the minimum number of targets that the drugs need to have to be considered in the experiment""")
parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
help = """" Input file with the protein-protein interaction network in SIF format that will be used in the experiment. """)
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def design_experiment_dcdb(options):
"""
Designs the drug crossings to be explored in the experiment of DCDB.
"""
# Start marker for time measure
start = time.time()
# Get the script path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
toolbox_dir = os.path.join(main_path, 'diana/toolbox')
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
#-------------------------#
# CREATE PICKLE FILES #
#-------------------------#
dcdb2targets_file = os.path.join(toolbox_dir, 'dcdb2targets.pcl')
drugbank2targets_file = os.path.join(toolbox_dir, 'drugbank_to_targets.pcl')
pfam_dcdb_pickle_file = os.path.join(toolbox_dir, 'dcdb_target_to_pfams.pcl')
smiles_dcdb_pickle_file = os.path.join(toolbox_dir, 'dcdb2smiles.pcl')
atc_dcdb_pickle_file = os.path.join(toolbox_dir, 'dcdb2atcs.pcl')
sider_dcdb_pickle_file = os.path.join(toolbox_dir, 'dcdb2side_effects.pcl')
int_to_drugs_file = os.path.join(toolbox_dir, 'drug_int_2_drugs.pcl')
int_to_info_file = os.path.join(toolbox_dir, 'drug_int_2_info.pcl')
dump_file = os.path.join(toolbox_dir, 'pair2comb.pcl')
pubchem2drugbank_file = os.path.join(toolbox_dir, 'pubchem_to_drugbank.pcl')
# Obtain the targets for all the DCDB drugs
if not fileExist(dcdb2targets_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( dcdb2targets_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
diana_drug.obtain_dcdb_to_targets(biana_cnx, config.get('BIANA', 'unification_protocol'), options.sif, dcdb2targets_file)
biana_cnx.close()
# Obtain the targets for all the DrugBank drugs
if not fileExist(drugbank2targets_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( drugbank2targets_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
diana_drug.obtain_drugbank_to_targets(biana_cnx, config.get('BIANA', 'unification_protocol'), options.sif, drugbank2targets_file)
biana_cnx.close()
# Obtain all the PFAMs of the targets
if not fileExist(pfam_dcdb_pickle_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( pfam_dcdb_pickle_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
dcdb2targets = cPickle.load(open(dcdb2targets_file))
all_targets = set()
for drug in dcdb2targets:
for target in dcdb2targets[drug]:
all_targets.add(target)
diana_drug.obtain_target_to_pfam(biana_cnx, config.get('BIANA', 'unification_protocol'), all_targets, pfam_dcdb_pickle_file)
biana_cnx.close()
# Obtain the SMILES of all the DCDB drugs
if not fileExist(smiles_dcdb_pickle_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( smiles_dcdb_pickle_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
drug2targets = cPickle.load(open(dcdb2targets_file))
all_drugs = set(drug2targets.keys())
diana_drug.obtain_drug_to_smiles(biana_cnx, config.get('BIANA', 'unification_protocol'), all_drugs, 'dcdb', smiles_dcdb_pickle_file)
biana_cnx.close()
# Obtain the ATCs of all the DCDB drugs
if not fileExist(atc_dcdb_pickle_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( atc_dcdb_pickle_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
drug2targets = cPickle.load(open(dcdb2targets_file))
all_drugs = set(drug2targets.keys())
diana_drug.obtain_drug_to_atcs(biana_cnx, config.get('BIANA', 'unification_protocol'), all_drugs, 'dcdb', atc_dcdb_pickle_file)
biana_cnx.close()
# Obtain the side effects of all the DCDB drugs
if not fileExist(sider_dcdb_pickle_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( sider_dcdb_pickle_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
drug2targets = cPickle.load(open(dcdb2targets_file))
all_drugs = set(drug2targets.keys())
diana_drug.obtain_drug_to_side_effects(biana_cnx, config.get('BIANA', 'unification_protocol'), all_drugs, 'dcdb', sider_dcdb_pickle_file)
biana_cnx.close()
# Obtain the component drugs of the drug interactions in DCDB
if not fileExist(int_to_drugs_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( int_to_drugs_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
diana_drug.obtain_drug_interaction_to_drugs(biana_cnx, int_to_drugs_file)
biana_cnx.close()
# Obtain the information of the drug interactions in DCDB
if not fileExist(int_to_info_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( int_to_info_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
diana_drug.obtain_drug_interaction_to_info(biana_cnx, int_to_info_file)
biana_cnx.close()
#---------------------------#
# GET THE LIST OF DRUGS #
#---------------------------#
dcdb2targets = cPickle.load(open(dcdb2targets_file))
# Get the drugs with at least the minimum number of targets
drugs_with_targets = set()
for dcdb in dcdb2targets:
if len(dcdb2targets[dcdb]) >= int(options.minimum_targets):
drugs_with_targets.add(dcdb)
print('\nThe number of drugs included is: {}\n'.format(len(drugs_with_targets)))
# Get the drugs that have at least one PFAM
drugs_with_pfams = set()
geneid2pfams = cPickle.load(open(pfam_dcdb_pickle_file))
for drug in drugs_with_targets:
for target in dcdb2targets[drug]:
if target in geneid2pfams:
drugs_with_pfams.add(drug)
print('The number of drugs with at least one PFAM is: {}\n'.format(len(drugs_with_pfams)))
# Check how many drugs have side effects
drug2side_effects = cPickle.load(open(sider_dcdb_pickle_file))
drugs_with_side_effects = set(drug2side_effects.keys()) & drugs_with_pfams
print('The number of drugs with at least one SIDE EFFECT is: {}\n'.format(len(drugs_with_side_effects)))
drugs_no_se = [x for x in drugs_with_targets if x not in set(drug2side_effects.keys())]
print(drugs_no_se)
# Get the drugs that have at least one ATC
dcdb2atcs = cPickle.load(open(atc_dcdb_pickle_file))
drugs_with_atcs = set(dcdb2atcs.keys()) & drugs_with_side_effects
print('The number of drugs with at least one ATC is: {}\n'.format(len(drugs_with_atcs)))
# Check how many drugs have SMILES
drug2smiles = cPickle.load(open(smiles_dcdb_pickle_file))
drugs_with_smiles = set(drug2smiles.keys()) & drugs_with_atcs
print('The number of DCDB drugs with at least one SMILES is: {}\n'.format(len(drugs_with_smiles)))
#-------------------------------------------------------#
# OBTAIN THE NAMES OF THE DCDB DRUGS IN DRUGBANK ID #
#-------------------------------------------------------#
dcdb_to_drugbank_file = os.path.join(toolbox_dir, 'dcdb_to_drugbank.pcl')
check_file(dcdb_to_drugbank_file)
dcdb_to_drugbank = cPickle.load(open(dcdb_to_drugbank_file))
drugs_considered = set()
drugs_considered_drugbank = set()
for dcdbid in drugs_with_smiles:
if dcdbid in dcdb_to_drugbank:
drugs_considered.add(dcdbid)
for db in dcdb_to_drugbank[dcdbid]:
drugs_considered_drugbank.add(db)
print('The number of DCDB drugs with DrugBank ID considered is: {}\n'.format(len(drugs_considered)))
print('The number of DrugBank IDs considered is: {}\n'.format(len(drugs_considered_drugbank)))
#-------------------------------------------------#
# DEFINE ALL POSSIBLE CROSSINGS BETWEEN PAIRS #
#-------------------------------------------------#
# We copy the lists with list() because otherwise both names would point to the same object, and modifying one would modify the other
# This can also be done with copy.copy(), or copy.deepcopy() if the list contains objects that should be copied as well
# More info: http://stackoverflow.com/questions/2612802/how-to-clone-or-copy-a-list
list_of_drugs = list(drugs_considered)
list_of_drugs2 = list(drugs_considered)
drug_int_2_drugs = cPickle.load(open(int_to_drugs_file))
drug_int_2_info = cPickle.load(open(int_to_info_file))
crossings = set()
pair2comb = {}
dc = 0
non_dc = 0
n = 0
while (n < len(list_of_drugs)):
i = 0
while (i < len(list_of_drugs2)):
drug1 = list_of_drugs[n]
drug2 = list_of_drugs2[i]
if drug1 == drug2:
i+=1
continue
ddi_name1 = "%s---%s"%(drug1, drug2)
ddi_name2 = "%s---%s"%(drug2, drug1)
#print("%s vs. %s" %(drug1, drug2))
# We check that neither of the two possible names is already in the crossings set before adding it (not strictly necessary, but kept as a safety check)
if ddi_name1 not in crossings and ddi_name2 not in crossings:
crossings.add(ddi_name1)
i+=1
# We remove the first drug from the second list, so that we do not have to repeat pairings
list_of_drugs2.remove(drug1)
n+=1
print('There are {} possible DCDB crossings\n'.format(len(crossings)))
checking = len(list_of_drugs) * (len(list_of_drugs) - 1) / 2
if len(crossings) != checking:
print("THERE IS AN ERROR IN THE ANALYSIS. The number of crossings does not correspond to the theoretical number")
sys.exit(10)
#print(crossings)
#--------------------------------#
# TRANSLATE DCDB TO DRUGBANK #
#--------------------------------#
drugbank2targets = cPickle.load(open(drugbank2targets_file))
db_crossings = set()
for crossing in crossings:
drug1, drug2 = crossing.split('---')
db_drugs1 = dcdb_to_drugbank[drug1]
db_drugs2 = dcdb_to_drugbank[drug2]
if 'DB01258' in db_drugs1:
print(drug1)
print(dcdb2targets[drug1])
print(drugbank2targets['DB01258'])
if 'DB01258' in db_drugs2:
print(drug2)
print(dcdb2targets[drug2])
print(drugbank2targets['DB01258'])
for db_drug1 in db_drugs1:
for db_drug2 in db_drugs2:
db_crossing1 = '{}---{}'.format(db_drug1, db_drug2)
db_crossing2 = '{}---{}'.format(db_drug2, db_drug1)
if db_crossing1 not in db_crossings and db_crossing2 not in db_crossings:
db_crossings.add(db_crossing1)
pair2comb[db_crossing1] = 0 # We will introduce 0 if it is not a drug interaction
non_dc+=1
for drug_int in drug_int_2_drugs:
if drug1 in drug_int_2_drugs[drug_int] and drug2 in drug_int_2_drugs[drug_int]:
if drug_int_2_info[drug_int]['type'] == 'pharmacodynamical':
pair2comb[db_crossing1] = 1 # We will introduce 1 if it is a pharmacodynamical drug interaction
dc+=1
non_dc-=1
else:
pair2comb[db_crossing1] = 0 # We will introduce 0 if it is not a pharmacodynamical drug interaction
break
print('There are {} possible DrugBank crossings\n'.format(len(db_crossings)))
print('NUMBER OF PHARMACODYNAMICAL DRUG INTERACTIONS:\t\t{}\n'.format(dc))
print('NUMBER OF NON-PHARMACODYNAMICAL DRUG INTERACTIONS:\t{}\n'.format(non_dc))
# Save the dict containing if the pairings are drug combinations or not
cPickle.dump(pair2comb, open(dump_file, 'w'))
#------------------------------------------#
# GENERATE THE FILE WITH THE CROSSINGS #
#------------------------------------------#
with open(options.crossings_file,"w") as crossings_file_fd:
for pair in db_crossings:
crossings_file_fd.write("{}\n".format(pair))
#------------------------------------------------------------#
# GENERATE THE FILE WITH THE CROSSINGS OF THE COMPARISON #
#------------------------------------------------------------#
if not fileExist(pubchem2drugbank_file):
print( " DIANA INFO:\tCreating pickle file {}.\n".format( pubchem2drugbank_file ))
biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database'))
diana_drug.obtain_pubchem_to_drugbank(biana_cnx, config.get('BIANA', 'unification_protocol'), pubchem2drugbank_file)
biana_cnx.close()
original_crossings = set()
new_crossings = set()
pubchem_to_drugbank = cPickle.load(open(pubchem2drugbank_file))
comparison_file = os.path.join(toolbox_dir, 'DrugPairIndex_original.txt')
comparison_output_file = os.path.join(toolbox_dir, 'DrugPairIndex_drugbank.txt')
with open(comparison_file, 'r') as comparison_fd, open(comparison_output_file, 'w') as comparison_output_fd:
for line in comparison_fd:
fields = line.strip().split('\t')
pubchem1 = int(fields[0].split('CID')[1])
pubchem2 = int(fields[1].split('CID')[1])
original_crossings.add(frozenset([pubchem1, pubchem2]))
if pubchem1 in pubchem_to_drugbank and pubchem2 in pubchem_to_drugbank:
for db1 in pubchem_to_drugbank[pubchem1]:
for db2 in pubchem_to_drugbank[pubchem2]:
if db1 in drugs_considered_drugbank and db2 in drugs_considered_drugbank:
new_crossings.add(frozenset([db1, db2]))
comparison_output_fd.write('{}\t{}\n'.format( db1, db2 ))
print('In the initial comparison there were: {} crossings'.format(len(original_crossings)))
print('In the current comparison there are: {} crossings'.format(len(new_crossings)))
# End marker for time
end = time.time()
print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def check_file(file):
"""
Checks if a file exists and if not, raises FileNotFound exception
"""
if not fileExist(file):
raise FileNotFound(file)
class FileNotFound(Exception):
"""
Exception raised when a file is not found.
"""
def __init__(self, file):
self.file = file
def __str__(self):
return 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'.format(self.file)
if __name__ == "__main__":
main()
|
from .LetterGrade import LetterGrade
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @FileName: WEWORK_ops.py
# @Software:
# @Author: Leven Xiang
# @Mail: xiangle0109@outlook.com
# @Date: 2021/5/18 16:55
from __future__ import absolute_import, unicode_literals
import os
from pwdselfservice import cache_storage
from utils.storage.cache import WeWorkCache
from utils.wework_api.abstract_api import *
APP_ENV = os.getenv('APP_ENV')
if APP_ENV == 'dev':
from conf.local_settings_dev import *
else:
from conf.local_settings import *
CORP_API_TYPE = {
'GET_ACCESS_TOKEN': ['/cgi-bin/gettoken', 'GET'],
'USER_CREATE': ['/cgi-bin/user/create?access_token=ACCESS_TOKEN', 'POST'],
'USER_GET': ['/cgi-bin/user/get?access_token=ACCESS_TOKEN', 'GET'],
'USER_UPDATE': ['/cgi-bin/user/update?access_token=ACCESS_TOKEN', 'POST'],
'USER_DELETE': ['/cgi-bin/user/delete?access_token=ACCESS_TOKEN', 'GET'],
'USER_BATCH_DELETE': ['/cgi-bin/user/batchdelete?access_token=ACCESS_TOKEN', 'POST'],
'USER_SIMPLE_LIST': ['/cgi-bin/user/simplelist?access_token=ACCESS_TOKEN', 'GET'],
'USER_LIST': ['/cgi-bin/user/list?access_token=ACCESS_TOKEN', 'GET'],
'USERID_TO_OPENID': ['/cgi-bin/user/convert_to_openid?access_token=ACCESS_TOKEN', 'POST'],
'OPENID_TO_USERID': ['/cgi-bin/user/convert_to_userid?access_token=ACCESS_TOKEN', 'POST'],
'USER_AUTH_SUCCESS': ['/cgi-bin/user/authsucc?access_token=ACCESS_TOKEN', 'GET'],
'DEPARTMENT_CREATE': ['/cgi-bin/department/create?access_token=ACCESS_TOKEN', 'POST'],
'DEPARTMENT_UPDATE': ['/cgi-bin/department/update?access_token=ACCESS_TOKEN', 'POST'],
'DEPARTMENT_DELETE': ['/cgi-bin/department/delete?access_token=ACCESS_TOKEN', 'GET'],
'DEPARTMENT_LIST': ['/cgi-bin/department/list?access_token=ACCESS_TOKEN', 'GET'],
'TAG_CREATE': ['/cgi-bin/tag/create?access_token=ACCESS_TOKEN', 'POST'],
'TAG_UPDATE': ['/cgi-bin/tag/update?access_token=ACCESS_TOKEN', 'POST'],
'TAG_DELETE': ['/cgi-bin/tag/delete?access_token=ACCESS_TOKEN', 'GET'],
'TAG_GET_USER': ['/cgi-bin/tag/get?access_token=ACCESS_TOKEN', 'GET'],
'TAG_ADD_USER': ['/cgi-bin/tag/addtagusers?access_token=ACCESS_TOKEN', 'POST'],
'TAG_DELETE_USER': ['/cgi-bin/tag/deltagusers?access_token=ACCESS_TOKEN', 'POST'],
'TAG_GET_LIST': ['/cgi-bin/tag/list?access_token=ACCESS_TOKEN', 'GET'],
'BATCH_JOB_GET_RESULT': ['/cgi-bin/batch/getresult?access_token=ACCESS_TOKEN', 'GET'],
'BATCH_INVITE': ['/cgi-bin/batch/invite?access_token=ACCESS_TOKEN', 'POST'],
'AGENT_GET': ['/cgi-bin/agent/get?access_token=ACCESS_TOKEN', 'GET'],
'AGENT_SET': ['/cgi-bin/agent/set?access_token=ACCESS_TOKEN', 'POST'],
'AGENT_GET_LIST': ['/cgi-bin/agent/list?access_token=ACCESS_TOKEN', 'GET'],
'MENU_CREATE': ['/cgi-bin/menu/create?access_token=ACCESS_TOKEN', 'POST'],
'MENU_GET': ['/cgi-bin/menu/get?access_token=ACCESS_TOKEN', 'GET'],
'MENU_DELETE': ['/cgi-bin/menu/delete?access_token=ACCESS_TOKEN', 'GET'],
'MESSAGE_SEND': ['/cgi-bin/message/send?access_token=ACCESS_TOKEN', 'POST'],
'MESSAGE_REVOKE': ['/cgi-bin/message/revoke?access_token=ACCESS_TOKEN', 'POST'],
'MEDIA_GET': ['/cgi-bin/media/get?access_token=ACCESS_TOKEN', 'GET'],
'GET_USER_INFO_BY_CODE': ['/cgi-bin/user/getuserinfo?access_token=ACCESS_TOKEN', 'GET'],
'GET_USER_DETAIL': ['/cgi-bin/user/getuserdetail?access_token=ACCESS_TOKEN', 'POST'],
'GET_TICKET': ['/cgi-bin/ticket/get?access_token=ACCESS_TOKEN', 'GET'],
'GET_JSAPI_TICKET': ['/cgi-bin/get_jsapi_ticket?access_token=ACCESS_TOKEN', 'GET'],
'GET_CHECKIN_OPTION': ['/cgi-bin/checkin/getcheckinoption?access_token=ACCESS_TOKEN', 'POST'],
'GET_CHECKIN_DATA': ['/cgi-bin/checkin/getcheckindata?access_token=ACCESS_TOKEN', 'POST'],
'GET_APPROVAL_DATA': ['/cgi-bin/corp/getapprovaldata?access_token=ACCESS_TOKEN', 'POST'],
'GET_INVOICE_INFO': ['/cgi-bin/card/invoice/reimburse/getinvoiceinfo?access_token=ACCESS_TOKEN', 'POST'],
'UPDATE_INVOICE_STATUS':
['/cgi-bin/card/invoice/reimburse/updateinvoicestatus?access_token=ACCESS_TOKEN', 'POST'],
'BATCH_UPDATE_INVOICE_STATUS':
['/cgi-bin/card/invoice/reimburse/updatestatusbatch?access_token=ACCESS_TOKEN', 'POST'],
'BATCH_GET_INVOICE_INFO':
['/cgi-bin/card/invoice/reimburse/getinvoiceinfobatch?access_token=ACCESS_TOKEN', 'POST'],
'APP_CHAT_CREATE': ['/cgi-bin/appchat/create?access_token=ACCESS_TOKEN', 'POST'],
'APP_CHAT_GET': ['/cgi-bin/appchat/get?access_token=ACCESS_TOKEN', 'GET'],
'APP_CHAT_UPDATE': ['/cgi-bin/appchat/update?access_token=ACCESS_TOKEN', 'POST'],
'APP_CHAT_SEND': ['/cgi-bin/appchat/send?access_token=ACCESS_TOKEN', 'POST'],
'MINIPROGRAM_CODE_TO_SESSION_KEY': ['/cgi-bin/miniprogram/jscode2session?access_token=ACCESS_TOKEN', 'GET'],
}
class WeWorkOps(AbstractApi):
def __init__(self, corp_id=WEWORK_CORP_ID, agent_id=WEWORK_AGENT_ID, agent_secret=WEWORK_AGNET_SECRET, storage=cache_storage, prefix='wework'):
super().__init__()
self.corp_id = corp_id
self.agent_id = agent_id
self.agent_secret = agent_secret
self.storage = storage
self.cache = WeWorkCache(self.storage, "%s:%s" % (prefix, "corp_id:%s" % self.corp_id))
def access_token(self):
access_token = self.cache.access_token.get()
if access_token is None:
ret = self.get_access_token()
access_token = ret['access_token']
expires_in = ret.get('expires_in', 7200)
self.cache.access_token.set(value=access_token, ttl=expires_in)
return access_token
def get_access_token(self):
return self.http_call(
CORP_API_TYPE['GET_ACCESS_TOKEN'],
{
'corpid': self.corp_id,
'corpsecret': self.agent_secret,
})
def get_user_id_by_code(self, code):
try:
return True, self.http_call(
CORP_API_TYPE['GET_USER_INFO_BY_CODE'],
{
'code': code,
}).get('UserId')
except ApiException as e:
return False, "get_user_id_by_code: {}-{}".format(e.errCode, e.errMsg)
except Exception as e:
return False, "get_user_id_by_code: {}".format(e)
def get_user_detail_by_user_id(self, user_id):
try:
return True, self.http_call(
CORP_API_TYPE['USER_GET'],
{
'userid': user_id,
})
except ApiException as e:
return False, "get_user_detail_by_user_id: {}-{}".format(e.errCode, e.errMsg)
except Exception as e:
return False, "get_user_detail_by_user_id: {}".format(e)
if __name__ == '__main__':
wx = WeWorkOps()
print(wx.get_user_detail_by_user_id('XiangLe'))
|
# flake8: noqa
import supriya.synthdefs
import supriya.ugens
def test_SynthDefCompiler_rngs_01():
sc_synthdef = supriya.synthdefs.SuperColliderSynthDef(
"seedednoise",
r"""
arg rand_id=0, seed=0;
RandID.ir(rand_id);
RandSeed.ir(1, seed);
Out.ar(0, WhiteNoise.ar());
""",
)
sc_compiled_synthdef = sc_synthdef.compile()
with supriya.synthdefs.SynthDefBuilder(rand_id=0, seed=0) as builder:
supriya.ugens.RandID.ir(rand_id=builder["rand_id"])
supriya.ugens.RandSeed.ir(seed=builder["seed"], trigger=1)
source = supriya.ugens.WhiteNoise.ar()
supriya.ugens.Out.ar(bus=0, source=source)
py_synthdef = builder.build("seedednoise")
py_compiled_synthdef = py_synthdef.compile()
# fmt: off
test_compiled_synthdef = bytes(
b'SCgf'
b'\x00\x00\x00\x02'
b'\x00\x01'
b'\x0bseedednoise'
b'\x00\x00\x00\x02'
b'?\x80\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x02'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x02'
b'\x07rand_id'
b'\x00\x00\x00\x00'
b'\x04seed'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x05'
b'\x07Control'
b'\x01'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x02'
b'\x00\x00'
b'\x01'
b'\x01'
b'\x06RandID'
b'\x00'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x01'
b'\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00'
b'\x08RandSeed'
b'\x00'
b'\x00\x00\x00\x02'
b'\x00\x00\x00\x01'
b'\x00\x00'
b'\xff\xff\xff\xff'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x01'
b'\x00'
b'\nWhiteNoise'
b'\x02'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x01'
b'\x00\x00'
b'\x02'
b'\x03Out'
b'\x02'
b'\x00\x00\x00\x02'
b'\x00\x00\x00\x00'
b'\x00\x00'
b'\xff\xff\xff\xff'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x03'
b'\x00\x00\x00\x00'
b'\x00\x00'
)
# fmt: on
assert sc_compiled_synthdef == test_compiled_synthdef
assert py_compiled_synthdef == test_compiled_synthdef
|
# -*- coding: utf-8 -*-
"""Make database connection and set Table objects"""
from sqlalchemy import create_engine, Table, MetaData
import config
metadata = MetaData()
def engine():
engine = create_engine('mysql+pymysql://{username}:{password}@{host}/{database}'.format(
username=config.MySQL_DB['username'],
password=config.MySQL_DB['password'],
host=config.MySQL_DB['host'],
database=config.MySQL_DB['database']
), echo=False)
return engine
def run_table(engine):
run = Table('Run', metadata, autoload=True, autoload_with=engine)
return run
def run_per_lane_table(engine):
run_per_lane = Table('Run_per_Lane', metadata, autoload=True, autoload_with=engine)
return run_per_lane
def sample_sequencer_table(engine):
sample_sequencer = Table('Sample_Sequencer', metadata, autoload=True, autoload_with=engine)
return sample_sequencer
def sample_processed_table(engine):
sample_processed = Table('Sample_Processed', metadata, autoload=True, autoload_with=engine)
return sample_processed
def bait_set_table(engine):
bait_set = Table('Bait_Set', metadata, autoload=True, autoload_with=engine)
return bait_set
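# Illustrative usage sketch (assumes config.MySQL_DB is filled in and the tables already exist;
# Table(..., autoload=True, autoload_with=...) reflects the schema, SQLAlchemy 1.x style):
#   eng = engine()
#   run = run_table(eng)
#   with eng.connect() as conn:
#       for row in conn.execute(run.select().limit(5)):
#           print(row)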
|
import subprocess
def subprocess_cmd(command, DEBUG=False):
"""execute a bash command and return output"""
if DEBUG:
print "command: " + command
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
if DEBUG:
print "out: " + proc_stdout
return(proc_stdout)
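# Example (illustrative): run a shell command and capture its stripped stdout.
#   subprocess_cmd("echo hello")  # -> "hello" (returned as bytes on Python 3)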
|
class CPT:
def __init__(self):
self.name = 'CPT'
def __str__(self):
return self.name
|
#! /usr/bin/env python
import os
import numpy as np
import scipy
from astropy.io import fits
from astropy import units as u
import sys
import string
import nrm_analysis
from nrm_analysis.fringefitting.LG_Model import NRM_Model
from nrm_analysis.misctools import utils
from nrm_analysis import nrm_core, InstrumentData
from nrm_analysis import find_affine2d_parameters as FAP
from pathlib import Path
from nrm_analysis.misctools.utils import Affine2d
np.set_printoptions(precision=4, linewidth=160)
home = os.path.expanduser('~')
fitsimdir = home+"/data/implaneia/niriss_verification/test_all_residuals/"
if not os.path.exists(fitsimdir):
os.makedirs(fitsimdir)
mirexample = os.path.expanduser('~') + \
"/ImPlaneIA/example_data/example_niriss/" + \
"jw00793001001_01101_00001_nis_cal.fits"
fov = 79
filt="F430M"
lamc = 4.3e-6
oversample=11
bandpass = np.array([(1.0, lamc),])
pixelscale_as=0.0656
arcsec2rad = u.arcsec.to(u.rad)
PIXELSCALE_r = pixelscale_as * arcsec2rad
holeshape='hex'
datafiles = (fitsimdir+'all_effects_data_mir.fits',)
np.random.seed(100)
def default_printoptions():
np.set_printoptions(edgeitems=3, infstr='inf', linewidth=75, nanstr='nan', precision=8,
suppress=False, threshold=1000, formatter=None)
def cp_var(nh, cps):
""" True standard deviation given non-independent closure phases """
return ((cps - cps.mean())**2).sum() / scipy.special.comb(nh-1,2)
def ca_var(nh, cas):
""" True standard deviation given non-independent closure amplitudes """
return ((cas - cas.mean())**2).sum() / scipy.special.comb(nh-3, 2)
def examine_residuals(ff, trim=36):
""" input: FringeFitter instance after fringes are fit """
print("\n\texamine_residuals: FIT QUALITY:")
print(" Standard deviation & variance take into acount reduced DOF of all CP's and CAs")
print(" Closure phase mean {:+.4f} std dev {:.2e} var {:.2e}".format(ff.nrm.redundant_cps.mean(),
np.sqrt(cp_var(ff.nrm.N,ff.nrm.redundant_cps)),
cp_var(ff.nrm.N, ff.nrm.redundant_cps)))
print(" Closure ampl mean {:+.4f} std dev {:.2e} var {:.2e}".format(ff.nrm.redundant_cas.mean(),
np.sqrt(cp_var(ff.nrm.N,ff.nrm.redundant_cas)),
cp_var(ff.nrm.N, ff.nrm.redundant_cas)))
np.set_printoptions(precision=3, formatter={'float': lambda x: '{:+.1e}'.format(x)}, linewidth=80)
print(" Normalized residuals trimmed by {:d} pixels from each edge".format(trim))
print((ff.nrm.residual/ff.datapeak)[trim:-trim,trim:-trim])
print(" Normalized residuals max and min: {:.2e}, {:.2e}".format( ff.nrm.residual.max() / ff.datapeak,
ff.nrm.residual.min() / ff.datapeak))
default_printoptions()
def analyze_data(fitsfn, observables_dir="", affine2d=None,
psf_offset_find_rotation = (0.0,0.0),
psf_offset_ff = None,
rotsearch_d=None,
set_pistons=None):
"""
returns: affine2d (measured or input),
psf_offset_find_rotation (input),
psf_offset_ff (input or found),
fringe pistons/r (found)
"""
print("analyze_data: input file", fitsfn)
data = fits.getdata(fitsfn)
dim = data.shape[1]
mx, my, sx,sy, xo,yo, = (1.0,1.0, 0.0,0.0, 0.0,0.0)
if affine2d is None:
print(" analyze_data: Finding affine2d...")
affine2d = FAP.find_rotation(data[0,:,:], psf_offset_find_rotation,
rotsearch_d, mx, my, sx, sy, xo,yo,
PIXELSCALE_r, dim, bandpass, oversample, holeshape, outdir=fitsimdir)
print("analyze_data: Using measured affine2d...", affine2d.name)
else:
print("analyze_data: Using incoming affine2d ", affine2d.name)
niriss = InstrumentData.NIRISS(filt, bandpass=bandpass, affine2d=affine2d)
ff_t = nrm_core.FringeFitter(niriss, psf_offset_ff=psf_offset_ff, datadir=fitsimdir, savedir=fitsimdir+observables_dir,
oversample=oversample, interactive=False)
ff_t.fit_fringes(fitsfn)
examine_residuals(ff_t)
np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
print("analyze_data: fringepistons/rad", ff_t.nrm.fringepistons)
default_printoptions()
return affine2d, psf_offset_find_rotation, ff_t.nrm.psf_offset, ff_t.nrm.fringepistons
def simulate_data(affine2d=None, psf_offset_det=(0.0, 0.0), pistons_w=None):
print(" simulate_data: ")
jw = NRM_Model(mask='jwst', holeshape="hex")
jw.simulate(fov=fov, bandpass=bandpass, over=oversample, psf_offset=psf_offset_det)
fits.PrimaryHDU(data=jw.psf).writeto(fitsimdir+"all_effects_data.fits",overwrite=True)
#**********Convert simulated data to mirage format.*******
utils.amisim2mirage(fitsimdir, ("all_effects_data",), mirexample, filt)
if __name__ == "__main__":
identity = utils.Affine2d(rotradccw=utils.avoidhexsingularity(0.0),
name="affrot_{0:+.3f}deg".format(0.0))
no_pistons = np.zeros((7,)) * 1.0
_psf_offset_det = (0.48, 0.0)
no_psf_offset = (0.0, 0.0)
rot = 2.0
rot = utils.avoidhexsingularity(rot)
aff = utils.Affine2d(rotradccw=np.pi*rot/180.0, name="affrot_{0:+.3f}d".format(rot))
_rotsearch_d = np.arange(-3, 3.1, 1)
#std dev 1, 7 holes, diffraction-limited @ 2um we're at 4um
_pistons_w = 0.5 * np.random.normal(0,1.0, 7) / 14.0
simulate_data(affine2d=aff, psf_offset_det=_psf_offset_det, pistons_w=_pistons_w)
"""
Implaneia uses the center of the brightest pixel as the coordinate system to calculate psf offsets for fringefitting.
With non-zero pistons and a slight rotation, the offsets used to generate the verification data have a "true center" that is not inside the brightest pixel.
Hence a psf_offset of (-0.52, 0.0) in implaneia's local centroid-finding algorithm places the center in the pixel to the left of the brightest pixel,
which is the correct result.
Tests below are specific to analyzing data simulated with rot=2.0 deg, psf offsets (0.48, 0.0) and
pistons in waves = pistons_w - pistons_w.mean() where pistons_w = 0.5 * np.random.normal(0,1.0, 7) / 14.0
"""
args_odd_fov = [[None, (0.0,0.0), None, _rotsearch_d],
[None, (0.0,0.0), (-0.5199,0.0), _rotsearch_d],
[None, (0.48,0.0), None, _rotsearch_d],
[aff,(0.0,0.0),None, None],
[None, (0.48,0.0), (-0.5199,0.0), _rotsearch_d],
[aff, (0.48,0.0), (-0.5199,0.0), _rotsearch_d],
]
args_even_fov= [[None, (0.0,0.0), None, _rotsearch_d],
[None, (0.0,0.0), (-0.01983, -0.4998), _rotsearch_d],
[None, (0.48,0.0), None, _rotsearch_d],
[aff,(0.0,0.0),None, None],
[None, (0.48,0.0), (-0.01983, -0.4998), _rotsearch_d],
[aff, (0.48,0.0), (-0.01983, -0.4998), _rotsearch_d],
]
np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
for df in datafiles:
print("__main__: ")
np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
print(" analyzing", df)
data = fits.getdata(df)
if (data.shape[1] % 2) == 0:
args = args_even_fov
else:
args = args_odd_fov
#Simple case with one set of parameters.
#
#_aff, _psf_offset_r, _psf_offset_ff, fringepistons = \
# analyze_data(df, affine2d=None,
# psf_offset_find_rotation = (0.0,0.0),
# psf_offset_ff = None,
# rotsearch_d=_rotsearch_d)
#Analyze data with multiple sets of parameters
for iarg,arg in enumerate(args):
sys.stdout = open("driv_implane_out%d.txt"%iarg,'w')
print("\nanalyze_data arguments:", "set", iarg, ":", end=' ')
if arg[0] is not None: print("Affine2d", arg[0].name, end=' ')
else: print("Affine2d", None, end=' ')
print("psf_offset_find_rotation", arg[1], "psf_offset_ff", arg[2], "rotsearch_d",arg[3])
_aff, _psf_offset_r, _psf_offset_ff, fringepistons = \
analyze_data(df, "observables%d/"%iarg, affine2d=arg[0],
psf_offset_find_rotation = arg[1],
psf_offset_ff = arg[2],
rotsearch_d=arg[3])
print(" rotation search deg ",_rotsearch_d)
np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
print(" affine rot used ", _aff.name, )
np.set_printoptions(formatter={'float': lambda x: '{:+.3f}'.format(x)}, linewidth=80)
print(" input psf offsets ", np.array(_psf_offset_det))
print(" psf offset used to find rotation", np.array(_psf_offset_r))
print(" psf offset used by fringefitter ", np.array(_psf_offset_ff))
utils.compare_pistons(2*np.pi*(_pistons_w- _pistons_w.mean()), fringepistons)
print("implaneia output in: ", fitsimdir, "\n")
sys.stdout.close()
|
import os
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler
from .constants import *
from .routes import routes
class Server(BaseHTTPRequestHandler):
    def do_GET(self):
        # Map the request path to a view file, if a matching route exists
        for route, filename in routes.items():
            if urlparse(self.path).path.lstrip("/") == route.lstrip("/"):
                self.path = os.path.join(views_path, filename)
                break
        try:
            file_to_open = open(self.path).read()
            self.send_response(200)
        except OSError:
            file_to_open = "File not found!"
            self.send_response(404)
        self.end_headers()
        self.wfile.write(bytes(file_to_open, "utf-8"))
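# Hedged usage sketch (not part of the original module; host/port are illustrative):
# serve the routes with the standard-library HTTPServer.
#
#     from http.server import HTTPServer
#     HTTPServer(("localhost", 8000), Server).serve_forever()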
|
from .models import Location, Pokemon, Area, PokemonUser, Region
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from django.contrib.auth.password_validation import validate_password
class PokemonSerializer(serializers.ModelSerializer):
class Meta:
model = Pokemon
fields = (
'id',
'abilities',
'capture_rate',
'color',
'flavor_text',
'height',
'moves',
'name',
'sprites',
'stats',
'types',
'weight',
)
class AreaDetailsSerializer(serializers.ModelSerializer):
class Meta:
model = Area
fields = (
'id',
'location',
'name',
'pokemon_count',
)
class AreaListSerializer(serializers.ModelSerializer):
class Meta:
model = Area
fields = (
'id',
'location',
'name',
'pokemons',
)
class LocationDetailsSerializer(serializers.ModelSerializer):
areas = AreaDetailsSerializer(many=True, read_only=True)
class Meta:
model = Location
fields = (
'id',
'name',
'areas',
)
class LocationListSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = (
'id',
'name',
)
class RegionDetailsSerializer(serializers.ModelSerializer):
locations = LocationListSerializer(many=True, read_only=True)
class Meta:
model = Region
fields = (
'id',
'name',
'locations'
)
class RegionListSerializer(serializers.ModelSerializer):
class Meta:
model = Region
fields = (
'id',
'name',
)
class PokemonUserListSerializer(serializers.ModelSerializer):
specie = PokemonSerializer(read_only=True)
class Meta:
model = PokemonUser
fields = (
'id',
'nick_name',
'is_party_member',
'specie',
)
class PokemonUserDetailsSerializer(serializers.ModelSerializer):
class Meta:
model = PokemonUser
fields = (
'id',
'nick_name',
'is_party_member',
'specie',
)
class RegisterSerializer(serializers.ModelSerializer):
email = serializers.EmailField(
required=True,
validators=[UniqueValidator(queryset=User.objects.all())]
)
password = serializers.CharField(write_only=True, required=True, validators=[validate_password])
password2 = serializers.CharField(write_only=True, required=True)
class Meta:
model = User
fields = ('username', 'password', 'password2', 'email', 'first_name', 'last_name')
extra_kwargs = {
'first_name': {'required': True},
'last_name': {'required': True},
'username': {'required': True}
}
def validate(self, attrs):
if attrs['password'] != attrs['password2']:
raise serializers.ValidationError({"password": "Password fields didn't match."})
return attrs
def create(self, validated_data):
user = User.objects.create(
username=validated_data['username'],
email=validated_data['email'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
return user
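# Hedged usage sketch (field values are illustrative, not from the original code):
#
#     serializer = RegisterSerializer(data={
#         'username': 'ash', 'email': 'ash@example.com',
#         'password': 'str0ng-pass!', 'password2': 'str0ng-pass!',
#         'first_name': 'Ash', 'last_name': 'Ketchum',
#     })
#     if serializer.is_valid():
#         user = serializer.save()   # calls create() above and hashes the password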
|
import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
plt.ioff()
from keras import backend as K
from keras.models import load_model, Model
from keras.layers import Input, Dense, Flatten
from keras.constraints import UnitNorm
from keras.applications import VGG16
from aux_masks import gen_mask
import hdf5storage
from tqdm import tqdm
from model.network import AdaptiveInstanceNormalization
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "0";
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
K.set_learning_phase(False)
# Directories of pretrained models/data
model_dir = 'trained_models/lord-aviv/lord/model/'
data_loc = 'trained_models/lord/data/celeba_test.npz'
cbk_loc = 'trained_codebooks/aviv/one_sample_fixed.mat'
# Load data
data = np.load(data_loc)
x_d_test = np.copy(data['imgs'] / 255.)
y_d_test = np.copy(data['classes'])
# Rearrange y_test as ordinal classes (since absolute value of class doesn't matter)
_, y_d_test_ordinal = np.unique(y_d_test, return_inverse=True)
# Load model by parts
content_encoder = load_model(os.path.join(model_dir, 'content_encoder.h5py'))
class_encoder = load_model(os.path.join(model_dir, 'class_encoder.h5py'))
class_modulation = load_model(os.path.join(model_dir, 'class_modulation.h5py'))
generator = load_model(os.path.join(model_dir, 'generator.h5py'), custom_objects={
'AdaptiveInstanceNormalization': AdaptiveInstanceNormalization})
# Load modulation codebook
contents = hdf5storage.loadmat(cbk_loc)
person_mod_codebook = contents['frozen_class_mod']
# Instantiate and load VGGFace with VGG16 core
latent_dim = 128
input_img = Input(shape=(64, 64, 3))
core_model = VGG16(input_shape=(64, 64, 3), include_top=False)
encoded = core_model(input_img)
# Feature layer
encoded = Flatten()(encoded)
encoded = Dense(latent_dim, activation='linear', kernel_constraint=UnitNorm())(encoded)
# Create shared model
model = Model(input_img, encoded)
# Load weights
core_folder = 'trained_models/proposed'
core_weights = 'steps16_lr10.0_last'
target_weights = '%s/%s.h5' % (core_folder, core_weights)
model.load_weights(target_weights)
# Attack parameters
batch_size = 64
num_thresholds = 200 # For AUC
learning_rate = 1e-2
num_iterations = 1000
# Repetition parameters
random_start = True
num_restarts = 5
# Instant overwrite
if not random_start:
num_restarts = 1
# Mask type/size/style
mask_size = 10
mask_style = 'eyeglasses'
if mask_style == 'corner_patch' or mask_style == 'frame':
mask_name = '%s%d' % (mask_style, mask_size)
else:
mask_name = mask_style
# Does the defender use mirroring?
def_mirroring = True
# Does the defender pick the anchor?
def_anchor_pick = True
# Generate the mask
mask_np = gen_mask(batch_size, mask_style, mask_size)
# Tensors
if random_start:
delta = tf.Variable(tf.random.uniform(shape=(batch_size, 64, 64, 3),
minval=-0.5, maxval=0.5, dtype=tf.float32),
dtype=np.float32)
else:
delta = tf.Variable(np.zeros((batch_size, 64, 64, 3)), dtype=np.float32)
# Input tensors
x_input = tf.placeholder(dtype=np.float32, shape=(batch_size, 64, 64, 3))
x_target_features = tf.placeholder(dtype=np.float32, shape=(batch_size, latent_dim))
x_true_features = tf.placeholder(dtype=np.float32, shape=(batch_size, latent_dim))
# Tensor mask
loss_mask = tf.constant(mask_np, dtype=np.float32)
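# Descriptive note: delta is optimized in an unconstrained space and mapped back into the
# [0, 1] pixel range via tanh(.)/2 + 1/2 below; the matching inverse, arctanh((x - 1/2) * 2),
# is applied to the clean images when the feed dictionary is constructed further down.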
# Add adversarial noise
x_adv = tf.tanh(x_input + tf.multiply(loss_mask, delta))/2 + 1/2
# Mirror image
x_adv_mirror = tf.image.flip_left_right(x_adv)
# Get features
adv_features = model(x_adv)
mirror_features = model(x_adv_mirror)
# Feature loss
if def_mirroring:
feature_loss = (tf.reduce_sum(tf.square(adv_features - x_target_features), axis=-1) +\
tf.reduce_sum(tf.square(mirror_features - x_target_features), axis=-1)) / 2
# True feature loss
true_feature_loss = (tf.reduce_sum(tf.square(adv_features - x_true_features), axis=-1) +\
tf.reduce_sum(tf.square(mirror_features - x_true_features), axis=-1)) / 2
else:
feature_loss = tf.reduce_sum(tf.square(adv_features - x_target_features), axis=-1)
# True feature loss
true_feature_loss = tf.reduce_sum(tf.square(adv_features - x_true_features), axis=-1)
# Deviation loss
dev_loss = tf.reduce_sum(tf.square(tf.tanh(x_input)/2+1/2 - x_adv), axis=(1, 2, 3))
# Merge into single loss
target_loss = tf.reduce_sum(feature_loss)
# Adam optimizer
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
trainer = optimizer.minimize(target_loss, var_list=[delta])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
init = tf.variables_initializer(var_list=[delta]+new_vars)
# Create result directory
meta_folder = 'paper_attacks_indirect_%s' % (mask_name)
submeta_folder = 'mirror%d_anchor%d_random%d' % (def_mirroring, def_anchor_pick, random_start)
result_dir = '%s/%s/%s/%s' % (meta_folder, submeta_folder, core_folder, core_weights)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# Pick a target
np.random.seed(2020)
num_persons = 30
target_person_list = np.random.choice(np.max(y_d_test_ordinal)+1, replace=False, size=num_persons)
for target_person in target_person_list:
# Wipe variables
session.run(init)
local_target_mean = []
# Seed results
np.random.seed(target_person)
# Split person/others
x_target_person = x_d_test[y_d_test_ordinal == target_person]
x_others_person = x_d_test[y_d_test_ordinal != target_person]
# Number of images of the target person
num_samples_target = len(x_target_person)
# If there are too few images of the target, skip them
if num_samples_target < 10:
print('Skipping target %d, too few samples' % target_person)
continue
# Does the defender pick their anchor?
if def_anchor_pick:
# Pick each sample as the template - this exactly emulates the defender
for sample_idx in tqdm(range(num_samples_target)):
# Compare all images with the template
x_target_anchor = x_target_person[sample_idx]
x_target_pair = x_target_person[np.logical_not(np.isin(np.arange(num_samples_target), sample_idx))]
# Get their features
x_target_anchor_features = model.predict(x_target_anchor[None, :])
x_target_pair_features = model.predict(x_target_pair)
# Pairwise distances
target_feature_loss = np.sum(np.square(x_target_anchor_features - x_target_pair_features), axis=-1)
# Compute and store average distance
target_feature_mean = np.mean(target_feature_loss)
# Store separately
local_target_mean.append(target_feature_mean)
# Once we went through all samples, pick the best anchor
anchor_idx = np.argmin(local_target_mean)
else:
# Pick the first (equivalent to a random) sample to use as anchor
anchor_idx = 0
# Create a local directory
local_dir = result_dir + '/target%d_sample%d' % (target_person, anchor_idx)
if not os.path.exists(local_dir):
os.mkdir(local_dir)
# Use exactly the template the defender uses
x_target_attack = x_target_person[anchor_idx]
# However, reconstruct them with the autoencoder
x_target_content = content_encoder.predict(x_target_attack[None, :])
x_target_gen = generator.predict([x_target_content, person_mod_codebook[target_person][None, :]])
# And use those features as a proxy
x_target_gen_features = model.predict(x_target_gen)
# Distance is still measured to real features
x_target_real_features = model.predict(x_target_attack[None, :])
# Fetch a batch of images and a single target image
others_idx = np.random.choice(x_others_person.shape[0], size=batch_size, replace=False)
# Construct feed dictionary
feed_dict = {x_input: np.arctanh((x_others_person[others_idx] - 1/2) * 2 * 0.999999),
x_target_features: np.repeat(x_target_gen_features, batch_size, axis=0),
x_true_features: np.repeat(x_target_real_features, batch_size, axis=0)}
# Store losses across (potential) multiple runs
feature_loss_matrix = np.zeros((num_restarts, batch_size))
true_loss_matrix = np.zeros((num_restarts, batch_size))
dev_loss_matrix = np.zeros((num_restarts, batch_size))
x_adv_matrix = np.zeros((num_restarts, batch_size, 64, 64, 3))
# For each repetition
for rep_idx in range(num_restarts):
# Wipe graph
session.run(init)
# Verbose
print('Running restart %d.' % rep_idx)
# Run attack
for step_idx in range(num_iterations):
_, feature_loss_np, true_feature_loss_np, \
dev_loss_np, x_adv_np = session.run([trainer, feature_loss,
true_feature_loss,
dev_loss, x_adv],
feed_dict=feed_dict)
# Verbose
if np.mod(step_idx+1, num_iterations//10) == 0:
print('Iteration %d, Gen. Feature MSE %.3f, True Feature MSE %.3f, Deviation MSE %.3f' % (step_idx,
np.mean(feature_loss_np), np.mean(true_feature_loss_np), np.mean(dev_loss_np)))
# Store in meta arrays
        feature_loss_matrix[rep_idx] = feature_loss_np
        true_loss_matrix[rep_idx] = true_feature_loss_np
        dev_loss_matrix[rep_idx] = dev_loss_np
        x_adv_matrix[rep_idx] = x_adv_np
# After all repetitions, pick best solutions
winner_idx = np.argmin(true_loss_matrix, axis=0)
# Instantly overwrite
feature_loss_np = feature_loss_matrix[winner_idx, np.arange(batch_size)]
true_feature_loss_np = true_loss_matrix[winner_idx, np.arange(batch_size)]
dev_loss_np = dev_loss_matrix[winner_idx, np.arange(batch_size)]
x_adv_np = x_adv_matrix[winner_idx, np.arange(batch_size)]
# Plot
plt.figure()
plt.suptitle('Feature MSE = %.3f, Deviation MSE = %.3f' % (feature_loss_np[0], dev_loss_np[0]))
plt.subplot(2, 2, 1); plt.imshow(x_others_person[others_idx[0]]); plt.axis('off'); plt.title('Intruder Original')
plt.subplot(2, 2, 2); plt.imshow(x_adv_np[0]); plt.axis('off'); plt.title('Intruder Adversarial')
plt.tight_layout(rect=[0, 0., 1, 0.9])
plt.savefig(local_dir + '/attack.png', dpi=300)
# Save data
hdf5storage.savemat(local_dir + '/attack.mat', {'x_adv_np': x_adv_np,
'x_others_person': x_others_person[others_idx],
'adv_proxy_feature_loss': feature_loss_np,
'adv_true_feature_loss': true_feature_loss_np,
'dev_loss': dev_loss_np},
truncate_existing=True)
plt.close()
|
from django.core import management
from django.test import TestCase
from django.utils import six
class ModelValidationTest(TestCase):
def test_models_validate(self):
# All our models should validate properly
# Validation Tests:
# * choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
# * related_name='+' doesn't clash with another '+'
# See: https://code.djangoproject.com/ticket/21375
management.call_command("validate", stdout=six.StringIO())
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
# 0 | DEBUG | [Default] Print all messages
# 1 | INFO | Filter out INFO messages
# 2 | WARNING | Filter out INFO & WARNING messages
# 3 | ERROR | Filter out all messages
import pandas as pd
import numpy as np
import tensorflow as tf
# the tf verbosity setting below ALSO GOVERNS ALL OTHER LOGGERS!
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) # or any {DEBUG, INFO, WARN, ERROR, FATAL}
import random
import pickle
import spacy
import sys
sys.path.append('..')
from pathlib import Path
import logging
import configparser
import ui_utils
import nlp_tools
# allow gpu memory growth
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
config = configparser.ConfigParser()
config.read('./config_core_train.cfg')
OUTPUT_DIR = config.get('data', 'OUTPUT_DIR')
FILE_NOTE = config.get('data', 'FILE_NOTE')
APPLY_FILE = config.get('applying', 'APPLY_FILE')
CLF_THRESHOLD = float(config.get('applying', 'CLF_THRESHOLD'))
APPLY_BATCH_SIZE = int(config.get('applying', 'APPLY_BATCH_SIZE'))
OUTPUT_PATH = f"{OUTPUT_DIR}{FILE_NOTE}/"
SCORED_PATH = f"{OUTPUT_PATH}scored/"
Path(SCORED_PATH).mkdir(parents=True, exist_ok=True)
# initialize logger
root_logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s: %(levelname)s:: %(message)s')
# prints to file
logfile = f"{OUTPUT_PATH}apply_logs.log"
file_handler = logging.FileHandler(logfile, mode='a')
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.INFO)
print = root_logger.info
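# Note: rebinding print to the logger routes every subsequent print() call through both
# the file handler and the console handler configured above.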
print(">>> Using Tensorflow 2 with GPU for this scoring script:")
print(tf.__version__)
print(tf.config.experimental.list_physical_devices('GPU'))
gpus = tf.config.experimental.list_physical_devices('GPU')
print(">>> Loading apply data")
apply_df = pd.read_csv(APPLY_FILE)
print(">>> Loading pretrained tokenizer...")
with open(f'{OUTPUT_PATH}tokenizer.pickle', 'rb') as file:
tokenizer = pickle.load(file)
print('>>> Vocabulary size: {:,}'.format(len(tokenizer.word_index.items())))
print(">>> Loading trained models from model training output folder...")
taggers_dict = nlp_tools.obtain_tag_clfs(OUTPUT_PATH)
print(">>> Scoring the apply data with the trained models...")
labels_dict = nlp_tools.generate_labels_with_model_dict(
apply_df.Text, tokenizer,
taggers_dict, batch_size=APPLY_BATCH_SIZE, clf_threshold=CLF_THRESHOLD)
print(">>> Saving the results to output dir...")
apply_scored_df = pd.concat([apply_df, pd.DataFrame(labels_dict)], axis=1)
apply_scored_df.to_csv(f"{SCORED_PATH}scored_output.csv", index=False)
apply_scored_df.to_json(f"{SCORED_PATH}scored_output.json", orient='records', indent=2)
with open(f"{OUTPUT_PATH}apply_record.cfg", 'w') as file:
config.write(file)
|
# -*- coding: utf-8 -*-
"""
Created on Dec 20, 2011
@author: Tyranic-Moron
"""
from twisted.plugin import IPlugin
from pymoronbot.moduleinterface import IModule
from pymoronbot.modules.commandinterface import BotCommand, admin
from zope.interface import implementer
from pymoronbot.message import IRCMessage
from pymoronbot.response import IRCResponse, ResponseType
@implementer(IPlugin, IModule)
class Leave(BotCommand):
def triggers(self):
return ['leave', 'gtfo']
def help(self, query):
return "leave/gtfo - makes the bot leave the current channel"
@admin('Only my admins can tell me to leave')
def execute(self, message):
"""
@type message: IRCMessage
"""
if len(message.ParameterList) > 0:
return IRCResponse(ResponseType.Raw, 'PART {} :{}'.format(message.ReplyTo, message.Parameters), '')
else:
return IRCResponse(ResponseType.Raw, 'PART {} :toodles!'.format(message.ReplyTo), '')
leave = Leave()
|
#!/usr/bin/env python
# ROS python API
import rospy
import numpy as np
import set_point
import time
import scipy.linalg
import control
from tf.transformations import euler_from_quaternion
# 3D point & Stamped Pose msgs
from geometry_msgs.msg import Point, PoseStamped
from gazebo_msgs.msg import LinkStates
# import all mavros messages and services
from mavros_msgs.msg import *
from mavros_msgs.srv import *
class Controller:
def __init__(self):
#Drone state
self.state = State()
        self.uav_pos = Point(0.0, 0.0, 3.0)
        self.load_pos = Point(0.0, 0.0, 0.0)
        self.uav_vel = Point(0.0, 0.0, 0.0)
        self.load_vel = Point(0.0, 0.0, 0.0)
        # UAV attitude (roll, pitch, yaw), filled in by get_position()
        self.uav_att = Point(0.0, 0.0, 0.0)
# Instantiate a attitude setpoints message
self.sp = AttitudeTarget()
# set the flag to use body_rates and thrust
#http://docs.ros.org/api/mavros_msgs/html/msg/AttitudeTarget.html
self.sp.type_mask = int('10000000', 2)
self.sp.body_rate.x = 0
self.sp.body_rate.y = 0
self.sp.body_rate.z = 0
self.sp.thrust = 0
# Desired rotational rate for UAV(rad/s)
self.omega = np.pi
# Instantiate a position setpoints message
self.pos_sp = PositionTarget()
self.pos_sp.type_mask = int('010111111000', 2)
self.pos_sp.coordinate_frame = 1
self.pos_sp.position.x = 0
self.pos_sp.position.y = 0
self.pos_sp.position.z = 3
# Step size for position update
self.STEP_SIZE = 2.0
# Fence. We will assume a square fence for now
self.FENCE_LIMIT = 5.0
# We will fly at a fixed altitude for now
self.ALT_SP = 3.0
# parameters of the system
self.l = 4.01 #length of the tether
self.r = 3.0 #radius of the UAV circle
#self.p0 = 0.8 #radius of the load circle
self.g = 9.80665 #gravity
def init_position(self):
self.pos_sp.position.x = 3.0
self.pos_sp.position.y = 0
self.pos_sp.position.z = 5.0
## Drone State callback
def stateCb(self, msg):
self.state = msg
    def get_position(self, msg):
        # Payload (load) state is measured relative to the UAV; the gazebo link_states
        # indices (28 for the load, 1 for the UAV) follow the original commented-out code.
        self.load_pos.x = msg.pose[28].position.x - msg.pose[1].position.x
        self.load_pos.y = msg.pose[28].position.y - msg.pose[1].position.y
        self.load_pos.z = msg.pose[28].position.z
        self.uav_pos.x = msg.pose[1].position.x
        self.uav_pos.y = msg.pose[1].position.y
        self.uav_pos.z = msg.pose[1].position.z
        self.load_vel.x = msg.twist[28].linear.x - msg.twist[1].linear.x
        self.load_vel.y = msg.twist[28].linear.y - msg.twist[1].linear.y
        self.load_vel.z = msg.twist[28].linear.z
        self.uav_vel.x = msg.twist[1].linear.x
        self.uav_vel.y = msg.twist[1].linear.y
        self.uav_vel.z = msg.twist[1].linear.z
orientation_q = msg.pose[1].orientation
orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
(roll, pitch, yaw) = euler_from_quaternion(orientation_list)
self.uav_att.x = roll
self.uav_att.y = pitch
self.uav_att.z = yaw
    def cal_x(self):
        uav_pos = np.array([self.uav_pos.x, self.uav_pos.y, self.uav_pos.z])
        load_pos = np.array([self.load_pos.x, self.load_pos.y])
        uav_vel = np.array([self.uav_vel.x, self.uav_vel.y, self.uav_vel.z])
        load_vel = np.array([self.load_vel.x, self.load_vel.y])
        rot_matrix1 = np.array([[np.cos(self.omega*self.t), np.sin(self.omega*self.t), 0], [-np.sin(self.omega*self.t), np.cos(self.omega*self.t), 0], [0, 0, 1]])
        rot_matrix2 = np.array([[np.sin(self.omega*self.t), -np.cos(self.omega*self.t), 0], [np.cos(self.omega*self.t), np.sin(self.omega*self.t), 0], [0, 0, 0]])
        inv_uav_pos = np.dot(rot_matrix1, uav_pos)
        inv_uav_vel = np.dot(rot_matrix1, uav_vel) - self.omega*np.dot(rot_matrix2, uav_pos)
        inv_load_pos = np.dot(rot_matrix1[:2,:2], load_pos)
        inv_load_vel = np.dot(rot_matrix1[:2,:2], load_vel) - self.omega*np.dot(rot_matrix2[:2,:2], load_pos)
        # 10-element state vector, matching the 10x10 A and 10x3 B matrices built in cal_AB()
        self.lqr_x = np.matrix([inv_load_pos[0], inv_load_vel[0], inv_load_pos[1], inv_load_vel[1], inv_uav_pos[0],
                                inv_uav_vel[0], inv_uav_pos[1], inv_uav_vel[1], inv_uav_pos[2], inv_uav_vel[2]]).T
        #print("lqr_x",self.lqr_x)
def cal_AB(self):
#calc all the components
xi = np.sqrt(self.l**2-self.p0**2)
a0 = np.sqrt(self.g**2+self.omega**4*self.r**2)
mu0 = np.arctan(-self.omega**2*self.r/self.g)
p1 = xi**2/self.l**2*(self.omega**2+self.g/xi**3*(4*self.p0-self.l**2))
p2 = xi**2/self.l**2*2*self.omega
p3 = xi**2/self.l**2*(self.p0/xi*a0*np.sin(mu0)-a0*np.cos(mu0))
p4 = -xi**2/self.l**2*(self.p0/xi*np.cos(mu0)-np.sin(mu0))
q1 = self.omega**2-self.g/xi
q2 = -2*self.omega
q3 = a0
u1 = np.sin(mu0)
u2 = a0*np.cos(mu0)
u3 = 2*self.omega
u4 = self.omega**2
v1 = -a0
v2 = -2*self.omega
v3 = self.omega**2
w1 = np.cos(mu0)
w2 = -a0*np.sin(mu0)
self.A = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[p1,0, 0, p2,0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, q2,q1,0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, u4,0, 0,u3, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0,v2,v3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
self.B = np.array([[0, 0, 0],
[p3,0,p4],
[0, 0, 0],
[0,q3, 0],
[0, 0, 0],
[u2,0,u1],
[0, 0, 0],
[0,v1, 0],
[0, 0, 0],
[w2,0,w1]])
#print("A",self.A)
#print("B",self.B)
def lqr(self):
Q = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
#Q = np.zeros([10,10])
R = 1000*np.eye(3)
K, S, E = control.lqr(self.A, self.B, Q, R)
#P = np.matrix(scipy.linalg.solve_discrete_are(self.A, self.B, Q, R))
self.u = -scipy.linalg.inv(R)*(self.B.T*(S*self.lqr_x))
#self.u = -np.matrix(scipy.linalg.inv(self.B.T*P*self.B+R)*(self.B.T*P*self.A))*self.lqr_x
#print("S",S)
#print("B",self.B)
#print("x",self.lqr_x)
#print("u",self.u)
def cal_omegas(self):
gamma = np.arcsin(np.sin(float(self.u[1]))*np.cos(self.omega*self.t) - np.sin(float(self.u[0]))*np.cos(float(self.u[1]))*np.sin(self.omega*self.t))
print("gamma",gamma)
beta = np.arccos(np.cos(float(self.u[0]))*np.cos(float(self.u[1]))/np.cos(gamma))
print("beta",beta)
gamma_dot = (self.r*self.omega**3*np.arccos(gamma)*np.cos(self.omega*self.t))/np.sqrt(self.g**2+self.omega**4*self.r**2)
beta_dot = self.r*self.omega**3*np.arccos(gamma)*(np.tan(beta)*np.tan(gamma)*np.cos(self.omega*self.t)+np.arccos(beta)*np.sin(self.omega*self.t))/np.sqrt(self.g**2+self.omega**4*self.r**2)
rot_matrix = np.array([[np.cos(beta)*np.cos(gamma),-np.sin(gamma),0],[np.cos(beta)*np.sin(gamma),np.cos(gamma),0],[-np.sin(beta),0,1]])
euler = np.array([gamma_dot,beta_dot,0])
self.omegas = np.dot(rot_matrix,euler)
def output(self,t):
self.t = t
self.cal_x()
self.cal_AB()
self.lqr()
self.cal_omegas()
self.sp.body_rate.x = self.omegas[0]
self.sp.body_rate.y = self.omegas[1]
self.sp.body_rate.z = self.omegas[2]
self.sp.thrust = float(self.u[2])/15.56
print("omegas",self.omegas[0],self.omegas[1],self.omegas[2],float(self.u[2]))
def main():
# initiate node
rospy.init_node('controller_node', anonymous=True)
# flight mode object
modes = set_point.fcuModes()
# flight Controller
cnt = Controller()
rate = rospy.Rate(20.0)
# Subscribe to drone state
rospy.Subscriber('mavros/state', State, cnt.stateCb)
# Subscribe to UAV and Payload Position
rospy.Subscriber('/gazebo/link_states', LinkStates, cnt.get_position)
# Attitude Setpoint publisher
at_pub = rospy.Publisher('mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
# Position Setpoint Publisher
pos_pub = rospy.Publisher('mavros/setpoint_raw/local', PositionTarget, queue_size=1)
# Make sure the drone is armed
while not cnt.state.armed:
modes.setArm()
rate.sleep()
print('armed')
# We need to send few setpoint messages, then activate OFFBOARD mode, to take effect
k = 0
while k<10:
pos_pub.publish(cnt.pos_sp)
rate.sleep()
k = k + 1
# activate OFFBOARD mode
modes.setOffboardMode()
# wait until the altitude of UAV is 5m
cnt.init_position()
while cnt.uav_pos.z<5 or cnt.uav_pos.x<1 or cnt.uav_pos.x>3:
print("uav_pos_z:",cnt.uav_pos.z)
print("load_pos_z:",cnt.load_pos.z)
print("uav_x",cnt.uav_pos.x)
print("uav_y",cnt.uav_pos.y)
print("load_x",cnt.load_pos.x)
print("load_y",cnt.load_pos.y)
print("----------------")
pos_pub.publish(cnt.pos_sp)
print("reached")
time.sleep(0.1)
# ROS main loop
t_start = time.time()
cnt.p0 = cnt.uav_pos.x - cnt.load_pos.x
while not rospy.is_shutdown():
t = time.time() - t_start
cnt.output(t)
at_pub.publish(cnt.sp)
print("----------------")
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
from __future__ import print_function
import tensorflow as tf
from sklearn.neighbors import NearestNeighbors
import numpy as np
from tqdm import tqdm
import time
def nn_dist(train_set, query_set, exclude_self):
# Flatten
train_set = np.reshape(train_set, [train_set.shape[0], -1])
query_set = np.reshape(query_set, [query_set.shape[0], -1])
# Create and query model
print('Creating model')
start = time.time()
model = NearestNeighbors(n_neighbors=2 if exclude_self else 1, algorithm='ball_tree').fit(train_set)
print('Took {} seconds'.format(time.time() - start))
print('Querying model')
start = time.time()
dists, _ = model.kneighbors(query_set)
print('Took {} seconds'.format(time.time() - start))
# If specified, exclude first nearest neighbor (duplicate) if it is nonzero
if exclude_self:
dists_excluded = []
for dist0, dist1 in dists:
if dist0 == 0:
dists_excluded.append(dist1)
else:
dists_excluded.append(dist0)
dists = dists_excluded
else:
dists = dists[:, 0]
return np.mean(dists), np.std(dists)
if __name__ == '__main__':
import argparse
import cPickle as pickle
import os
import random
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--train_set', type=str,
help='Set to train KNN model')
parser.add_argument('--query_set', type=str,
help='Query set for KNN model')
args = parser.parse_args()
with open(args.train_set, 'rb') as f:
train_set = pickle.load(f)
with open(args.query_set, 'rb') as f:
query_set = pickle.load(f)
mean, std = nn_dist(train_set, query_set, args.train_set == args.query_set)
print('Similarity: {} +- {}'.format(mean, std))
|
"""
https://docs.microsoft.com/en-us/graph/api/resources/identityset
"""
from .base_data import BaseData
from .identity import Identity
import attr
@attr.s(auto_attribs=True)
class IdentitySet(BaseData):
"""
The IdentitySet resource is a keyed collection of identity resources.
It is used to represent a set of identities associated with various events for an item,
such as created by or last modified by.
"""
application: Identity = None
device: Identity = None
user: Identity = None
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
"""This is a pre-commit hook that checks whether the contents of PO files
committed to the repository are encoded in UTF-8.
"""
import codecs
import string
import sys
import subprocess
from svn import core, fs, delta, repos
# Set to the path of the 'msgfmt' executable to use msgfmt to check
# the syntax of the po file
USE_MSGFMT = None
if USE_MSGFMT is not None:
class MsgFmtChecker:
def __init__(self):
self.pipe = subprocess.Popen([USE_MSGFMT, "-c", "-o", "/dev/null", "-"],
stdin=subprocess.PIPE,
close_fds=sys.platform != "win32")
self.io_error = 0
def write(self, data):
if self.io_error:
return
try:
self.pipe.stdin.write(data)
except IOError:
self.io_error = 1
def close(self):
try:
self.pipe.stdin.close()
except IOError:
self.io_error = 1
return self.pipe.wait() == 0 and not self.io_error
else:
class MsgFmtChecker:
def write(self, data):
pass
def close(self):
return 1
class ChangeReceiver(delta.Editor):
def __init__(self, txn_root, base_root, pool):
self.txn_root = txn_root
self.base_root = base_root
self.pool = pool
def add_file(self, path, parent_baton,
copyfrom_path, copyfrom_revision, file_pool):
return [0, path]
def open_file(self, path, parent_baton, base_revision, file_pool):
return [0, path]
def apply_textdelta(self, file_baton, base_checksum):
file_baton[0] = 1
# no handler
return None
def close_file(self, file_baton, text_checksum):
changed, path = file_baton
if len(path) < 3 or path[-3:] != '.po' or not changed:
# This is not a .po file, or it hasn't changed
return
try:
# Read the file contents through a validating UTF-8 decoder
subpool = core.svn_pool_create(self.pool)
checker = MsgFmtChecker()
try:
stream = core.Stream(fs.file_contents(self.txn_root, path, subpool))
reader = codecs.getreader('UTF-8')(stream, 'strict')
writer = codecs.getwriter('UTF-8')(checker, 'strict')
while True:
data = reader.read(core.SVN_STREAM_CHUNK_SIZE)
if not data:
break
writer.write(data)
if not checker.close():
sys.exit("PO format check failed for '" + path + "'")
except UnicodeError:
sys.exit("PO file is not in UTF-8: '" + path + "'")
finally:
core.svn_pool_destroy(subpool)
def check_po(pool, repos_path, txn):
def authz_cb(root, path, pool):
return 1
fs_ptr = repos.fs(repos.open(repos_path, pool))
txn_ptr = fs.open_txn(fs_ptr, txn, pool)
txn_root = fs.txn_root(txn_ptr, pool)
base_root = fs.revision_root(fs_ptr, fs.txn_base_revision(txn_ptr), pool)
editor = ChangeReceiver(txn_root, base_root, pool)
e_ptr, e_baton = delta.make_editor(editor, pool)
repos.dir_delta(base_root, '', '', txn_root, '',
e_ptr, e_baton, authz_cb, 0, 1, 0, 0, pool)
if __name__ == '__main__':
assert len(sys.argv) == 3
core.run_app(check_po, sys.argv[1], sys.argv[2])
|
__version__ = '1.0.0b11'
from .performance_list import PerformanceList
from .entity import Entity
from .meet import Meet
from .race import Race
from .runner import Runner
from .horse import Horse
from .jockey import Jockey
from .trainer import Trainer
from .performance import Performance
from .provider import Provider
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch> , 2012
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2013
from nose.tools import assert_true, assert_false
from rucio.api.permission import has_permission
from rucio.core.scope import add_scope
from rucio.tests.common import scope_name_generator
class TestPermissionCoreApi():
def setup(self):
self.usr = 'jdoe'
def tearDown(self):
pass
def test_permission_add_did(self):
""" PERMISSION(CORE): Check permission to add a did"""
scope = scope_name_generator()
add_scope(scope=scope, account='root')
assert_true(has_permission(issuer='panda', action='add_did', kwargs={'scope': scope}))
assert_false(has_permission(issuer='spock', action='add_did', kwargs={'scope': scope}))
def test_permission_add_account(self):
""" PERMISSION(CORE): Check permission to add account """
assert_true(has_permission(issuer='root', action='add_account', kwargs={'account': 'account1'}))
        assert_false(has_permission(issuer=self.usr, action='add_account', kwargs={'account': 'account1'}))
def test_permission_add_scope(self):
""" PERMISSION(CORE): Check permission to add scope """
assert_true(has_permission(issuer='root', action='add_scope', kwargs={'account': 'account1'}))
assert_false(has_permission(issuer=self.usr, action='add_scope', kwargs={'account': 'root'}))
assert_true(has_permission(issuer=self.usr, action='add_scope', kwargs={'account': self.usr}))
def test_permission_get_auth_token_user_pass(self):
""" PERMISSION(CORE): Check permission to get_auth_token_user_pass """
assert_true(has_permission(issuer='root', action='get_auth_token_user_pass', kwargs={'account': 'root', 'username': 'ddmlab', 'password': 'secret'}))
assert_false(has_permission(issuer='root', action='get_auth_token_user_pass', kwargs={'account': self.usr, 'username': 'ddmlab', 'password': 'secret'}))
def test_permission_get_auth_token_x509(self):
""" PERMISSION(CORE): Check permission to get_auth_token_x509 """
dn = '/C=CH/ST=Geneva/O=CERN/OU=PH-ADP-CO/CN=DDMLAB Client Certificate/emailAddress=ph-adp-ddm-lab@cern.ch'
assert_true(has_permission(issuer='root', action='get_auth_token_x509', kwargs={'account': 'root', 'dn': dn}))
assert_false(has_permission(issuer='root', action='get_auth_token_x509', kwargs={'account': self.usr, 'dn': dn}))
def test_permission_get_auth_token_gss(self):
""" PERMISSION(CORE): Check permission to get_auth_token_gss """
gsscred = 'ddmlab@CERN.CH'
assert_true(has_permission(issuer='root', action='get_auth_token_gss', kwargs={'account': 'root', 'gsscred': gsscred}))
assert_false(has_permission(issuer='root', action='get_auth_token_gss', kwargs={'account': self.usr, 'gsscred': gsscred}))
|
'''
Description:
Given an array of integers A sorted in non-decreasing order, return an array of the squares of each number, also in sorted non-decreasing order.
Example 1:
Input: [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Example 2:
Input: [-7,-3,2,3,11]
Output: [4,9,9,49,121]
Note:
1 <= A.length <= 10000
-10000 <= A[i] <= 10000
A is sorted in non-decreasing order.
'''
from typing import List
from collections import deque
class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
left, right = 0, len(A)-1
output = deque()
while left <= right:
if abs( A[left] ) >= abs( A[right] ):
output.appendleft( A[left]**2 )
left += 1
else:
output.appendleft( A[right]**2 )
right -= 1
return [ *output ]
# n : the length of input list, A
## Time Complexity: O( n )
#
# The overhead in time is the cost of linear iteration, which is of O( n )
## Space Complexity: O( n )
#
# The overhead in space is the storage for output, which is of O( n )
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'sequence')
def test_bench():
test_data = [
TestEntry( sequence = [-4,-1,0,3,10] ),
TestEntry( sequence = [-7,-3,2,3,11] ),
]
# expected output:
'''
[0, 1, 9, 16, 100]
[4, 9, 9, 49, 121]
'''
for t in test_data:
print( Solution().sortedSquares( t.sequence) )
return
if __name__ == '__main__':
test_bench()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import hparams as hp
class StepwiseMonotonicMultiheadAttention(nn.Module):
""" Stepwise Monotonic Multihead Attention
args:
        n_head (int): number of monotonic attention heads
d_model (int): dimension of model (attention)
d_k (int): dimension of key
d_v (int): dimension of value
        noise_std (float): standard deviation for input noise
dropout (float): dropout probability for attention weights
"""
def __init__(self, d_model, d_k, d_v,
noise_std=1.0,
n_head=hp.sma_head,
dropout=hp.sma_dropout,
is_tunable=hp.sma_tunable):
super(StepwiseMonotonicMultiheadAttention, self).__init__()
self.n_head = n_head
self.noise_std = noise_std
self.energy = MultiheadEnergy(n_head, d_model, d_k, d_v)
self.dropout = nn.Dropout(dropout)
self.last_layer = nn.Linear(n_head*d_v, d_model)
self.layer_norm = nn.LayerNorm(d_model)
self.is_tunable = is_tunable
def add_gaussian_noise(self, xs, std):
"""Add Gaussian noise to encourage discreteness."""
noise = xs.new_zeros(xs.size()).normal_(std=std)
return xs + noise
def expectation(self, e, aw_prev, n_head):
"""
e --- [batch*n_head, qlen, klen]
aw_prev --- [batch*n_head, qlen, 1]
See https://gist.github.com/mutiann/38a7638f75c21479582d7391490df37c
See https://github.com/hirofumi0810/neural_sp/blob/093bfade110d5a15a4f7a58fffe8d235acbfe14f/neural_sp/models/modules/mocha.py#L430
"""
batch_size, qlen, klen = aw_prev.size(0)//n_head, e.size(1), e.size(2)
# Compute probability sampling matrix P
p_sample = torch.sigmoid(self.add_gaussian_noise(e, self.noise_std) if self.training else e) # [batch*n_head, qlen, klen]
alpha = []
# Compute recurrence relation solution along mel frame domain
for i in range(klen):
p_sample_i = p_sample[:, :, i:i + 1]
pad = torch.zeros([batch_size*n_head, 1, 1], dtype=aw_prev.dtype).to(aw_prev.device)
aw_prev = aw_prev * p_sample_i + torch.cat(
(pad, aw_prev[:, :-1, :] * (1.0 - p_sample_i[:, :-1, :])), dim=1)
alpha.append(aw_prev)
alpha = torch.cat(alpha, dim=-1) if klen > 1 else alpha[-1] # [batch*n_head, qlen, klen]
assert not torch.isnan(alpha).any(), "NaN detected in alpha."
return alpha, p_sample
def focused_head(self, multihead, mel_len):
"""
Apply focus rate to select the best diagonal head.
multihead --- [batch*n_heads, seq_len, mel_len]
mel_len --- [batch,]
return --- [batch, seq_len, mel_len]
"""
# [batch*n_heads, seq_len, mel_len] -> [batch, n_heads, seq_len, mel_len]
multihead = multihead.reshape(self.n_head, -1, multihead.size(1), multihead.size(2)).transpose(0, 1)
focus_rate = torch.max(multihead, dim=2)[0].sum(dim=-1)/(mel_len.unsqueeze(1)) # [batch, n_heads]
h_idx = torch.argmax(focus_rate, dim=1) # [batch,]
batch=list()
fr_max=0
for b, fr, i in zip(multihead, focus_rate, h_idx):
batch.append(b[i])
fr_max += fr[i].detach().item()
return torch.stack(batch), fr_max/h_idx.size(0)
def repeat_mask_multihead(self, mask):
"""
Repeat mask over multihead.
mask --- [batch, qlen, klen]
return --- [batch*n_head, qlen, klen]
"""
return mask.repeat(self.n_head, 1, 1)
def forward(self, q, k, v, mel_len, mask=None, query_mask=None, aw_prev=None):
batch_size, qlen, klen = q.size(0), q.size(1), k.size(1)
if mask is not None:
mask = self.repeat_mask_multihead(mask)
# Calculate energy
e, v = self.energy(q, k, v, mask) # [batch*n_head, qlen, klen], [batch*n_head, klen, d_v]
# Get alpha
alpha_cv = F.softmax(e, dim=-1) # [batch*n_head, qlen, klen]
# Masking to ignore padding (query side)
if query_mask is not None:
query_mask = self.repeat_mask_multihead(query_mask.repeat(1, 1, klen))
alpha_cv = alpha_cv.masked_fill(query_mask, 0.)
# Get focused alpha
alpha_fc, fr_max = self.focused_head(alpha_cv, mel_len) # [batch, qlen, klen]
if self.is_tunable:
# Monotonic enhancement
if aw_prev is None:
aw_prev = k.new_zeros(batch_size, qlen, 1) # [batch, qlen, 1]
aw_prev[:, 0:1] = k.new_ones(batch_size, 1, 1) # initialize with [1, 0, 0 ... 0]
alpha_me, _ = self.expectation(alpha_fc, aw_prev, 1) # [batch, qlen, klen]
# Calculate context vector
v = v.reshape(self.n_head, batch_size, klen, -1).permute(1, 2, 0, 3) # [batch, klen, n_head, d_v]
cv = torch.bmm(alpha_me, v.reshape(batch_size, klen, -1)) # [batch, qlen, n_head*d_v]
else:
# Calculate normal multihead attention
cv = torch.bmm(alpha_cv, v).reshape(self.n_head, batch_size, qlen, -1).permute(1, 2, 0, 3) # [batch, qlen, n_head, d_v]
cv = cv.reshape(batch_size, qlen, -1) # [batch, qlen, n_head*d_v]
cv = self.dropout(self.last_layer(cv))
cv = self.layer_norm(cv)
return cv, alpha_fc, fr_max
class MultiheadEnergy(nn.Module):
""" Energy function for the (monotonic) multihead attention """
def __init__(self, n_head, d_model, d_k, d_v):
super(MultiheadEnergy, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
self.temperature = np.power(d_k, 0.5)
def scaled_dot_product(self, q, k):
sdp = torch.bmm(q, k.transpose(1, 2)) # (n*b) x lq x lk
sdp = sdp / self.temperature
return sdp
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1,
len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1,
len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1,
len_v, d_v) # (n*b) x lv x dv
# Compute monotonic multihead energy
e = self.scaled_dot_product(q, k) # (n*b) x lq x lk
# Masking to ignore padding
if mask is not None:
NEG_INF = float(np.finfo(torch.tensor(0, dtype=e.dtype).numpy().dtype).min)
e = e.masked_fill(mask, NEG_INF)
return e, v
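# Hedged usage sketch (shapes are illustrative; n_head, dropout and the tunable flag
# default to the values in hparams):
#
#     attn = StepwiseMonotonicMultiheadAttention(d_model=256, d_k=64, d_v=64)
#     q = torch.randn(2, 50, 256)     # e.g. text-side queries
#     kv = torch.randn(2, 400, 256)   # e.g. mel-side keys/values
#     mel_len = torch.tensor([400, 380])
#     cv, alpha_fc, fr_max = attn(q, kv, kv, mel_len)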
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import expected_game_score
import pyspiel
def main(_):
game = pyspiel.load_game("kuhn_poker")
cfr_solver = cfr.CFRSolver(game)
iterations = 1000
for i in range(iterations):
cfr_value = cfr_solver.evaluate_and_update_policy()
print("Game util at iteration {}: {}".format(i, cfr_value))
average_policy = cfr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
print("Computed player 0 value: {}".format(average_policy_values[0]))
print("Expected player 0 value: {}".format(-1 / 18))
if __name__ == "__main__":
app.run(main)
|
from collections import namedtuple
import numpy as np
import torch
fields = ('state', 'action', 'next_state', 'reward', 'done', 'weight', 'index')
Transition = namedtuple('Transition', fields)
Transition.__new__.__defaults__ = (None,) * len(Transition._fields)
def to_tensor(ndarray, requires_grad=False):
return torch.from_numpy(ndarray).float().requires_grad_(requires_grad)
def feature_scaling(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def softmax(x):
    # subtract the max before exponentiating for numerical stability (result is unchanged)
    e_x = np.exp(x - np.max(x))
    return e_x / np.sum(e_x)
def soft_update(target, source, tau):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + source_param.data * tau)
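# Hedged usage sketch (module shapes are illustrative): Polyak-average an online
# network's parameters into a target network.
#
#     target_net = torch.nn.Linear(4, 2)
#     online_net = torch.nn.Linear(4, 2)
#     soft_update(target_net, online_net, tau=0.005)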
|
"""Errors
CarpetBag Errors that might be thrown when problems happen.
"""
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class EmptyProxyBag(Error):
"""Raised when the ProxyBag is empty as we request from it."""
pass
class InvalidContinent(Error):
"""Raised when an unknown continent is supplied by the user."""
pass
class NoRemoteServicesConnection(Error):
"""Raised when CarpetBag cannot talk to bad-actor.services."""
pass
class CannotOverwriteFile(Error):
"""Raised when trying to download a file to a local location that already has a file by the requested name."""
pass
# EndFile: carpetbag/carpetbag/errors.py
|
import re
from sdmx.model import PACKAGE, MaintainableArtefact
# Regular expression for URNs
URN = re.compile(
r"urn:sdmx:org\.sdmx\.infomodel"
r"\.(?P<package>[^\.]*)"
r"\.(?P<class>[^=]*)=((?P<agency>[^:]*):)?"
r"(?P<id>[^\(\.]*)(\((?P<version>[\d\.]*)\))?"
r"(\.(?P<item_id>.*))?"
)
_BASE = (
"urn:sdmx:org.sdmx.infomodel.{package}.{obj.__class__.__name__}="
"{ma.maintainer.id}:{ma.id}({ma.version}){extra_id}"
)
def make(obj, maintainable_parent=None):
"""Create an SDMX URN for `obj`.
If `obj` is not :class:`.MaintainableArtefact`, then `maintainable_parent`
must be supplied in order to construct the URN.
"""
if maintainable_parent:
ma = maintainable_parent
extra_id = f".{obj.id}"
else:
ma = obj
extra_id = ""
if not isinstance(ma, MaintainableArtefact):
raise ValueError(
f"Neither {repr(obj)} nor {repr(maintainable_parent)} are maintainable"
)
return _BASE.format(
package=PACKAGE[obj.__class__], obj=obj, ma=ma, extra_id=extra_id
)
def match(string):
return URN.match(string).groupdict()
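# Hedged example (the URN value is illustrative):
#
#     match("urn:sdmx:org.sdmx.infomodel.codelist.Codelist=ECB:CL_FREQ(1.0)")
#     # -> {'package': 'codelist', 'class': 'Codelist', 'agency': 'ECB',
#     #     'id': 'CL_FREQ', 'version': '1.0', 'item_id': None}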
|
# coding: utf-8
# Student: Misael Augusto
# Student ID: 117110525
# Problem: Custo Empregado (employee cost)
salario_base = float(raw_input())
dias_trabalhados = int(raw_input())
transporte_diario = float(raw_input())
custo_transporte = dias_trabalhados * transporte_diario
if salario_base <= 1317.07:
INSS = 0.08 * salario_base
elif 1317.08 <= salario_base <= 2195.12:
INSS = 0.09 * salario_base
else:
INSS = 0.11 * salario_base
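# Worked example (illustrative): a base salary of R$ 2000.00 falls in the 9% bracket,
# so INSS = 0.09 * 2000.00 = R$ 180.00.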
if custo_transporte > (0.06 * salario_base):
transporte_empregado = 0.06 * salario_base
transporte_empregador = custo_transporte - (0.06 * salario_base)
else:
transporte_empregador = 0
transporte_empregado = 0
custo_empregador = (salario_base * 0.20) + transporte_empregador + salario_base
custo_empregado = INSS + transporte_empregado
print "O salário base é R$ %.2f" % salario_base
print "O custo mensal para o empregador é de R$ %.2f" % custo_empregador
print "O salário líquido que o trabalhador irá receber no mês é R$ %.2f" % (salario_base - custo_empregado)
|
import datetime
from decimal import Decimal
from textwrap import dedent
from beancount.core.data import Amount, Balance
import pytest
from beancount_dkb import CreditImporter
from beancount_dkb.credit import FIELDS
CARD_NUMBER = '1234********5678'
HEADER = ';'.join('"{}"'.format(field) for field in FIELDS)
def _format(string, kwargs):
return dedent(string).format(**kwargs).lstrip()
@pytest.fixture
def tmp_file(tmp_path):
return tmp_path / f'{CARD_NUMBER}.csv'
def test_multiple_headers(tmp_file):
importer = CreditImporter(CARD_NUMBER, 'Assets:DKB:Credit')
common = '''
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
'''
# previous header format
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
{common}
''',
dict(card_number=CARD_NUMBER, common=common),
)
)
with tmp_file.open() as fd:
assert importer.identify(fd)
# latest header format
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number}";
{common}
''',
dict(card_number=CARD_NUMBER, common=common),
)
)
with tmp_file.open() as fd:
assert importer.identify(fd)
def test_identify_correct(tmp_file):
importer = CreditImporter(CARD_NUMBER, 'Assets:DKB:Credit')
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
''',
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
with tmp_file.open() as fd:
assert importer.identify(fd)
def test_identify_invalid_iban(tmp_file):
other_iban = '5678********1234'
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
''',
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(other_iban, 'Assets:DKB:Credit')
with tmp_file.open() as fd:
assert not importer.identify(fd)
def test_extract_no_transactions(tmp_file):
importer = CreditImporter(CARD_NUMBER, 'Assets:DKB:Credit')
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
''',
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
with tmp_file.open() as fd:
directives = importer.extract(fd)
assert len(directives) == 1
assert isinstance(directives[0], Balance)
assert directives[0].date == datetime.date(2018, 1, 31)
assert directives[0].amount == Amount(Decimal('5000.01'), currency='EUR')
def test_extract_transactions(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
with tmp_file.open() as fd:
directives = importer.extract(fd)
assert len(directives) == 2
assert directives[0].date == datetime.date(2018, 1, 15)
assert len(directives[0].postings) == 1
assert directives[0].postings[0].account == 'Assets:DKB:Credit'
assert directives[0].postings[0].units.currency == 'EUR'
assert directives[0].postings[0].units.number == Decimal('-10.80')
def test_extract_sets_timestamps(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
assert not importer._date_from
assert not importer._date_to
assert not importer._balance_amount
with tmp_file.open() as fd:
directives = importer.extract(fd)
assert directives
assert importer._date_from == datetime.date(2018, 1, 1)
assert importer._date_to == datetime.date(2018, 1, 31)
assert importer._balance_date == datetime.date(2018, 1, 31)
def test_extract_with_zeitraum(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Zeitraum:";"seit der letzten Abrechnung";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
assert not importer._date_from
assert not importer._date_to
assert not importer._balance_amount
with tmp_file.open() as fd:
directives = importer.extract(fd)
assert directives
assert not importer._date_from
assert not importer._date_to
assert importer._balance_date == datetime.date(2018, 1, 31)
def test_file_date_with_zeitraum(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Zeitraum:";"seit der letzten Abrechnung";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
assert not importer._date_from
assert not importer._date_to
assert not importer._balance_amount
with tmp_file.open() as fd:
assert importer.file_date(fd) == datetime.date(2018, 1, 30)
def test_emits_closing_balance_directive(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2018";
"Bis:";"31.01.2018";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
with tmp_file.open() as fd:
directives = importer.extract(fd)
assert len(directives) == 2
assert isinstance(directives[1], Balance)
assert directives[1].date == datetime.date(2018, 1, 31)
assert directives[1].amount == Amount(Decimal('5000.01'), currency='EUR')
def test_file_date_is_set_correctly(tmp_file):
tmp_file.write_text(
_format(
'''
"Kreditkarte:";"{card_number} Kreditkarte";
"Von:";"01.01.2016";
"Bis:";"31.01.2016";
"Saldo:";"5000.01 EUR";
"Datum:";"30.01.2018";
{header};
"Ja";"15.01.2018";"15.01.2018";"REWE Filiale Muenchen";"-10,80";"";
''', # NOQA
dict(card_number=CARD_NUMBER, header=HEADER),
)
)
importer = CreditImporter(
CARD_NUMBER, 'Assets:DKB:Credit', file_encoding='utf-8'
)
with tmp_file.open() as fd:
assert importer.file_date(fd) == datetime.date(2016, 1, 31)
|
# -*- coding: utf-8 -*-
import math
import logging
import numpy as np
import phandim
EPS = 1.0e-4
def invariant(shot, the_range, steps, nr):
"""
Check phantom parameters
Parameters
----------
shot: float
shot position, mm
the_range: (float,float)
phantom range, (min,max), mm
steps: (float,float)
steps to do in phantom, (small,large), mm
nr: int
number of small steps, defined by collimator size
returns: boolean
True if ok, False otherwise
"""
rmin, rmax = the_range
smin, smax = steps
if (np.isnan(shot)):
return False
if (np.isnan(rmin)):
return False
if (np.isnan(rmax)):
return False
if (rmax <= rmin):
return False
if (np.isnan(smin)):
return False
if (np.isnan(smax)):
return False
if (smin > smax):
return False
if (nr < 1):
return False
return True
def build_one_boundary(shot, the_range, steps, nr):
"""
    Build phantom one-dimension boundaries from shot position and min/max range
Parameters
----------
shot: float
shot position, mm
the_range: (float,float)
phantom range, (min,max), mm
steps: (float,float)
steps to do in phantom, (small,large), mm
nr: integer
number of small steps, defined by collimator size
returns: array
phantom one dimension boundaries
"""
logging.info("building one boundary")
if (not invariant(shot, the_range, steps, nr)):
raise ValueError("build_one_boundary", "invariant failed")
rmin, rmax = the_range
smin, smax = steps
# we know shot position is within the range
# going backward
bs = []
# first, with small steps
pos = shot
for k in range(0, nr+1):
pos = shot - float(k) * smin
bs.append(pos)
if math.fabs(pos - rmin) < EPS:
break
if pos < rmin:
break
# now large steps, continue from previous position
while True:
pos = pos - smax
bs.append(pos)
if math.fabs(pos - rmin) < EPS:
break
if pos < rmin:
break
# revert the list
bs.reverse()
# going forward
# first, with small steps
for k in range(1, nr+1):
pos = shot + float(k) * smin
bs.append(pos)
if math.fabs(pos - rmax) < EPS:
break
if pos > rmax:
break
# now large steps, continue from previous position
while True:
pos = pos + smax
bs.append(pos)
if math.fabs(pos - rmax) < EPS:
break
if pos > rmax:
break
logging.info("done building one boundary")
return bs
def build_phandim(shot, x_range, y_range, z_range, steps, nr):
"""
Build phantom dimensions from shot position and min/max ranges
Parameters
----------
shot: (float,float)
shot Y,Z position, mm
x_range: (float,float)
phantom X range, (min,max), mm
y_range: (float,float)
phantom Y range, (min,max), mm
z_range: (float,float)
phantom Z range, (min,max), mm
steps: (float,float)
steps to do in phantom, (small,large), mm
nr: integer
number of small steps, defined by collimator size
returns: phandim
phantom dimensions object
"""
logging.info("building phandim")
logging.debug(str(shot))
logging.debug(str(x_range))
logging.debug(str(y_range))
logging.debug(str(z_range))
logging.debug(str(steps))
logging.debug(str(nr))
ys, zs = shot
# X boundaries, shot position always at 0
bx = build_one_boundary(0.0, x_range, steps, nr)
by = build_one_boundary( ys, y_range, steps, nr)
bz = build_one_boundary( zs, z_range, steps, nr)
logging.info("done building phandim")
return phandim.phandim(bx, by, bz)
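# Hedged usage sketch (values are illustrative): boundaries spaced with small steps of
# 1 mm for nr steps around the shot position and large steps of 5 mm out to the range edges.
#
#     bs = build_one_boundary(shot=0.0, the_range=(-50.0, 50.0), steps=(1.0, 5.0), nr=10)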
|
from __future__ import absolute_import
from __future__ import print_function
from tqdm import tqdm
import pickle as pkl
import hashlib
import os
import argparse
import pandas as pd
def formatter(x):
try:
x = float(x)
return '{:.1f}'.format(x)
    except (ValueError, TypeError):
return x
def main():
parser = argparse.ArgumentParser(description='Recursively produces hashes for all tables inside this directory')
parser.add_argument('--directory', '-d', type=str, required=True, help='The directory to hash.')
parser.add_argument('--output_file', '-o', type=str, default='hashes.pkl')
args = parser.parse_args()
print(args)
    # count the total number of files (used as the progress bar total below)
total = 0
    for subdir, dirs, files in tqdm(os.walk(args.directory), desc='Counting files'):
total += len(files)
# change directory to args.directory
initial_dir = os.getcwd()
os.chdir(args.directory)
# iterate over all subdirectories
hashes = {}
pbar = tqdm(total=total, desc='Iterating over files')
for subdir, dirs, files in os.walk('.'):
for file in files:
pbar.update(1)
# skip files that are not csv
extension = file.split('.')[-1]
if extension != 'csv':
continue
full_path = os.path.join(subdir, file)
df = pd.read_csv(full_path, index_col=False)
# convert all numbers to floats with fixed precision
for col in df.columns:
df[col] = df[col].apply(formatter)
# sort by the first column that has unique values
for col in df.columns:
if len(df[col].unique()) == len(df):
df = df.sort_values(by=col).reset_index(drop=True)
break
# convert the data frame to string and hash it
df_str = df.to_string().encode()
hashcode = hashlib.md5(df_str).hexdigest()
hashes[full_path] = hashcode
pbar.close()
# go to the initial directory and save the results
os.chdir(initial_dir)
with open(args.output_file, 'wb') as f:
pkl.dump(hashes, f)
if __name__ == "__main__":
main()
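# Example invocation (file name and paths are hypothetical):
#   python hash_tables.py --directory ./experiment_results -o hashes.pkl
# Every *.csv under the directory is normalized (floats rounded to one decimal,
# rows sorted by the first all-unique column) and its MD5 hash is stored in the
# pickled dict keyed by the file's relative path.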
|
from flask import current_app as app
import random
class Product:
def __init__(
self,
id,
name,
description,
category,
price,
is_available,
creator_id,
image
):
self.id = id
self.name = name
self.price = price
self.is_available = is_available
self.description = description
self.category = category
self.creator_id = creator_id
self.image = image
@staticmethod
def get(id):
rows = app.db.execute('''
SELECT id, name, description, category, price, is_available, creator_id, image
FROM Product
WHERE id = :id
''',
id=id)
        return Product(*(rows[0])) if rows else None
    # method to return products served by a specific on-campus restaurant venue
@staticmethod
def get_specific(seller_affiliation, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, Sells.is_available, creator_id, image
FROM Product
RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE seller_affiliation = :seller_affiliation AND Sells.is_available = True
ORDER BY id
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
seller_affiliation=seller_affiliation,
page_num=page_num)
return [Product(*row) for row in rows] if rows is not None else None
# method to return all available products
@staticmethod
def get_all(is_available=True):
rows = app.db.execute('''
SELECT id, name, description, category, price, Sells.is_available, creator_id, image
FROM Product
RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE Sells.is_available = :is_available
''',
is_available=is_available)
return [Product(*row) for row in rows]
@staticmethod
def get_categories(is_available=True):
rows = app.db.execute('''
SELECT DISTINCT category
FROM Product
RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE Sells.is_available = :is_available
''',
is_available=is_available)
return rows
# method to return filtered view of venue products by category (entrées, beverages, etc.)
@staticmethod
def filteredCat(seller_affiliation, category, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, Sells.is_available, creator_id, image
FROM Product
RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE seller_affiliation = :seller_affiliation AND Sells.is_available = True AND category = :category
ORDER BY id
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
seller_affiliation=seller_affiliation,
category=category,
page_num=page_num)
return [Product(*row) for row in rows]
# method to filter by price (ordered from lowest to highest)
@staticmethod
def filteredPrice(seller_affiliation, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, Sells.is_available, creator_id, image
FROM Product RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE seller_affiliation = :seller_affiliation AND Sells.is_available = True
ORDER BY price ASC
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
seller_affiliation=seller_affiliation,
page_num=page_num)
return [Product(*row) for row in rows]
# method to return filtered view of venue products by their current average rating
@staticmethod
def filteredRating(stars, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, is_available, creator_id, image
FROM Product FULL OUTER JOIN Feedback ON Product.id = Feedback.product_id
GROUP BY id
HAVING AVG(rating) >= :stars
ORDER BY id
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
stars=stars,
page_num=page_num)
return [Product(*row) for row in rows]
# method to return filtered view of venue products by search query
@staticmethod
def search_filter(search, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, is_available, creator_id, image
FROM Product
WHERE LOWER(name) LIKE '%' || LOWER(:search) || '%'
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
search=search,
page_num=page_num)
return [Product(*row) for row in rows]
@staticmethod
def search_id(id, page_num):
rows = app.db.execute('''
SELECT DISTINCT id, name, description, category, price, Sells.is_available, creator_id, image
FROM Product
RIGHT OUTER JOIN Sells ON Product.id=Sells.product_id
WHERE id = :id
AND Sells.is_available = true
LIMIT 20
OFFSET ((:page_num - 1) * 20)
''',
id=id,
page_num=page_num)
return [Product(*row) for row in rows]
@staticmethod
def add_product(name, description, price, category, image, current_user):
rows = app.db.execute('''
INSERT INTO Product (name, description, category, price, is_available, image, creator_id)
VALUES (:name, :description, :category, :price, :is_available, :image, :creator_id)
RETURNING id
''',
name=name,
description=description,
category=category,
price=price,
image=image,
creator_id=current_user.id,
is_available=True
)
id = rows[0][0]
return id
@staticmethod
def update_name(product_id, name):
rows = app.db.execute_with_no_return('''
UPDATE Product
SET name = :name
WHERE id = :product_id
''',
name=name,
product_id=product_id)
@staticmethod
def update_description(product_id, description):
rows = app.db.execute_with_no_return('''
UPDATE Product
SET description = :description
WHERE id = :product_id
''',
description=description,
product_id=product_id)
@staticmethod
def update_price(product_id, price):
rows = app.db.execute_with_no_return('''
UPDATE Product
SET price = :price
WHERE id = :product_id
''',
price=price,
product_id=product_id)
@staticmethod
def update_category(product_id, category):
rows = app.db.execute_with_no_return('''
UPDATE Product
SET category = :category
WHERE id = :product_id
''',
category=category,
product_id=product_id)
@staticmethod
def update_availability(product_id, available):
rows = app.db.execute_with_no_return('''
UPDATE Product
SET is_available = :available
WHERE id = :product_id
''',
available=available,
product_id=product_id)
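    # Usage sketch (requires an active Flask app context providing app.db;
    # 'WU' below is a hypothetical seller_affiliation, not a real venue code):
    #   first_page = Product.get_specific('WU', 1)       # 20 items via LIMIT/OFFSET
    #   cheapest   = Product.filteredPrice('WU', 1)      # same page size, ordered by price
    #   matches    = Product.search_filter('coffee', 1)  # case-insensitive name search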
|
# Default Python Libraries
import asyncio
from asyncio.base_events import BaseEventLoop
from multiprocessing.pool import ThreadPool
from typing import Dict
from .client_service import AristaFlowClientService
from .configuration import Configuration
from .rest_helper import RestPackageRegistry
from .service_provider import ServiceProvider
class AristaFlowClientPlatform(object):
"""Entry point to the AristaFlow Python Client framework."""
# thread pool for async requests
__async_thread_pool: ThreadPool = None
__push_event_loop: BaseEventLoop = None
def __init__(self, configuration: Configuration):
self.configuration = configuration
        self.__client_services: Dict[str, AristaFlowClientService] = {}
self.__rest_package_registry = RestPackageRegistry(configuration)
self.__async_thread_pool = ThreadPool(configuration.async_thread_pool_size)
self.__push_event_loop = asyncio.new_event_loop()
self.__async_thread_pool.apply_async(self._start_event_loop)
def get_client_service(self, user_session: str = "python_default_session"):
"""
:return: AristaFlowClientService The client service for the given user session
"""
if user_session in self.__client_services:
return self.__client_services[user_session]
cs = AristaFlowClientService(
self.configuration,
user_session,
ServiceProvider(
self.__rest_package_registry, self.__async_thread_pool, self.__push_event_loop
),
)
self.__client_services[user_session] = cs
return cs
def _start_event_loop(self):
"""
Starts the asyncio event loop for handling push notifications
"""
try:
asyncio.set_event_loop(self.__push_event_loop)
self.__push_event_loop.run_forever()
finally:
self.__push_event_loop.close()
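# Usage sketch: the Configuration constructor argument below is an assumption
# for illustration, not the framework's documented API surface.
#   configuration = Configuration(base_url="https://aristaflow.example/api")
#   platform = AristaFlowClientPlatform(configuration)
#   client_service = platform.get_client_service()  # defaults to "python_default_session"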
|
# ---------------------------------------------------------------------------
# Unified Panoptic Segmentation Network
#
# Copyright (c) 2018-2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
import math
import torch
from torch.optim.optimizer import Optimizer, required
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
Sutskever et. al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
v = \rho * v + g \\
p = p - lr * v
where p, g, v and :math:`\rho` denote the parameters, gradient,
velocity, and momentum respectively.
This is in contrast to Sutskever et. al. and
other frameworks which employ an update of the form
.. math::
v = \rho * v + lr * g \\
p = p - v
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
assert dampening == 0, "not implemented"
super(SGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
    def step(self, lr, closure=None):
        """Performs a single optimization step.
        Arguments:
            lr (float): multiplier applied on top of each parameter group's
                base learning rate (the effective step uses ``group['lr'] * lr``).
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = (
p.data.new().resize_as_(p.data).zero_())
buf.mul_(momentum).add_(group['lr'] * lr, d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(group['lr'] * lr, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-1, d_p)
return loss
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(Adam, self).__init__(params, defaults)
    def step(self, lr, closure=None):
        """Performs a single optimization step.
        Arguments:
            lr (float): multiplier applied on top of each parameter group's
                base learning rate (the effective step uses ``lr * group['lr']``).
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = (lr * group['lr'] * math.sqrt(bias_correction2)
/ bias_correction1)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
def clip_grad(parameters, clip_val):
    r"""Clamps the gradients of an iterable of parameters element-wise.
    Every gradient entry is clipped in-place to ``[-clip_val, clip_val]``;
    this is value clipping, not global norm clipping.
    Arguments:
        parameters (Iterable[Variable]): an iterable of Variables whose
            gradients will be clamped in-place
        clip_val (float): maximum absolute value allowed for any gradient entry
    """
parameters = list(filter(lambda p: p.grad is not None, parameters))
for p in parameters:
p.grad.data.clamp_(-clip_val, clip_val)
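# Usage sketch (model, loss, and warmup_factor are placeholders defined elsewhere):
# unlike torch.optim.SGD, step() here takes an explicit lr multiplier, so the
# effective learning rate is group['lr'] * lr for every parameter group.
#   optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
#   loss.backward()
#   clip_grad(model.parameters(), clip_val=5.0)  # element-wise clamp of gradients
#   optimizer.step(lr=warmup_factor)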
|
#!/usr/bin/python3
import os
import sys
import csv
import pickle
## Need this line so Atom can run it
os.chdir('/home/andres/Programs/python/covid/scripts')
#print(os.getcwdb())
def load_obj(name):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_coords(country):
if country in country_dict:
return country_dict[country]
else: return False
country_dict = load_obj('country_coords')
def main():
with open('../data/full_data.csv') as csvData:
file = csv.reader(csvData, delimiter=',')
headers = ['name','date','new_cases','new_deaths','total_cases','total_deaths']
with open('../data/full_data_enrd.csv', 'w') as csvOut:
writer = csv.DictWriter(csvOut, fieldnames=headers)
writer.writeheader()
next(file)
sorted_file = sorted(file, key=lambda row: row[0], reverse=True)
country = None
date = '2020-04-01'
for line in sorted_file:
print (line)
if line[1] != country and line[0] == date:
country = line[1]
date = line[0]
new_line = []
if get_coords(country):
new_line.append(country)
new_line.append(line[0])
new_line.append(line[2])
new_line.append(line[3])
new_line.append(line[4])
new_line.append(line[5])
line_dict = {k:v for k,v in zip(headers,new_line)}
writer.writerow(line_dict)
print(new_line)
if __name__ == '__main__':
sys.exit(main())
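# Input assumption inferred from the indexing above: full_data.csv is expected
# to have columns in the order date, location, new_cases, new_deaths,
# total_cases, total_deaths; only countries present in the pickled
# country_coords dictionary are written to full_data_enrd.csv.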
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from PIL import Image
import time
import os
import json
version = torch.__version__
######################################################################
# Options
# --------
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--data_dir',default='/home/zzd/Market/pytorch',type=str, help='training dir path')
parser.add_argument('--train_all', action='store_true', help='use all training data' )
parser.add_argument('--color_jitter', action='store_true', help='use color jitter in training' )
parser.add_argument('--batchsize', default=128, type=int, help='batchsize')
opt = parser.parse_args()
data_dir = opt.data_dir
######################################################################
# Load Data
# ---------
#
transform_train_list = [
#transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)
transforms.Resize((288,144), interpolation=3),
#transforms.RandomCrop((256,128)),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
transform_val_list = [
transforms.Resize(size=(256,128),interpolation=3), #Image.BICUBIC
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
print(transform_train_list)
data_transforms = {
'train': transforms.Compose( transform_train_list ),
'val': transforms.Compose(transform_val_list),
}
train_all = ''
if opt.train_all:
train_all = '_all'
image_datasets = {}
image_datasets['train'] = datasets.ImageFolder(os.path.join(data_dir, 'train' + train_all),
data_transforms['train'])
image_datasets['val'] = datasets.ImageFolder(os.path.join(data_dir, 'val'),
data_transforms['val'])
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
shuffle=True, num_workers=16)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
use_gpu = torch.cuda.is_available()
######################################################################
# prepare_dataset
# ------------------
#
# The function below does not train a model: it makes a single pass over the
# training images and accumulates the per-channel mean and the average
# per-image standard deviation, then prints both divided by the dataset size.
# These statistics are typically fed into transforms.Normalize.
def prepare_model():
since = time.time()
num_epochs = 1
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train']:
mean = torch.zeros(3)
std = torch.zeros(3)
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
now_batch_size,c,h,w = inputs.shape
mean += torch.sum(torch.mean(torch.mean(inputs,dim=3),dim=2),dim=0)
std += torch.sum(torch.std(inputs.view(now_batch_size,c,h*w),dim=2),dim=0)
print(mean/dataset_sizes['train'])
print(std/dataset_sizes['train'])
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
return
prepare_model()
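# The two printed tensors are the dataset's per-channel mean and average
# per-image std. They are normally pasted into transforms.Normalize for the
# real training run, e.g. (placeholder numbers, not computed results):
#   transforms.Normalize([0.41, 0.39, 0.40], [0.20, 0.19, 0.19])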
|
x_axis = 0
y_axis = 0
z_axis = 0
f_option = 0
w_option = 0
k_option = 0
m_option = 0
s_option = 0
str_option = 0
slh_option = 0
dis_option = 0
Weapon = []
Jennifer_Dead = False
Jennifer_Body = False
Goblin_Village = True
GoblinK_Maim = False
GoblinP_Maim = False
GoblinP_Dead = False
GoblinK_Dead = False
room2 = False
Dead_Village = False
Gold = 0
Honor = 0
Infamy = 0
Strength = 0
Stealth = 0
Finese = 0
Justice = 0
Bloodlust = 0
while True:
if x_axis == 0 and y_axis == 0 and len(Weapon) == 0:
        answer = input("You wake up in a dark forest. You can remember nothing. As you start to get up, two strange beasts approach. They resemble wolves, but they are twice as large. Around you, there is a rusty sword, a small wooden bow with a few arrows, and two small knives. You only have time to grab one. What do you grab? Choices: sword, bow, or knives. ")
if answer == "sword" or answer == "rusty sword":
Weapon.append("Sword")
Strength = 5
            print("As the left beast leaps at you with its enormous teeth, you grab the rusty sword and swing. The sword bursts into flames and cuts through the poor creature in one fell swoop. As the flames disappear, you see the rusty sword has transformed into a glowing steel sword with a small red gem implanted in the grey hilt. The sword is about 4.5 feet long and 4 inches wide. A brown leather sheath has appeared on your back. The steel sword shines in the bright sunlight as you ready it. As you prepare to charge at the remaining beast, it growls and wanders off. As you look around, you see that the bow and knives have disappeared.")
if answer == "bow" or answer == "wooden bow":
Weapon.append("Bow")
Finese = 5
            print("As the left beast leaps at you, baring its unnaturally sharp teeth, you grab the small bow. As you pull an arrow back, the bow and arrow are engulfed in flame. You release the flaming arrow. The arrow pierces the beast's body, killing it instantly. The wooden bow has transformed into a solid steel bow with a brown, leather handle. There is a red gem above the handle. A quiver of steel-tipped arrows, each with a small red gem right below the tip, has appeared on your back. The bow is about 4 feet long and the arrows are 1.5 feet long. You grab another arrow from the brown, leather quiver. You hear the bow creak back as you ready another arrow; the remaining beast simply growls and wanders off. As you look around, you see that the sword and knives have disappeared.")
if answer == "knives" or answer == "small knives":
Weapon.append("Knives")
Stealth = 5
            print("You leap for the two small knives as the left beast leaps at you with its impossibly large claws outstretched. Once you grab the knives, you throw one; as soon as the knife is thrown, it bursts into flames. The burning knife hits the right leg of the beast, and it collapses as soon as it hits the ground. As you swing at its throat, the other knife bursts into flames. As the monstrous beast falls, the flames disappear, leaving a steel, curved dagger. The other knife has also transformed into a similar curved dagger. Two brown, leather sheathes have appeared on your hips. As you prepare to fight the other beast, it growls and leaves. As you look around, the sword and bow have both disappeared.")
#------------------------------------------------------------------------------------------------------------------------------------------------
if len(Weapon) >= 1 and x_axis == 0 and y_axis == 0 and z_axis == 0 and Goblin_Village == True:
        answer = input("Looking around, you observe that you are in a strange forest. The only things you can see besides trees are the mountains to the north. There is a tree that extends beyond the forest canopy. After climbing it and looking around, you discover a village to the east, a desert to the west, and a dark forest to the south. Choices: go west, go south, go east. ")
if answer == "north" or answer == "go north":
y_axis = y_axis + 1
if answer == "east" or answer == "go east":
x_axis = x_axis + 1
if answer == "south" or answer == "go south":
y_axis = y_axis - 1
if answer == "west" or answer == "go west":
x_axis = x_axis - 1
#-----------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 0 and w_option == 0 and room2 == False:
        answer = input("As you walk toward the village, you hear some gibberish. As you quietly creep up on the speaker, you see 2 small, green goblins and 1 person tied up. The goblins are small, humanoid creatures with green skin. Their ears are so long that they droop to the sides of their faces. The captive appears to wear a snow white gown or dress. One goblin gestures toward the victim violently with a long, curved dagger, while the other goblin puts himself in between the captive and the knife-wielding goblin. The goblin in between holds his hands up, while the knife-wielding goblin yells angrily at him. What will you do? Go back, fight, or wait? ")
if answer == "go back" or answer == "back" or answer == "Go Back" or answer == "Back":
x_axis = x_axis - 1
Jennifer_Dead = True
Goblin_Village = False
if answer == "fight" or answer == "fight goblins" or answer == "Fight" or answer == "Fight Goblins":
f_option = f_option + 1
if answer == "wait" or answer == "Wait":
w_option = w_option + 1
#-----------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and w_option == 1:
answer = input("The knife wielding goblin pushes the other goblin out of the way. He raises his knife preparing to kill the hostage. The knife comes down on the victim's body. As the knife pierces the heart, the victim falls limp. The goblin yanks the knife from the corpse and grabs something from the body. Will you extract revenge or wait? ")
if answer == "revenge" or answer == "Revenge" or answer == "extract revenge" or answer == "Extract Revenge":
f_option = f_option + 1
GoblinK_Dead = True
Jennifer_Dead = True
if answer == "move" or answer == "move on" or answer == "wait" or answer == "Wait":
w_option = w_option + 1
Goblin_Village = False
Jennifer_Dead = True
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == False and GoblinP_Dead == False and w_option == 2:
answer = input("As the goblins move on, they leave the corpse. Her snow white dress is tainted by her bright red blood. The goblins appear to have taken all her jewelry and gold. Will you take the body to the village or leave? ")
if answer == "take body" or answer == "Take Body" or answer == "take the body to village" or answer == "Take the Body to Village" or answer == "Take The Body To Village":
Jennifer_Body = True
w_option = w_option - 2
x_axis = x_axis + 1
room2 = True
if answer == "Leave" or answer == "leave":
x_axis = x_axis + 1
w_option = w_option - 2
room2 = True
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Sword" in Weapon:
answer = input("You dash out, sword ready to swing. The sword bursts into flames as you swing it. However, the flames are a bright red. The goblin with the knife tries to block your brutal slash, but the knife and its wielder is cut in half. The other goblin looks at you, its hideous, green face is full of fear. A small part of you craves his blood. You begin to tread toward him, sword prepared to run him through. However, a small flashback of him protecting the girl appears. Will you kill him, or spare him? ")
if answer == "Kill" or answer == "kill" or answer == "Kill Him" or answer == "kill him":
Bloodlust = Bloodlust + 5
Infamy = Infamy + 5
GoblinK_Dead = True
GoblinP_Dead = True
Jennifer_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
if answer == "Spare" or answer == "spare" or answer == "spare him" or answer == "Spare Him":
Justice = Justice + 5
Honor = Honor + 5
GoblinK_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Bow" in Weapon:
        answer = input("You raise your bow and pull back a flaming arrow. The fire, however, is a bright red. You shoot it at the goblin with the knife. He yells in pain as the arrow embeds itself in his arm. You then put an arrow in his forehead. You pull back another arrow as you aim it at the other goblin. His face is full of fear and surprise. A small voice urges you to kill him, while you remember that he tried to defend the girl. Will you kill or spare him? ")
if answer == "Kill" or answer == "kill" or answer == "Kill Him" or answer == "kill him":
Bloodlust = Bloodlust + 5
Infamy = Infamy + 5
GoblinK_Dead = True
GoblinP_Dead = True
Jennifer_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
if answer == "Spare" or answer == "spare" or answer == "spare him" or answer == "Spare Him":
Justice = Justice + 5
Honor = Honor + 5
GoblinK_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
#------------------------------------------------------------------------------------------------------------------------------------------------
    if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Knives" in Weapon:
        answer = input("You throw a knife at the knife wielding goblin. The knife bursts into flames as it rolls through the air. As the goblin yells in pain, you dash at him. He tries to defend himself, but your flaming knives swiftly cut his arm off. You then proceed to finish him. You look over at the stunned goblin; he looks afraid. Will you kill him or spare him? ")
if answer == "Kill" or answer == "kill" or answer == "Kill Him" or answer == "kill him":
Bloodlust = Bloodlust + 5
Infamy = Infamy + 5
GoblinK_Dead = True
GoblinP_Dead = True
Jennifer_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
if answer == "Spare" or answer == "spare" or answer == "spare him" or answer == "Spare Him":
Justice = Justice + 5
Honor = Honor + 5
GoblinK_Dead = True
Goblin_Village = False
Strength = Strength + 5
room2 = True
#----------------------------------------------------------------------------------------------------------------------------------------------
    if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == True and w_option == 1 and f_option == 1 and "Knives" in Weapon:
print("You swiftly dash toward him, your fire now a blood red. You jab your blade deep into his stomach. He doubles over in pain. You then proceed to brutally end the poor creature. You smile cruelly when you finish your \"Masterpiece\". You then smile as you move toward the village.")
x_axis = x_axis + 1
w_option = 0
f_option = 0
continue
#---------------------------------------------------------------------------------------------------------------------------------------------
    if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Knives" in Weapon and room2 == True:
        print("You gently lower your knives and walk toward the girl's corpse. You gently pick up her body. A small nagging voice tells you that you could've saved her. You could have killed the goblins before she died. Why did you just stand there?")
Jennifer_Body = True
Weapon.append("Goblin Knife")
w_option = 0
f_option = 0
x_axis = x_axis + 1
continue
#----------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == True and w_option == 1 and f_option == 1 and "Bow" in Weapon:
        print("You drop your bow and grip the blazing arrow. You lunge at him, impaling his arm like it was a pillow. He screams in pain as you continue your assault. As you finish your bloody work, you smile at the bright red liquid that stains the ground. You pick up your bow and walk to the village, smiling and humming a happy tune.")
x_axis = x_axis + 1
w_option = 0
f_option = 0
continue
#---------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Bow" in Weapon and room2 == True:
        print("You put down the bow and put the arrow back in the quiver. You walk toward the girl's lifeless body. You untie her and pick her up. You feel guilty looking at her pale face. You could have saved her. But, you chose to wait. Why?")
Jennifer_Body = True
Weapon.append("Goblin Knife")
w_option = 0
f_option = 0
x_axis = x_axis + 1
continue
#----------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == True and w_option == 1 and f_option == 1 and "Sword" in Weapon:
        print("You dash at the frightened goblin. Your sword cuts off his arm like it was air. You let loose a laugh as you rip apart his body. The scene is hideous. You smile cruelly at your bloody masterpiece. You start toward the village, your fire turning a blood red with a hint of black.")
x_axis = x_axis + 1
w_option = 0
f_option = 0
continue
#---------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == True and Goblin_Village == False and GoblinK_Dead == True and GoblinP_Dead == False and w_option == 1 and f_option == 1 and "Sword" in Weapon and room2 == True:
        print("You walk away from the frightened goblin. You take the knife from the dead goblin and pick up the girl's corpse. You look back and find the goblin gone. You frown at the girl's corpse and remember that you chose to wait instead of taking action. Guilt fills your head as you start toward the village. Why didn't you take action?")
Jennifer_Body = True
Weapon.append("Goblin Knife")
w_option = 0
f_option = 0
x_axis = x_axis + 1
continue
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and "Sword" in Weapon and str_option == 0 and slh_option == 0 and dis_option == 0:
answer = input("How will you attack? Choices: Charge, Stealth, or Distract ")
if answer == "Charge" or answer == "charge":
str_option = str_option + 1
if answer == "Stealth" or answer == "stealth":
slh_option = slh_option + 1
if answer == "Distract" or answer == "distract":
dis_option = dis_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Sword" in Weapon:
        answer = input("You jump out of the bushes at full charge. Your sword is raised behind you as you prepare to swing. The sword ignites in a fiery aura only matched by your intent to kill. The knife wielding goblin tries to block your sword, but your sword cuts through it with no resistance. The goblin falls to the ground as you raise your sword to kill him. His face fills with fear as his friend runs toward him. Choice: Kill, Maim, or Spare ")
if answer == "Kill" or answer == "kill":
Strength = Strength + 5
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
GoblinK_Dead = True
k_option = k_option + 1
if answer == "Maim" or answer == "maim":
Strength = Strength + 5
m_option = m_option + 1
Justice = Justice + 5
GoblinK_Maim = True
if answer == "Spare" or answer == "spare":
Strength = Strength + 5
Honor = Honor + 1
s_option = s_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Sword" in Weapon and Stealth < 5:
        answer = input("You quietly crawl around the goblins. But, you weren't quiet enough. The knife wielding goblin cautiously creeps toward your hiding place. Will you kill him or injure him? ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Stealth = Stealth + 5
Bloodlust = Bloodlust + 5
k_option = k_option + 1
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
            GoblinK_Maim = True
Honor = Honor + 1
Justice = Justice + 5
Stealth = Stealth + 5
m_option = m_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Sword" in Weapon and Finese < 5:
        answer = input("You quietly pick up a rock from nearby and throw it into a bush. But, your throw was too clumsy. The goblins quickly identify you. The one with a knife runs toward you. How will you defend yourself? Will you kill or injure him? ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
Finese = Finese + 5
k_option = k_option + 1
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinK_Maim = True
Honor = Honor + 1
Justice = Justice + 5
Finese = Finese + 5
m_option = m_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and "Bow" in Weapon and str_option == 0 and slh_option == 0 and dis_option == 0:
answer = input("How will you attack? Choices: Charge, Stealth, or Distract ")
if answer == "Charge" or answer == "charge":
str_option = str_option + 1
if answer == "Stealth" or answer == "stealth":
slh_option = slh_option + 1
if answer == "Distract" or answer == "distract":
dis_option = dis_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Bow" in Weapon:
        answer = input("You gently pull an arrow back and launch it into a bush. The goblins stare transfixed at where your arrow went. The goblin with the knife slowly walks forward to investigate. Immediately, you jump out, pulling an arrow back. The arrow disables the knife-wielding goblin. He stands there stunned and in pain. Will you kill, maim, or spare? ")
if answer == "Kill" or answer == "kill":
Finese = Finese + 5
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
k_option = k_option + 1
GoblinK_Dead = True
if answer == "Maim" or answer == "maim":
Justice = Justice + 5
Finese = Finese + 5
m_option = m_option + 1
GoblinK_Maim = True
if answer == "Spare" or answer == "spare":
str_option = str_option + 1
Finese = Finese + 5
Honor = Honor + 1
s_option = s_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Bow" in Weapon and Stealth < 5:
        answer = input("You quietly crawl around the goblins. But, you weren't quiet enough. The knife wielding goblin cautiously creeps toward your hiding place. Will you kill him or injure him? ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
Stealth = Stealth + 1
k_option = k_option + 1
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
            GoblinK_Maim = True
Honor = Honor + 1
Justice = Justice + 5
Stealth = Stealth + 1
m_option = m_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Bow" in Weapon and Strength < 5:
        answer = input("You jump out of the bushes, shooting a fiery arrow at the knife wielding goblin. The arrow pierces his arm, but he endures the pain and runs at you at a full sprint. You can't get out of the way in time. Your only choice is to either kill him or maim him. ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
Strength = Strength + 5
k_option = k_option + 1
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
            GoblinK_Maim = True
Honor = Honor + 1
Justice = Justice + 5
Strength = Strength + 5
m_option = m_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and "Knives" in Weapon and str_option == 0 and slh_option == 0 and dis_option == 0:
answer = input("How will you attack? Choices: Charge, Stealth, or Distract ")
if answer == "Charge" or answer == "charge":
str_option = str_option + 1
if answer == "Stealth" or answer == "stealth":
slh_option = slh_option + 1
if answer == "Distract" or answer == "distract":
dis_option = dis_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Knives" in Weapon:
        answer = input("You quietly go around the goblins until you are right beside them. You jump out of the bushes and grab the goblin with the knife. You toss him to the ground. Your knives burst into flames as you prepare to strike. His friend is frozen from fear. A small voice in your head demands his blood. Will you kill, spare, or injure him? ")
if answer == "Kill" or answer == "kill":
Stealth = Stealth + 5
Infamy = Infamy + 1
Bloodlust = Bloodlust + 5
k_option = k_option + 1
GoblinK_Dead = True
if answer == "Maim" or answer == "maim":
Justice = Justice + 5
Stealth = Stealth + 5
m_option = m_option + 1
GoblinK_Maim = True
if answer == "Spare" or answer == "spare":
str_option = str_option + 1
Stealth = Stealth + 5
Honor = Honor + 1
s_option = s_option + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Knives" in Weapon and Finese < 5:
        answer = input("You gently grip a couple of rocks and throw them at a few bushes across from the field, but your throw is off and hits a goblin. The goblin immediately identifies you and starts charging. He's too fast; you must either kill or maim him. ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Finese = Finese + 1
k_option = k_option + 1
Bloodlust = Bloodlust + 5
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
            GoblinK_Maim = True
Honor = Honor + 1
Finese = Finese + 1
m_option = m_option + 1
Justice = Justice + 5
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Knives" in Weapon and Strength < 5:
        answer = input("You charge out, knives bursting into flames. As you dash forward, the knives leave a trail of smoke behind. The goblin with the knife turns around just in time to dodge your fatal swing. He prepares to lunge at you. You can't dodge; you must kill or disable him. ")
if answer == "kill" or answer == "Kill" or answer == "kill him" or answer == "Kill Him":
GoblinK_Dead = True
Infamy = Infamy + 1
Strength = Strength + 5
k_option = k_option + 1
Bloodlust = Bloodlust + 5
if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
            GoblinK_Maim = True
Honor = Honor + 1
Justice = Justice + 5
m_option = m_option + 1
Strength = Strength + 5
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Sword" in Weapon and k_option == 1:
        answer = input("You bring the sword down upon the poor goblin. As the sword rips through his flesh, he cries in pain, but his cry is cut short. His friend stands stunned. A simple swing of your sword, and he will fall. A small voice at the back of your head urges you to kill him. Will you kill him, injure him, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Infamy = Infamy + 1
Bloodlust = Bloodlust + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Justice = Justice + 1
Honor = Honor + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Maim == True and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Sword" in Weapon and m_option == 1:
answer = input("You swing your sword, aiming for the shoulder. The sword bites into the arm, severing it from the body. The goblin screams in pain. The other goblin stands stunned. You look at him, deciding what to do. Will you kill him, injure, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Infamy = Infamy + 1
Bloodlust = Bloodlust + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Justice = Justice + 1
Honor = Honor + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Maim == True and GoblinP_Dead == False and f_option == 1 and str_option == 1 and "Sword" in Weapon and m_option == 1:
        print("You lower your sword, putting it in its sheath. You then knock out the goblin with a quick blow to the head with your foot. The other goblin rushes to his comrade to check if he's hurt. The goblin picks up his friend and takes him away. You untie the girl. She immediately hugs you and starts crying. You pick her up and start toward the village. ")
Goblin_Village = False
f_option = 0
str_option = 0
m_option = 0
x_axis = x_axis + 1
room2 = True
continue
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Bow" in Weapon and k_option == 1:
        answer = input("You release the flaming arrow. It pierces the goblin's skull, killing him instantly. The other goblin runs to his fallen comrade. You instinctively pull back another arrow, ready to kill the defenseless goblin. Will you kill, injure, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Bloodlust = Bloodlust + 1
Infamy = Infamy + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Honor = Honor + 1
Justice = Justice + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Maim == True and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Bow" in Weapon and m_option == 1:
        answer = input("You let loose a single flaming arrow. The arrow pierces the goblin's shoulder, while the flames burn apart the flesh and tendons. The severed arm falls to the ground as the goblin collapses and screams in pain. The other goblin rushes to his friend's aid. \"Kill him,\" says a small voice. Will you kill, injure, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Bloodlust = Bloodlust + 1
Infamy = Infamy + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Honor = Honor + 1
Justice = Justice + 1
#--------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and dis_option == 1 and "Bow" in Weapon and s_option == 1:
        print("You slowly tread toward their captive. Your bow is trained on the goblins. Neither is brave enough to try and stop you. Eventually, they wander off. You untie the girl and immediately she jumps into your arms and starts to cry. You pick up the weeping girl and start towards the nearby village.")
Goblin_Village = False
f_option = 0
dis_option = 0
s_option = 0
x_axis = x_axis + 1
room2 = True
continue
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == True and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Knives" in Weapon and k_option == 1:
answer = input("You bring your knife down on his throat. Blood coats your knife as you yank it free. The other goblin begins to back up. A small part of you yearns for his blood. Will you kill, injure, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Bloodlust = Bloodlust + 1
Infamy = Infamy + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Honor = Honor + 1
Justice = Justice + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Maim == True and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Knives" in Weapon and m_option == 1:
        answer = input("You bring your knife down in an arc, aimed for his right shoulder. The blazing knife cuts through his flesh and bone with ease. He screams in agony as he grips his bloody shoulder. You turn toward his friend. A small urge rises in you, hungering for his blood. Will you kill, injure, or spare him? ")
if answer == "Kill Him" or answer == "kill Him" or answer == "kill" or answer == "Kill":
GoblinP_Dead = True
k_option = k_option + 1
Bloodlust = Bloodlust + 1
Infamy = Infamy + 1
        if answer == "injure" or answer == "Injure" or answer == "Injure Him" or answer == "injure him":
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare Him" or answer == "spare him" or answer == "spare" or answer == "Spare":
s_option = s_option + 1
Honor = Honor + 1
Justice = Justice + 1
#--------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Knives" in Weapon and s_option == 1:
        print("You flip your knife in your hand as you bring it down. The blunt hilt knocks out the violent goblin. You keep an eye on the other goblin as you approach the girl. The other goblin picks up his unconscious friend and hurries away. As soon as you untie the girl, she jumps into your arms and starts to cry. You pick up the sobbing girl and start toward the village to the east.")
Goblin_Village = False
f_option = 0
dis_option = 0
s_option = 0
x_axis = x_axis + 1
room2 = True
continue
#------------------------------------------------------------------------------------------------------------------------------------------------
if x_axis == 1 and y_axis == 0 and z_axis == 0 and Jennifer_Dead == False and Goblin_Village == True and GoblinK_Dead == False and GoblinP_Dead == False and f_option == 1 and slh_option == 1 and "Sword" in Weapon and k_option == 1:
answer = input("You jump out of the bushes swinging your sword in a horizontal arc. The sword bursts into flames, the flames a bright red. The burning sword cuts the goblin in half right above the stomach. His companion looks on in horror as his friend lie there. Then looks at you and freezes in fear. A tiny voice at the back of your head urges you to kill him. Will you kill, injure, or spare him? ")
if answer == "Kill" or answer == "kill" or answer == "Kill Him" or answer == "kill him":
Goblin_Village = False
GoblinP_Dead = True
k_option = k_option + 1
Bloodlust = Bloodlust + 1
if answer == "Injure" or answer == "injure" or answer == "Injure Him" or answer == "injure him":
Goblin_Village = False
GoblinP_Maim = True
m_option = m_option + 1
if answer == "Spare" or answer == "spare" or answer == "Spare Him" or answer == "spare him":
Goblin_Village = False
s_option = s_option + 1
Honor = Honor + 1
Justice = Justice + 1
#------------------------------------------------------------------------------------------------------------------------------------------------
|
import json
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from statsmodels.tsa.stattools import adfuller
from scipy import stats
from scipy.stats import normaltest
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from flask import Flask, render_template, request
app = Flask(__name__)
import warnings
warnings.filterwarnings("ignore")
@app.route('/', methods=['GET', 'POST'])
def index():
flag = False
if request.method == 'GET':
flag = True
return render_template('index.html',flag=flag)
elif request.method == 'POST':
flag=False
file_name = request.form['ticker']
data = pd.read_csv(file_name)
# data = pd.read_csv('abc.csv')
data['Close'] = data['Close'] * 1.0
close_1 = data['Close']
c = '#386B7F'
train_data, test_data = data[0:int(len(data)*0.7)], data[int(len(data)*0.7):]
training_data = train_data['Close'].values
test_data = test_data['Close'].values
history_of_train = [x for x in training_data]
predictions = []
test_records = len(test_data)
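        # Walk-forward validation: at each step, refit SARIMAX on the growing history,
        # forecast one step ahead, then append the true test value to the history.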
for times in range(test_records):
sarima = SARIMAX(history_of_train, order=(4,4,0))
sarima_fit = sarima.fit(disp=0)
output = sarima_fit.forecast()
pred = output[0]
predictions.append(pred)
test_value = test_data[times]
history_of_train.append(test_value)
sarima_test_MAE=mean_absolute_error(test_data, predictions)
mae=round(sarima_test_MAE,3)
Sarima = SARIMAX(data['Close'],order=(4,1,0),seasonal_order=(1,1,1,12),enforce_invertibility=False, enforce_stationarity=False)
Sarima = Sarima.fit()
predictions = Sarima.predict(start=len(data), end= len(data)+42, dynamic= True)
p_dict = {}
i = 0
for k, v in predictions.items():
i += 1
p_dict[i] = v
        return render_template('index.html', predictions=p_dict, mae=mae, flag=flag, name=file_name.split(".csv")[0])
if __name__ == '__main__':
app.run()
|
from csv import reader
from requests import get, exceptions
from datetime import datetime
import pandas as pd
# Todo import
from .constantes import *
from .excepciones import *
class Codavi:
"""
Codavi ofrece datos y estadísticas sobre el COVID-19 en toda la Argentina.
"""
def __fecha_actual(self, formato: str = '%Y-%m-%d') -> str:
"""
Obtiene la fecha actual del sistema.
:retorna: Fecha actual, por defecto en formato 'año-mes-día'.
:tiporetorno: str.
"""
return datetime.now().strftime(formato)
def __request(self, url: str, contenido: bool = False, decodificar: str = 'utf-8') -> str:
"""
Hace una petición HTTP tipo GET hacia determinado URL.
:param url: URL para hacer la petición.
:param contenido: True para obtener el contenido crudo, false para decodificarlo.
:param decodificar: Codec a decodificar, por predeterminado 'utf-8'.
:return: str decodificado.
:treturn: str.
"""
try:
res = get(url)
res.raise_for_status()
except exceptions.HTTPError as err:
raise DatosNoActualizados()
return res if contenido else res.content.decode(decodificar)
def fallecidos(self, sexo: str = 'todos', fecha: str = None):
"""
Cantidad de fallecidos por COVID-19 en Argentina de manera acumulada.
:param sexo: Filtro por sexo, por defecto 'todos'.
:param fecha: Fecha a obtener en formato 'año-mes-día'.
:return: Fecha y cantidad.
:treturn: ['fecha', 'cantidad']
"""
if not sexo.lower() in FILTROS['sexo'].keys():
raise SexoDesconocido()
res = self.__request(URLS['ar']['fallecidos'])
csv = reader(res.splitlines())
lista = list(csv)
if not fecha:
if not sexo:
return lista[-1]
datos = lista[-1]
return [datos[0], datos[FILTROS['sexo'][sexo.lower()]]]
if not fecha in res:
raise FechaNoEncontrada()
else:
for linea in lista:
if fecha in linea:
return [fecha, linea[FILTROS['sexo'][sexo.lower()]]]
def confirmados(self, sexo: str = 'todos', fecha: str = None) -> ['fecha', 'cantidad']:
"""
Cantidad de casos confirmados en Argentina de manera acumulada.
:param sexo: Filtro por sexo, por defecto 'todos'.
:param fecha: Fecha a obtener en formato 'año-mes-día'.
:return: Fecha y cantidad.
:treturn: ['fecha', 'cantidad']
"""
if not sexo.lower() in FILTROS['sexo'].keys():
raise SexoDesconocido()
res = self.__request(URLS['ar']['confirmados'])
csv = reader(res.splitlines())
lista = list(csv)
if not fecha:
if not sexo:
return lista[-1]
datos = lista[-1]
return [datos[0], datos[FILTROS['sexo'][sexo.lower()]]]
if not fecha in res:
raise FechaNoEncontrada()
else:
for linea in lista:
if fecha in linea:
return [fecha, linea[FILTROS['sexo'][sexo.lower()]]]
def llamadas_107(self, acumulado: bool = False, fecha: str = None) -> ['fecha', 'cantidad']:
"""
Cantidad de llamadas 107 hechas de COVID-19.
:param acumulado: True para obtener el valor acumulado, False para obtener valor diario.
:param fecha: Fecha especifica a obtener en formato '12MAY2021'.
:return: Fecha y cantidad.
:treturn: ['fecha', 'cantidad']
"""
res = self.__request(URLS['ar']['llamadas_107'])
csv = reader(res.splitlines())
lista = list(csv)
lista.pop(0)
if not fecha:
fecha = lista[-1][0][:-9]
if acumulado:
sumatoria = 0
for linea in lista:
sumatoria += int(linea[1])
cantidad = sumatoria
else:
cantidad = lista[-1][1]
return [fecha, cantidad]
if not fecha in res:
raise FechaNoEncontrada()
if acumulado:
sumatoria = 0
for linea in lista:
sumatoria += int(linea[1])
if fecha in linea[0]:
break
cantidad = sumatoria
else:
for linea in lista:
if fecha in linea[0]:
cantidad = linea[1]
break
return [fecha, cantidad]
def vacuna(self, nombre: str = None, dosis: str = 'total') -> ['fecha', 'nombre', 'dosis', 'cantidad']:
"""
Cantidad de dosis aplicadas por vacuna nacionalmente.
:param nombre: Nombre de la vacuna a obtener.
:param dosis: Tipo de dosis a obtener, por defecto 'total'.
:return: Fecha, nombre de la vacuna, tipo de dosis y cantidad aplicada.
:treturn: ['fecha', 'nombre', 'dosis', 'cantidad']
"""
if not nombre:
raise VacunaDesconocida()
datos = pd.read_csv(URLS['ar']['vacunas'])
vacuna_filtrada = datos.query(f'vacuna_nombre.str.lower().str.contains("{nombre.lower()}")')
hoy = self.__fecha_actual()
if vacuna_filtrada.empty:
raise VacunaDesconocida()
if dosis == 'total':
primera, segunda = self.vacuna(nombre=nombre, dosis='primera')[3], self.vacuna(nombre=nombre, dosis='segunda')[3]
unica, refuerzo = self.vacuna(nombre=nombre, dosis='unica')[3], self.vacuna(nombre=nombre, dosis='refuerzo')[3]
adicional = self.vacuna(nombre=nombre, dosis='adicional')[3]
return [hoy, nombre, 'total', primera + segunda + unica + refuerzo + adicional]
if not dosis in FILTROS['dosis']:
raise DosisDesconocida()
dosis = FILTROS['dosis'][dosis]
nombre = vacuna_filtrada['vacuna_nombre'].values[0]
total = vacuna_filtrada[dosis].sum()
return [hoy, nombre, dosis, total]
def dosis(self, numero: str = 'total', acumulado: bool = True, fecha: str = None) -> ['fecha', 'cantidad']:
"""
Cantidad de dosis aplicadas nacionalmente.
:param numero: Tipo de dosis a obtener, por predeterminado 'total'.
:param acumulado: True para valores acumulados, False para valores diarios.
:param fecha: Fecha especifica a obtener en formato 'año-mes-día'.
:return: Fecha y cantidad de dosis aplicadas acumulada/diaria.
:treturn: ['fecha', 'cantidad']
"""
url = URLS['ar']['dosis']['acumulado'][numero] if acumulado else URLS['ar']['dosis']['diario'][numero]
res = self.__request(url)
csv = reader(res.splitlines())
lista = list(csv)
if not fecha:
return lista[-1]
if not fecha in res:
raise FechaNoEncontrada()
else:
for linea in lista:
if fecha in linea:
return linea
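
# Usage sketch (illustrative, not part of the original module). Because of the relative
# imports above this only runs when executed as a module, e.g. `python -m <package>.<module>`;
# the available sex filters are whatever keys FILTROS['sexo'] defines.
if __name__ == '__main__':
    codavi = Codavi()
    # Each call returns ['fecha', 'cantidad'] for the latest available date.
    print(codavi.confirmados())
    print(codavi.fallecidos())
    # Doses applied for a vaccine whose name contains the given substring (the name is an example).
    print(codavi.vacuna(nombre='Sputnik'))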
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from homes.views import HomePageView, CustomRegistrationView
from homes.forms import CustomAuthenticationForm
admin.site.site_header = 'Django Property'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^sales/', include('homes_for_sale.urls', namespace='sales')),
url(r'^lettings/', include('homes_to_let.urls', namespace='lettings')),
url(r'^user/', include('homes_user.urls', namespace='user')),
url(r'^agents/', include('homes_agent.urls', namespace='agents')),
url(r'^accounts/login/$', auth_views.login, {'authentication_form': CustomAuthenticationForm}, name='login'),
url(r'^accounts/register/$', CustomRegistrationView.as_view()),
url(r'^accounts/', include('registration.backends.hmac.urls')),
url(r'^json/', include('homes_json.urls', namespace='json')),
url(r'^$', HomePageView.as_view(), name='homepage')
]
|
print("hello world")
print("helo mans")
print("i guess its working")
|
from yt.testing import \
fake_random_ds, \
assert_equal
from yt.units.yt_array import \
uconcatenate
def _get_dobjs(c):
dobjs = [("sphere", ("center", (1.0, "unitary"))),
("sphere", ("center", (0.1, "unitary"))),
("ortho_ray", (0, (c[1], c[2]))),
("slice", (0, c[0])),
#("disk", ("center", [0.1, 0.3, 0.6],
# (0.2, 'unitary'), (0.1, 'unitary'))),
("cutting", ([0.1, 0.3, 0.6], 'center')),
("all_data", ()),
]
return dobjs
def test_chunking():
for nprocs in [1, 2, 4, 8]:
ds = fake_random_ds(64, nprocs = nprocs)
c = (ds.domain_right_edge + ds.domain_left_edge)/2.0
c += ds.arr(0.5/ds.domain_dimensions, "code_length")
for dobj in _get_dobjs(c):
obj = getattr(ds, dobj[0])(*dobj[1])
coords = {'f':{}, 'i':{}}
for t in ["io", "all", "spatial"]:
coords['i'][t] = []
coords['f'][t] = []
for chunk in obj.chunks(None, t):
coords['f'][t].append(chunk.fcoords[:,:])
coords['i'][t].append(chunk.icoords[:,:])
coords['f'][t] = uconcatenate(coords['f'][t])
coords['i'][t] = uconcatenate(coords['i'][t])
coords['f'][t].sort()
coords['i'][t].sort()
yield assert_equal, coords['f']['io'], coords['f']['all']
yield assert_equal, coords['f']['io'], coords['f']['spatial']
yield assert_equal, coords['i']['io'], coords['i']['all']
yield assert_equal, coords['i']['io'], coords['i']['spatial']
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getpass
import subprocess
import platform
dname = platform.linux_distribution()[0].lower()
dver = platform.linux_distribution()[1]
if getpass.getuser() != "root":
print("You need to run this script as root!")
sys.exit(-1)
print("Your distro seems to be : " + dname + " " + dver)
if dname == 'ubuntu' or dname == 'debian':
print("Running installation script for Debian/Ubuntu servers, hang on!")
print("Installing pre-requisites via apt-get")
subprocess.check_call(('apt-get', 'install', 'apache2', 'git', 'liblua5.2-dev', 'lua-cjson', 'lua-sec', 'lua-socket', 'python3', 'python3-pip', 'subversion'))
print("Installing Python modules")
subprocess.check_call(('pip3', 'install', 'elasticsearch', 'formatflowed'))
print("Installing ElasticSearch")
subprocess.check_call(('apt-get', 'install', 'openjdk-7-jre-headless'))
try:
subprocess.check_call(("wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -"), shell=True)
subprocess.check_call(('echo "deb http://packages.elastic.co/elasticsearch/1.7/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-1.7.list'), shell=True)
    except subprocess.CalledProcessError:
print("Did we already add ES to the repo? hmm")
subprocess.check_call(('apt-get', 'update'))
subprocess.check_call(('apt-get', 'install', 'elasticsearch'))
print("Checking out a copy of Pony Mail from GitHub")
subprocess.check_call(('git', 'clone', 'https://github.com/Humbedooh/ponymail.git', '/var/www/ponymail'))
print("Starting ElasticSearch")
subprocess.check_call(('service', 'elasticsearch', 'start'))
print("Writing httpd configuration file /etc/apache2/sites-enabled/99-ponymail.conf")
with open("/etc/apache2/sites-enabled/99-ponymail.conf", "w") as f:
f.write("""
<VirtualHost *:80>
ServerName mylists.foo.tld
DocumentRoot /var/www/ponymail/site
AddHandler lua-script .lua
LuaScope thread
LuaCodeCache stat
AcceptPathInfo On
</VirtualHost>""")
if dname == 'ubuntu' and dver == '14.04':
print("Ubuntu 14.04 specific step; Compiling mod_lua")
subprocess.check_call(('apt-get', 'install', 'apache2-dev'))
subprocess.check_call(('svn', 'co', 'https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x/modules/lua/', '/tmp/mod_lua'))
subprocess.check_call(("cd /tmp/mod_lua && apxs2 -I/usr/include/lua5.2 -cia mod_lua.c lua_*.c -lm -llua5.2"), shell=True)
print("Enabling mod_lua")
subprocess.check_call(('a2enmod', 'lua'))
print("Starting httpd")
subprocess.check_call(('service', 'apache2', 'start'))
print("Done! Please run setup.py now to set up Pony Mail")
|
#!/usr/bin/env python
'''
given two groups of samples, assess their ability to be correctly classified using MS loci
'''
import argparse
import logging
import sys
import cyvcf2
import intervaltree
REMOVE_CHR=True
def populate_intervals(panels, vdx, vcf, name, group, pass_only, loci):
logging.info('processing %s...', vcf)
considered = filtered = 0
found = [0] * len(panels)
vcf_in = cyvcf2.VCF(vcf)
for variant in vcf_in:
# is pass
if pass_only and variant.FILTER is not None:
filtered += 1
continue
# is indel?
if len(variant.REF) != len(variant.ALT[0]):
considered += 1
# check all the panels
for pdx, panel in enumerate(panels):
if REMOVE_CHR and variant.CHROM.startswith('chr'):
chr = variant.CHROM[3:]
else:
chr = variant.CHROM
if chr in panel and len(panel[chr][variant.POS]) > 0:
found[pdx] += 1
for interval in panel[chr][variant.POS]:
logging.debug(interval)
interval[2][0].append(vdx)
loci[pdx].add((chr, interval))
logging.info('processing %s: considered %i filtered %i found %s', vcf, considered, filtered, ' '.join([str(x) for x in found]))
def main(vcfs, names, groups, panels, pass_only):
logging.info('starting with %i vcfs and %i groups: %i in group 0 and %i group 1...', len(vcfs), len(groups), len([x for x in groups if x == 0]), len([x for x in groups if x == 1]))
# build interval trees
panel_intervals = []
for bed in panels:
logging.info('processing %s...', bed)
intervals = {}
total = 0
for idx, line in enumerate(open(bed, 'r')):
fields = line.strip('\n').split('\t')
if len(fields) < 3:
                logging.warning('skipped line %i in %s', idx + 1, bed)
continue
chr, start, finish = fields[:3]
if len(fields) > 3:
annot = fields[3]
else:
annot = ''
if REMOVE_CHR and chr.startswith('chr'):
chr = chr[3:]
if chr not in intervals:
intervals[chr] = intervaltree.IntervalTree()
if len(intervals[chr][int(start):int(finish)]) > 0:
pass #logging.debug('overlap at %s %s %s', chr, start, finish)
intervals[chr][int(start):int(finish)] = ([], annot) # list of matching samples
total += int(finish) - int(start)
if (idx + 1) % 100000 == 0:
logging.debug('%i lines...', idx + 1)
panel_intervals.append(intervals)
logging.info('processing %s: %i bases', bed, total)
# assign affected variants to panels
loci = []
for _ in panels:
loci.append(set())
if len(names) == 1:
names = [names[0]] * len(vcfs)
for vdx, (vcf, name, group) in enumerate(zip(vcfs, names, groups)):
populate_intervals(panel_intervals, vdx, vcf, name, group, pass_only, loci)
# accuracy of each locus
sys.stdout.write('Panel\tChr\tStart\tEnd\tAnnot\tTP\tTN\tFP\tFN\tSpecificity\tSensitivity\tAccuracy\n')
for pdx, panel in enumerate(panels): # each panel
for locus in loci[pdx]:
# how informative is this locus?
tp = tn = fp = fn = 0
for idx, group in enumerate(groups):
if idx in locus[1][2][0]: # found in interval
if group == 0: # no good
fp += 1
else: # good
tp += 1
else: # not found in interval
if group == 0: # good
tn += 1
else: # no good
fn += 1
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
accuracy = (tp + tn) / (tp + tn + fp + fn)
sys.stdout.write('{panel}\t{chr}\t{start}\t{end}\t{annot}\t{tp}\t{tn}\t{fp}\t{fn}\t{specificity:.2f}\t{sensitivity:.2f}\t{accuracy:.2f}\n'.format(
panel=panel,
chr=locus[0],
start=locus[1][0],
end=locus[1][1],
annot=locus[1][2][1],
tp=tp,
tn=tn,
fp=fp,
fn=fn,
sensitivity=sensitivity,
specificity=specificity,
accuracy=accuracy
))
logging.info('done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Assess classifiability')
parser.add_argument('--vcfs', required=True, nargs='+', help='vcfs to analyse')
parser.add_argument('--names', required=True, nargs='+', help='sample names')
parser.add_argument('--groups', required=True, nargs='+', type=int, help='which group (0 or 1)')
parser.add_argument('--panels', required=True, nargs='+', help='panels (bed file format) to assess')
parser.add_argument('--filter_pass', action='store_true', help='only pass calls')
parser.add_argument('--verbose', action='store_true', help='more logging')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
main(args.vcfs, args.names, args.groups, args.panels, args.filter_pass)
|
from typing import List, Union, Tuple
from darts.commands import AddScore, ScoreValue
class ScoreTextInput:
"""This will handle the logic behind the text entry for scores."""
def __init__(self):
self.scores: List[Union[str, Tuple[str, str]]] = ['']
def __str__(self):
return ' + '.join(f"{score[0]} * {score[1]}" if isinstance(score, tuple) else score for score in self.scores)
def reset(self):
"""Reset the ScoreTextInput in it's initial state."""
self.scores = ['']
def as_command(self) -> AddScore:
"""Convert the ScoreTextInput as an AddScore command."""
scores = []
for score in self.scores:
if isinstance(score, tuple):
factor, value = score
else:
value = score
factor = '1'
factor, value = int(factor), int(value)
scores.append(ScoreValue(value=value, factor=factor))
return AddScore(scores=scores, player=None)
def del_last(self):
"""Delete the last character input."""
if self.scores:
score = self.scores.pop(-1)
if score != '':
if isinstance(score, tuple):
factor, value = score
if value == '':
score = factor
else:
score = (factor, value[:-1])
elif isinstance(score, str):
score = score[:-1]
self.scores.append(score)
def mul_score(self):
"""Turn a simple (value) score as a (factor, value) score."""
if self.scores and isinstance(self.scores[-1], str):
score = self.scores.pop(-1)
score = (score, '')
self.scores.append(score)
def add_score(self):
"""Add a new score to the list."""
self.scores.append('')
def add_digit(self, digit: str):
"""Add a digit to the latest score."""
if len(digit) == 1 and digit.isnumeric():
score = self.scores.pop(-1)
if isinstance(score, tuple):
factor, value = score
elif isinstance(score, str):
factor = score
value = None
else:
raise NotImplementedError
if value is None:
factor = factor + digit
score = factor
else:
value = value + digit
score = (factor, value)
self.scores.append(score)
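
# Usage sketch (illustrative, not part of the original module): building up the entry
# "2 * 20 + 5" digit by digit and converting it into an AddScore command.
if __name__ == '__main__':
    entry = ScoreTextInput()
    entry.add_digit('2')   # first score: 2
    entry.mul_score()      # promote it to a (factor, value) pair: 2 * _
    entry.add_digit('2')
    entry.add_digit('0')   # -> 2 * 20
    entry.add_score()      # start a second score
    entry.add_digit('5')   # -> 2 * 20 + 5
    print(entry)           # "2 * 20 + 5"
    # as_command() yields ScoreValue(value=20, factor=2) and ScoreValue(value=5, factor=1)
    print(entry.as_command())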
|
#!/usr/bin/env python3
import base64
import hashlib
import os
import sys
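
# Summary (added for clarity): each file passed on the command line is renamed to the
# urlsafe-base64 encoding of its SHA3-384 digest, keeping a .woff2 suffix.
# Example invocation (script and font names are hypothetical):
#   python hash_rename.py OpenSans.woff2 Lato.woff2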
def main(*filenames: str):
for filename in filenames:
with open(filename, 'rb') as f:
h = hashlib.sha3_384(f.read())
new_name = base64.urlsafe_b64encode(h.digest()).decode('utf-8')
print(f"{filename} -> {new_name}.woff2")
os.rename(filename, f"{new_name}.woff2")
if __name__ == '__main__':
main(*sys.argv[1:])
|
#!/usr/bin/env python
from azure_storage.methods import copy_blob, create_parent_parser, delete_container, delete_file, delete_folder, \
extract_common_path, move_prep, setup_arguments
from argparse import ArgumentParser, RawTextHelpFormatter
import coloredlogs
import logging
import sys
import os
class AzureContainerMove(object):
def main(self):
# Validate the container names, and prepare all the necessary clients
self.container_name, self.target_container, self.blob_service_client, self.source_container_client, \
self.target_container_client = move_prep(
passphrase=self.passphrase,
account_name=self.account_name,
container_name=self.container_name,
target_container=self.target_container)
# Rename (move) the container
self.move_container(source_container_client=self.source_container_client,
blob_service_client=self.blob_service_client,
container_name=self.container_name,
target_container=self.target_container,
path=self.path,
storage_tier=self.storage_tier)
# Delete the original container once the copy is complete
delete_container(blob_service_client=self.blob_service_client,
container_name=self.container_name,
account_name=self.account_name)
@staticmethod
def move_container(source_container_client, blob_service_client, container_name, target_container, path,
storage_tier):
"""
Rename (move) the specified container in Azure storage
:param source_container_client: type azure.storage.blob.BlobServiceClient.ContainerClient for source container
:param blob_service_client: type: azure.storage.blob.BlobServiceClient
:param container_name: type str: Name of the container in which the folder is located
:param target_container: type str: Name of the container into which the folder is to be moved
:param path: type str: Path of folders in which the files are to be placed
:param storage_tier: type str: Storage tier to use for the container
"""
# Create a generator containing all the blobs in the container
generator = source_container_client.list_blobs()
for blob_file in generator:
# Copy the file to the new container
copy_blob(blob_file=blob_file,
blob_service_client=blob_service_client,
container_name=container_name,
target_container=target_container,
path=path,
storage_tier=storage_tier,
category='container')
def __init__(self, container_name, account_name, passphrase, target_container, path, storage_tier):
# Set the container name variable
self.container_name = container_name
# Initialise necessary class variables
self.passphrase = passphrase
self.account_name = account_name
self.target_container = target_container
self.path = path
self.storage_tier = storage_tier
self.connect_str = str()
self.blob_service_client = None
self.source_container_client = None
self.target_container_client = None
class AzureMove(object):
def main(self):
# Validate the container names, and prepare all the necessary clients
self.container_name, self.target_container, self.blob_service_client, self.source_container_client, \
self.target_container_client = move_prep(
passphrase=self.passphrase,
account_name=self.account_name,
container_name=self.container_name,
target_container=self.target_container)
# Run the proper method depending on whether a file or a folder is requested
if self.category == 'file':
self.move_file(source_container_client=self.source_container_client,
object_name=self.object_name,
blob_service_client=self.blob_service_client,
container_name=self.container_name,
target_container=self.target_container,
path=self.path,
storage_tier=self.storage_tier)
delete_file(container_client=self.source_container_client,
object_name=self.object_name,
blob_service_client=self.blob_service_client,
container_name=self.container_name)
elif self.category == 'folder':
self.move_folder(source_container_client=self.source_container_client,
object_name=self.object_name,
blob_service_client=self.blob_service_client,
container_name=self.container_name,
target_container=self.target_container,
path=self.path,
category=self.category,
storage_tier=self.storage_tier)
delete_folder(container_client=self.source_container_client,
object_name=self.object_name,
blob_service_client=self.blob_service_client,
container_name=self.container_name,
account_name=self.account_name)
else:
logging.error(f'Something is wrong. There is no {self.category} option available')
raise SystemExit
@staticmethod
def move_file(source_container_client, object_name, blob_service_client, container_name, target_container,
path, storage_tier):
"""
Move the specified file to the desired container in Azure storage
:param source_container_client: type azure.storage.blob.BlobServiceClient.ContainerClient for source container
:param object_name: type str: Name and path of folder to move in Azure storage
:param blob_service_client: type: azure.storage.blob.BlobServiceClient
:param container_name: type str: Name of the container in which the folder is located
:param target_container: type str: Name of the container into which the folder is to be moved
:param path: type str: Path of folders in which the files are to be placed
:param storage_tier: type str: Storage tier to use for the file
"""
# Create a generator containing all the blobs in the container
generator = source_container_client.list_blobs()
# Create a boolean to determine if the blob has been located
present = False
for blob_file in generator:
# Filter for the blob name
if blob_file.name == object_name:
# Update the blob presence variable
present = True
# Copy the file to the new container
copy_blob(blob_file=blob_file,
blob_service_client=blob_service_client,
container_name=container_name,
target_container=target_container,
path=path,
storage_tier=storage_tier)
# Send a warning to the user that the blob could not be found
if not present:
logging.error(f'Could not locate the desired file {object_name}')
raise SystemExit
@staticmethod
def move_folder(source_container_client, object_name, blob_service_client, container_name, target_container, path,
storage_tier, category):
"""
Move the specified folder (and its contents) to the desired container in Azure storage
:param source_container_client: type azure.storage.blob.BlobServiceClient.ContainerClient for source container
:param object_name: type str: Name and path of folder to move in Azure storage
:param blob_service_client: type: azure.storage.blob.BlobServiceClient
:param container_name: type str: Name of the container in which the folder is located
:param target_container: type str: Name of the container into which the folder is to be moved
:param path: type str: Path of folders in which the files are to be placed
:param storage_tier: type str: Storage tier to use for the moved folder
:param category: type str: Category of object to be copied. Limited to file or folder
"""
# Create a generator containing all the blobs in the container
generator = source_container_client.list_blobs()
# Create a boolean to determine if the blob has been located
present = False
for blob_file in generator:
# Extract the common path between the current file and the requested folder
common_path = extract_common_path(object_name=object_name,
blob_file=blob_file)
# Only copy the file if there is a common path between the object path and the blob path (they match)
if common_path is not None:
# Update the blob presence variable
present = True
# Copy the file to the new container
copy_blob(blob_file=blob_file,
blob_service_client=blob_service_client,
container_name=container_name,
target_container=target_container,
path=path,
object_name=object_name,
category=category,
common_path=common_path,
storage_tier=storage_tier)
# Send a warning to the user that the blob could not be found
if not present:
logging.error(f'Could not locate the desired folder {object_name}')
raise SystemExit
def __init__(self, object_name, container_name, account_name, passphrase, target_container, path,
storage_tier, category):
self.object_name = object_name
# Set the container name variable
self.container_name = container_name
# Initialise necessary class variables
self.passphrase = passphrase
self.account_name = account_name
self.target_container = target_container
self.path = path
self.storage_tier = storage_tier
self.category = category
self.connect_str = str()
self.blob_service_client = None
self.source_container_client = None
self.target_container_client = None
def container_move(args):
"""
Run the AzureContainerMove method
:param args: type ArgumentParser arguments
"""
logging.info(f'Renaming container {args.container_name} to {args.target_container} in Azure storage '
f'account {args.account_name}')
move_container = AzureContainerMove(container_name=args.container_name,
account_name=args.account_name,
passphrase=args.passphrase,
target_container=args.target_container,
path=args.reset_path,
storage_tier=args.storage_tier)
move_container.main()
def file_move(args):
"""
Run the AzureMove method for a file
:param args: type ArgumentParser arguments
"""
logging.info(f'Moving file {args.file} from {args.container_name} to {args.target_container} in Azure storage '
f'account {args.account_name}')
move_file = AzureMove(object_name=args.file,
container_name=args.container_name,
account_name=args.account_name,
passphrase=args.passphrase,
target_container=args.target_container,
path=args.reset_path,
storage_tier=args.storage_tier,
category='file')
move_file.main()
def folder_move(args):
"""
Run the AzureMove method for a folder
:param args: type ArgumentParser arguments
"""
logging.info(f'Moving folder {args.folder} from {args.container_name} to {args.target_container} in Azure storage '
f'account {args.account_name}')
move_folder = AzureMove(object_name=args.folder,
container_name=args.container_name,
account_name=args.account_name,
passphrase=args.passphrase,
target_container=args.target_container,
path=args.reset_path,
storage_tier=args.storage_tier,
category='folder')
move_folder.main()
def cli():
parser = ArgumentParser(description='Move containers, files, or folders in Azure storage')
# Create the parental parser, and the subparser
subparsers, parent_parser = create_parent_parser(parser=parser)
parent_parser.add_argument('-t', '--target_container',
required=True,
help='The target container to which the container/file/folder is to be moved '
'(this can be the same as the container_name if you want to move a file/folder'
                                    ' within a container)')
parent_parser.add_argument('-r', '--reset_path',
type=str,
help='Set the path of the container/file/folder within a folder in the target container '
'e.g. sequence_data/220202-m05722. If you want to place it directly in the '
                                    'container without any nesting, use \'\'')
parent_parser.add_argument('-s', '--storage_tier',
type=str,
default='Hot',
choices=['Hot', 'Cool', 'Archive'],
metavar='STORAGE_TIER',
help='Set the storage tier for the container/file/folder to be moved. '
'Options are "Hot", "Cool", and "Archive". Default is Hot')
# Container move subparser
container_move_subparser = subparsers.add_parser(parents=[parent_parser],
name='container',
description='Move a container in Azure storage',
formatter_class=RawTextHelpFormatter,
help='Move a container in Azure storage')
container_move_subparser.set_defaults(func=container_move)
# File move subparser
file_move_subparser = subparsers.add_parser(parents=[parent_parser],
name='file',
description='Move a file within Azure storage',
formatter_class=RawTextHelpFormatter,
help='Move a file within Azure storage')
file_move_subparser.add_argument('-f', '--file',
type=str,
required=True,
help='Name of blob file to move in Azure storage. '
'e.g. 2022-SEQ-0001_S1_L001_R1_001.fastq.gz')
file_move_subparser.set_defaults(func=file_move)
# Folder move subparser
folder_move_subparser = subparsers.add_parser(parents=[parent_parser],
name='folder',
description='Move a folder within Azure storage',
formatter_class=RawTextHelpFormatter,
help='Move a folder within Azure storage')
folder_move_subparser.add_argument('-f', '--folder',
type=str,
required=True,
help='Name of folder to move in Azure storage. '
'e.g. InterOp')
folder_move_subparser.set_defaults(func=folder_move)
# Set up the arguments, and run the appropriate subparser
arguments = setup_arguments(parser=parser)
# Return to the requested logging level, as it has been increased to WARNING to suppress the log being filled with
# information from azure.core.pipeline.policies.http_logging_policy
coloredlogs.install(level=arguments.verbosity.upper())
logging.info('Move complete')
# Prevent the arguments being printed to the console (they are returned in order for the tests to work)
sys.stderr = open(os.devnull, 'w')
return arguments
if __name__ == '__main__':
cli()
|
print('Valores de b:')
print('-------------')
print('1) b = 7')
print('2) b = 7')
print('3) b = 00000000000000000007')
print('4) b = 7 ')
print('5) b = 7%')
print()
print('Valores de d:')
print('-------------')
print('1) d = 2.208000')
print('2) d = 2')
print('3) d = 2.2')
print('4) d = 2.21')
print('5) d = 2.208')
print('6) d = 2.208')
print('7) d = 0000000000000002.208')
print('8) d = 2.208 ')
print('9) d = 2.21%')
|
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file")
parser.add_argument('-o', '--output',
                    help="Output path for subtitles (by default, subtitles are saved in "
                         "the same directory and name as the source path)")
parser.add_argument('-t', '--titles', nargs=2,
                    help="Two titles used to build the default output file name when "
                         "--output is not given")
def validate(args):
if not os.path.exists(args.source_path):
        print(f'source_path Error: File "{args.source_path}" does not exist!')
return False
if not os.path.isfile(args.source_path):
print(f'source_path Error: "{args.source_path}" is not a file!')
return False
if not args.titles or len(args.titles) != 2:
        print('titles Error: you must provide exactly two titles!')
return False
if args.output is None:
args.output = '-'.join(args.titles) + '.mp4'
return True
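
# Usage sketch (illustrative, not part of the original file): wiring the parser and the
# validation together; the default output name is built from the --titles values.
if __name__ == '__main__':
    args = parser.parse_args()
    if not validate(args):
        raise SystemExit(1)
    print(f'source: {args.source_path} -> output: {args.output}')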
|
from ._registry import DATALOADERS
from torch.utils.data import DataLoader
@DATALOADERS.register
class DataListLoader(DataLoader):
r"""Data loader which merges data objects from a
:class:`torch_geometric.data.dataset` to a python list.
.. note::
This data loader should be used for multi-gpu support via
:class:`torch_geometric.nn.DataParallel`.
Args:
dataset (Dataset): The dataset from which to load the data.
batch_size (int, optional): How many samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch (default: :obj:`False`)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):
super().__init__(
dataset, batch_size, shuffle,
collate_fn=lambda data_list: data_list, **kwargs)
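
# Usage sketch (illustrative; `MyGraphDataset` is a placeholder for any
# torch_geometric-style dataset registered elsewhere in the project):
#
#     loader = DataListLoader(MyGraphDataset(), batch_size=32, shuffle=True)
#     for data_list in loader:
#         # data_list is a plain Python list of up to 32 data objects, as expected by
#         # torch_geometric.nn.DataParallel
#         ...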
|
#=======================================================================
# Title: Python script to compare diBELLA and BELLA (alignment) output
# Author: G. Guidi
# Date: 11 Feb 2019
#=======================================================================
# Run: python3 checkOutput.py <file1> <file2>
# file1 is supposed to be the query and file2 the reference
import csv
import sys
import os
import random
cwd = os.getcwd()
pair = {}
list1 = [] # Reference pair names
list2 = [] # Query pair names
with open(sys.argv[1], 'r') as one, open(sys.argv[2], 'r') as two:
# Here, we compare only read names, not alignment scores, etc.
file1 = csv.reader(one, delimiter='\t')
for row in file1:
pair = [(row[0], row[1])]
list1.append(pair)
pair = [(row[1], row[0])] # Check reversed pair
list1.append(pair)
file2 = csv.reader(two, delimiter=' ')
for row in file2:
pair = [(row[2], row[3])] # diBELLA has names in position 2,3
list2.append(pair)
pair = [(row[3], row[2])] # Check reversed pair
list2.append(pair)
m = 0
e = 0
lines = 0
randid = random.randint(1,100000)
with open(cwd + '/missingPairj' + str(randid) + '.out', 'w') as missingPair:
for pair in list1: # BELLA/reference
lines = lines + 1
if pair not in list2: # diBELLA/query
for a, b in pair:
missingPair.write(a + '\t' + b + '\n')
m = m + 1 # number of missing lines
with open(cwd + '/extraPairj' + str(randid) + '.out', 'w') as extraPair:
for pair in list2: # diBELLA/query
if pair not in list1: # BELLA/reference
for a, b in pair:
extraPair.write(a + '\t' + b + '\n')
e = e + 1 # number of missing lines
print(lines, "lines in BELLA (reference)")
print(m, "missing lines in diBELLA (query)")
print(e, "extra lines in diBELLA (query)")
|
# coding: utf-8
# pip3 install spidev
# AI start button
import os
_FILE_DIR=os.path.abspath(os.path.dirname(__file__))
import spidev
import time
import sys
import subprocess
from subprocess import Popen
from lib.led import LED
from lib.spi import SPI
# SPI connector numbers for the start/test buttons
A1 = 1
A2 = 2
START_BUTTON_SPI_PIN = A1
TEST_BUTTON_SPI_PIN = A2
spi = SPI()
led = LED()
proc = None
try:
led.start('lightline')
cmd = "python "+_FILE_DIR+"/run_arm_ai.py"
cmd_test = "python "+_FILE_DIR+"/test/run_ai_test.py"
while True:
data = spi.readadc(START_BUTTON_SPI_PIN) # data: 0-1023
if data >= 1000:
led.stop()
led.start('light 7')
print("start ai")
proc = Popen(cmd,shell=True)
proc.wait()
led.stop()
led.start('lightline')
data = spi.readadc(TEST_BUTTON_SPI_PIN) # data: 0-1023
if data >= 1000:
led.stop()
led.start('light 7')
print("start ai")
proc = Popen(cmd_test,shell=True)
proc.wait()
led.stop()
led.start('lightline')
time.sleep(0.1)
except:
import traceback
traceback.print_exc()
finally:
    led.stop()
    # proc is None if no AI process was ever launched
    if proc is not None:
        proc.terminate()
    sys.exit(0)
|
from .Peer import Peer
from .PeerNetwork import PeerNetwork
from .NetworkHandler import NetworkHandler
|
from django.test import TestCase
from .views import get_dog
class DogTestCase(TestCase):
def test_get_dog(self):
self.assertNotEqual(get_dog(), None)
|
import time
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
lcd_columns = 16
lcd_rows = 2
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_RGB_I2C(i2c, lcd_columns, lcd_rows)
#lcd.color = [100, 0 , 0]
lcd.message = " Hello!"
time.sleep(1)
lcd.clear()
scroll_msg = " Sunseeker "
lcd.message = scroll_msg
print('Scrolling msg...'+scroll_msg)
for i in range(len(scroll_msg)):
time.sleep(0.5)
lcd.move_left()
print(scroll_msg[i], end ="")
lcd.clear()
print('\nEnd!')
time.sleep(1)
|
import os
import sys
import shutil
from distutils.dir_util import copy_tree
from zipfile import ZipFile
def removeDirectory(filePath):
if os.path.isdir(filePath):
        shutil.rmtree(filePath)
def copyFile(src, dest):
srcPath, ext = os.path.splitext(src)
    # If a zip archive is passed in, extract it first and then copy the extracted files
if ext == '.zip':
        # Remove the extraction directory if it already exists
removeDirectory(srcPath)
        # Create the extraction directory
os.mkdir(srcPath)
with ZipFile(src, 'r') as z:
z.extractall(srcPath)
else:
srcPath = src
#outputPath = dir_path + "\\..\\application\\app\\src\\extension\\live2d"
outputPath = dest
    # Copy the tree to the destination
copy_tree(srcPath, outputPath)
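
# Usage sketch (illustrative; the paths are hypothetical):
if __name__ == '__main__':
    # A .zip source is extracted next to itself first; a plain directory is copied as-is.
    copyFile('live2d_model.zip', os.path.join('application', 'app', 'src', 'extension', 'live2d'))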
|
__all__ = [ 'x', 'y', 'z']
w = 'w'
x = 'x'
y = 'y'
z = 'z'
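
# Note (added for clarity): with the __all__ above, `from <this module> import *` binds
# only x, y and z; w stays accessible via an explicit `from <this module> import w`.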
|
from PIL import Image
import numpy as np
import os
from configuration import *
val_percentage = 0.05
img_list = os.listdir(train_dir)
img_list = np.random.permutation(img_list)
val_data_num = int(len(img_list) * val_percentage)
train_data = img_list[val_data_num:]
val_data = img_list[:val_data_num]
for i, t in enumerate(train_data):
img_rgb = Image.open(os.path.join(train_dir, t)).convert('RGB')
img_rgb.save(os.path.join(split_train_dir, t))
    print('processed %d train images' % i)
for i, v in enumerate(val_data):
img_rgb = Image.open(os.path.join(train_dir, v)).convert('RGB')
img_rgb.save(os.path.join(split_val_dir, v))
    print('processed %d val images' % i)
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_healthbot.generated._client_factory import cf_bot
healthbot_bot = CliCommandType(
operations_tmpl='azext_healthbot.vendored_sdks.healthbot.operations._bots_operations#BotsOperations.{}',
client_factory=cf_bot)
with self.command_group('healthbot', healthbot_bot, client_factory=cf_bot, is_experimental=True) as g:
g.custom_command('list', 'healthbot_list')
g.custom_show_command('show', 'healthbot_show')
g.custom_command('create', 'healthbot_create', supports_no_wait=True)
g.custom_command('update', 'healthbot_update')
g.custom_command('delete', 'healthbot_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'healthbot_show')
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import os
from unittest import TestCase
from bigdl.nano.pytorch.vision.models import vision
from test.pytorch.utils._train_torch_lightning import train_with_linear_top_layer
batch_size = 256
num_workers = 0
data_dir = os.path.join(os.path.dirname(__file__), "../../data")
class TestVisionIPEX(TestCase):
def test_resnet18_ipex(self):
resnet18 = vision.resnet18(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
resnet18, batch_size, num_workers, data_dir,
use_ipex=True)
def test_resnet34_ipex(self):
resnet34 = vision.resnet34(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
resnet34, batch_size, num_workers, data_dir,
use_ipex=True)
def test_resnet50_ipex(self):
resnet50 = vision.resnet50(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
resnet50, batch_size, num_workers, data_dir,
use_ipex=True)
def test_mobilenet_v3_large_ipex(self):
mobilenet = vision.mobilenet_v3_large(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
mobilenet, batch_size, num_workers, data_dir,
use_ipex=True)
def test_mobilenet_v3_small_ipex(self):
mobilenet = vision.mobilenet_v3_small(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
mobilenet, batch_size, num_workers, data_dir,
use_ipex=True)
def test_mobilenet_v2_ipex(self):
mobilenet = vision.mobilenet_v2(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
mobilenet, batch_size, num_workers, data_dir,
use_ipex=True)
def test_shufflenet_ipex(self):
shufflenet = vision.shufflenet_v2_x1_0(
pretrained=False, include_top=False, freeze=True)
train_with_linear_top_layer(
shufflenet, batch_size, num_workers, data_dir,
use_ipex=True)
if __name__ == '__main__':
pytest.main([__file__])
|
# app.py
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
######## Example data, in sets of 3 ############
data = list(range(1,300,3))
print(data)
######## HOME ############
@app.route('/')
def test_page():
example_embed='Sending data... [this is text from python]'
# look inside `templates` and serve `index.html`
return render_template('index.html', embed=example_embed)
######## Example fetch ############
@app.route('/test', methods=['GET', 'POST'])
def testfn():
# POST request
if request.method == 'POST':
print(request.get_json()) # parse as JSON
return 'OK', 200
# GET request
else:
message = {'greeting':'Hello from Flask!'}
return jsonify(message) # serialize and use JSON headers
######## Data fetch ############
@app.route('/getdata/<transaction_id>/<second_arg>', methods=['GET','POST'])
def datafind(transaction_id,second_arg):
# POST request
if request.method == 'POST':
print('Incoming..')
        print(request.get_data(as_text=True)) # parse as text
return 'OK', 200
# GET request
else:
message = 't_in = %s ; result: %s ; opt_arg: %s'%(transaction_id, data[int(transaction_id)], second_arg)
return message #jsonify(message) # serialize and use JSON headers
# run app
app.run(debug=True)
|
from flask import Flask, Blueprint
from flask_sockets import Sockets
html = Blueprint(r'html', __name__)
ws = Blueprint(r'ws', __name__)
@html.route('/')
def hello():
return 'Hello World!'
@ws.route('/echo')
def echo_socket(socket):
while not socket.closed:
message = socket.receive()
socket.send(message)
app = Flask(__name__)
sockets = Sockets(app)
app.register_blueprint(html, url_prefix=r'/')
sockets.register_blueprint(ws, url_prefix=r'/')
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('127.0.0.1', 3000), app, handler_class=WebSocketHandler)
server.serve_forever()
|
# explorer_panels.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import scqubits.core.units as units
import scqubits.utils.sweep_plotting as splot
def display_bare_spectrum(sweep, subsys, param_val, fig_ax):
title = "bare spectrum: subsystem {} ({})".format(
sweep.get_subsys_index(subsys), subsys._sys_type
)
__ = splot.bare_spectrum(sweep, subsys, title=title, fig_ax=fig_ax)
_, axes = fig_ax
axes.axvline(param_val, color="gray", linestyle=":")
def display_bare_wavefunctions(sweep, subsys, param_val, fig_ax):
title = "wavefunctions: subsystem {} ({})".format(
sweep.get_subsys_index(subsys), subsys._sys_type
)
__ = splot.bare_wavefunction(sweep, param_val, subsys, title=title, fig_ax=fig_ax)
def display_dressed_spectrum(
sweep, initial_bare, final_bare, energy_initial, energy_final, param_val, fig_ax
):
energy_difference = energy_final - energy_initial
title = r"{} $\rightarrow$ {}: {:.4f} {}".format(
initial_bare, final_bare, energy_difference, units.get_units()
)
__ = splot.dressed_spectrum(sweep, title=title, fig_ax=fig_ax)
_, axes = fig_ax
axes.axvline(param_val, color="gray", linestyle=":")
axes.scatter([param_val] * 2, [energy_initial, energy_final], s=40, c="gray")
def display_n_photon_qubit_transitions(
sweep, photonnumber, initial_bare, param_val, fig_ax
):
title = r"{}-photon qubit transitions, {} $\rightarrow$".format(
photonnumber, initial_bare
)
__ = splot.n_photon_qubit_spectrum(
sweep,
photonnumber,
initial_state_labels=initial_bare,
title=title,
fig_ax=fig_ax,
)
_, axes = fig_ax
axes.axvline(param_val, color="gray", linestyle=":")
def display_chi_01(data_dict, qbt_index, osc_index, param_index, fig_ax):
datastore = data_dict[(osc_index, qbt_index)]
__ = splot.chi_01(datastore, param_index=param_index, fig_ax=fig_ax)
_, axes = fig_ax
axes.axvline(datastore.param_vals[param_index], color="gray", linestyle=":")
def display_charge_matrixelems(
data_dict, initial_bare, qbt_index_subsys, param_val, fig_ax
):
qbt_index, qbt_subsys = qbt_index_subsys
bare_qbt_initial = initial_bare[qbt_index]
title = r"charge matrix elements for {} [{}]".format(
type(qbt_subsys).__name__, qbt_index
)
__ = splot.charge_matrixelem(
data_dict[qbt_index_subsys],
qbt_index_subsys,
bare_qbt_initial,
title=title,
fig_ax=fig_ax,
)
_, axes = fig_ax
axes.axvline(param_val, color="gray", linestyle=":")
|
import time
from pycompss.api.api import compss_barrier
def measure(name, dataset_name, model, x, y=None):
print("==== STARTING ====", name)
compss_barrier()
s_time = time.time()
model.fit(x, y)
compss_barrier()
print("==== OUTPUT ==== ", dataset_name, time.time() - s_time)
|
import sys
sys.path.insert(0, '/srv/wcdo/src_viz')
from dash_apps import *
#### DATA ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
dash_app1 = Dash(__name__, server = app, url_base_pathname= webtype + '/language_territories_mapping/', external_stylesheets=external_stylesheets, external_scripts=external_scripts)
conn = sqlite3.connect(databases_path+'diversity_categories_production.db'); cursor = conn.cursor();
query = 'SELECT WikimediaLanguagecode, languagenameEnglishethnologue, territoryname, territorynameNative, QitemTerritory, demonym, demonymNative, ISO3166, ISO31662, regional, country, indigenous, languagestatuscountry, officialnationalorregional, region, subregion, intermediateregion FROM wikipedia_languages_territories_mapping;'
df = pd.read_sql_query(query, conn)
#df = df[['territoryname','territorynameNative','QitemTerritory','WikimediaLanguagecode','demonym','demonymNative','ISO3166','ISO31662']]
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('-','_')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('be_tarask', 'be_x_old')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('nan', 'zh_min_nan')
df = df.set_index('WikimediaLanguagecode')
df['Language Name'] = pd.Series(languages[['languagename']].to_dict('dict')['languagename'])
df = df.reset_index()
columns_dict = {'Language Name':'Language','WikimediaLanguagecode':'Wiki','QitemTerritory':'WD Qitem','territoryname':'Territory','territorynameNative':'Territory (Local)','demonymNative':'Demonyms (Local)','ISO3166':'ISO 3166', 'ISO31662':'ISO 3166-2','country':'Country','region':'Region','subregion':'Subregion'}
df=df.rename(columns=columns_dict)
### DASH APP ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
title = 'Language Territories Mapping'
dash_app1.title = title+title_addenda
dash_app1.layout = html.Div([
navbar,
html.H3(title, style={'textAlign':'center'}),
dcc.Markdown(
'''This page contains a copy of the latest version of the **Language Territories Mapping database** (see wikipedia_language_territories_mapping.csv in [github project page](https://github.com/marcmiquel/WCDO/tree/master/language_territories_mapping)). The first version of this database has been generated using Ethnologue,
Wikidata and Wikipedia language pages. Wikimedians are invited to suggest changes by e-mailing [tools.wcdo@tools.wmflabs.org](mailto:tools.wcdo@tools.wmflabs.org).
The database contains all the territories (political divisions of first and second level) in which a language
    is spoken because it is indigenous or official, along with some specific metadata used in the generation of
Cultural Context Content (CCC) dataset.
The following table is a reduced version of the database with the Language name, wikicode, Wikidata Qitem for
the territory, territory in native language, demonyms in native language, ISO 3166 and ISO 3166-2, whereas
the full database includes the Qitem for the language, language names in Native languages among other information.
Additionally, the full table is extended with the database country_regions.csv, which presents an equivalence
table between countries, world regions (continents) and subregions (see country_regions.csv in the github).'''.replace(' ', '')),
dash_table.DataTable(
id='datatable-languageterritories',
columns=[
{"name": i, "id": i, "deletable": True, "selectable": True} for i in ['Wiki','Language','WD Qitem','Territory','Territory (Local)','ISO 3166','ISO 3166-2','Region','Subregion']
],
data=df.to_dict('records'),
filter_action="native",
sort_action="native",
sort_mode="multi",
),
footbar,
], className="container")
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
#### CALLBACKS ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# none by now.
|
print('=-='*20)
print('Analisador de Triângulo')
print('=-='*20)
a = float(input('Informe o primeiro lado :'))
b = float(input('Informe o segundo lado: '))
c = float(input('Informe o terceiro lado: '))
if a < b + c and b < a + c and c < a + b:
print('Pode ser um triângulo')
else:
print('Não pode ser um triângulo')
|
from . import *
from bfg9000.tools.doppel import Doppel
class TestDoppel(ToolTestCase):
tool_type = Doppel
def test_env(self):
with mock.patch('bfg9000.shell.which', return_value=['command']):
self.assertIsInstance(self.env.tool('doppel'), Doppel)
def test_kind_args(self):
self.assertEqual(type(self.tool.kind_args('program')), list)
self.assertEqual(type(self.tool.kind_args('data')), list)
self.assertRaises(ValueError, self.tool.kind_args, 'unknown')
def test_call_onto(self):
self.assertEqual(self.tool('onto', 'src', 'dst'),
[self.tool, '-p', 'src', 'dst'])
def test_call_into(self):
self.assertEqual(self.tool('into', 'src', 'dst'),
[self.tool, '-ipN', 'src', 'dst'])
self.assertEqual(self.tool('into', ['src1', 'src2'], 'dst'),
[self.tool, '-ipN', 'src1', 'src2', 'dst'])
self.assertEqual(self.tool('into', 'src', 'dst', directory='dir'),
[self.tool, '-ipN', '-C', 'dir', 'src', 'dst'])
def test_call_archive(self):
self.assertEqual(self.tool('archive', 'src', 'dst', format='tar'),
[self.tool, '-ipN', '-f', 'tar', 'src', 'dst'])
self.assertEqual(
self.tool('archive', ['src1', 'src2'], 'dst', format='tar'),
[self.tool, '-ipN', '-f', 'tar', 'src1', 'src2', 'dst']
)
self.assertEqual(
self.tool('archive', 'src', 'dst', directory='dir', format='tar'),
[self.tool, '-ipN', '-f', 'tar', '-C', 'dir', 'src', 'dst']
)
self.assertEqual(
self.tool('archive', 'src', 'dst', format='tar',
dest_prefix='pre'),
[self.tool, '-ipN', '-f', 'tar', '-P', 'pre', 'src', 'dst']
)
def test_call_invalid(self):
self.assertRaises(TypeError, self.tool, 'unknown', 'src', 'dst')
|
from autokeras.image.image_supervised import ImageClassifier, ImageRegressor
from autokeras.text.text_supervised import TextClassifier, TextRegressor
from autokeras.net_module import CnnGenerator, MlpModule
|
from dynamic_fixtures.fixtures.basefixture import BaseFixture
class Fixture(BaseFixture):
pass
|
import os
TITLE = "epg-grabber"
EPG_XMLTV_TIMEFORMAT = "%Y%m%d%H%M%S"
CONFIG_REGEX = r"^[-\w\s]+(?:;[-.&\w\s]*)$"
PERIOD = "."
SITES_DIR = "sites"
METADATA_DIR = os.path.join(SITES_DIR, "channels_metadata")
CONFIG_DIR = os.path.join(SITES_DIR, "channels_config")
EMPTY_CONFIG_ERROR_MESSAGE = """
The URL {config_url} does not contain any valid configurations.
The file should contain a list of site configuration as below:
mewatch;Channel5.Sg
mewatch;Channel8.Sg
mewatch;ChannelU.Sg"""
TESTS_FILE = "tests.txt"
DEVELOP_FILE = "local.txt"
|
##########################################################################################
# Wislight lightbulb commands
##########################################################################################
import pexpect, time, argparse
class BulbWislight():
DEVICE = "98:7B:F3:6C:0E:09"
color = [0,0,0]
brightness = 0
def __init__(self):
# self.saveState()
self.brightness=255
def sendCommand(self,cmd):
cmd = list(cmd)
package = ''.join('%02x'%x for x in cmd)
command = "char-write-cmd 0x25 "+package
print(command)
child = pexpect.spawn("gatttool -I")
child.sendline("connect {0}".format(self.DEVICE))
child.expect("Connection successful", timeout=5)
child.sendline(command)
child.expect(pexpect.TIMEOUT, timeout=0.5)
def saveState(self):
command = "char-read-uuid 0xfff1"
child = pexpect.spawn("gatttool -I")
child.sendline("connect {0}".format(self.DEVICE))
child.expect("Connection successful", timeout=5)
child.sendline(command)
child.expect("value:", timeout=5)
child.expect("\r\n", timeout=5)
result = child.before
state = str(result).split(' ')
self.color[0] = int(state[2],16)
self.color[1] = int(state[3],16)
self.color[2] = int(state[4],16)
self.brightness = int(state[5],16)
return child.before
def switchOn(self):
if self.brightness == 0:
self.brightness = 100
self.sendCommand((0xa1, self.color[0], self.color[1], self.color[2], self.brightness))
def switchOff(self):
self.sendCommand((0xa1, self.color[0], self.color[1], self.color[2], 0x00))
def setBrightness(self,value):
self.sendCommand((0xa1, self.color[0], self.color[1], self.color[2], value))
self.brightness = value
def set_color(self,r,g,b,i):
self.sendCommand((0xa1, r, g, b, i))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-r",type=int)
parser.add_argument("-g",type=int)
parser.add_argument("-b",type=int)
parser.add_argument("-i",type=int)
args = parser.parse_args()
bulb = BulbWislight()
bulb.set_color(args.r, args.g, args.b, args.i)
|
from unittest import TestCase
from esrally import exceptions
from esrally.utils import versions
class VersionsTests(TestCase):
def test_is_version_identifier(self):
self.assertFalse(versions.is_version_identifier(None))
self.assertFalse(versions.is_version_identifier(""))
self.assertFalse(versions.is_version_identifier(" \t "))
self.assertFalse(versions.is_version_identifier("5-ab-c"))
self.assertFalse(versions.is_version_identifier("5.1"))
self.assertFalse(versions.is_version_identifier("5"))
self.assertTrue(versions.is_version_identifier("5.0.0"))
self.assertTrue(versions.is_version_identifier("1.7.3"))
self.assertTrue(versions.is_version_identifier("20.3.7-SNAPSHOT"))
self.assertFalse(versions.is_version_identifier(None, strict=False))
self.assertFalse(versions.is_version_identifier("", strict=False))
self.assertTrue(versions.is_version_identifier("5.1", strict=False))
self.assertTrue(versions.is_version_identifier("5", strict=False))
self.assertTrue(versions.is_version_identifier("23", strict=False))
self.assertTrue(versions.is_version_identifier("20.3.7-SNAPSHOT", strict=False))
def test_finds_components_for_valid_version(self):
self.assertEqual((5, 0, 3, None), versions.components("5.0.3"))
self.assertEqual((5, 0, 3, "SNAPSHOT"), versions.components("5.0.3-SNAPSHOT"))
self.assertEqual((25, None, None, None), versions.components("25", strict=False))
self.assertEqual((5, 1, None, None), versions.components("5.1", strict=False))
def test_major_version(self):
self.assertEqual(5, versions.major_version("5.0.3"))
self.assertEqual(5, versions.major_version("5.0.3-SNAPSHOT"))
self.assertEqual(25, versions.major_version("25.0.3"))
def test_components_ignores_invalid_versions(self):
with self.assertRaises(exceptions.InvalidSyntax) as ctx:
versions.components("5.0.0a")
self.assertEqual(r"version string '5.0.0a' does not conform to pattern '^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$'", ctx.exception.args[0])
def test_versions_parses_correct_version_string(self):
self.assertEqual(["5.0.3", "5.0", "5"], versions.versions("5.0.3"))
self.assertEqual(["5.0.0-SNAPSHOT", "5.0.0", "5.0", "5"], versions.versions("5.0.0-SNAPSHOT"))
self.assertEqual(["10.3.63", "10.3", "10"], versions.versions("10.3.63"))
def test_versions_rejects_invalid_version_strings(self):
with self.assertRaises(exceptions.InvalidSyntax) as ctx:
versions.versions("5.0.0a-SNAPSHOT")
self.assertEqual(r"version string '5.0.0a-SNAPSHOT' does not conform to pattern '^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$'"
, ctx.exception.args[0])
def test_find_best_match(self):
self.assertEqual("master", versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "master"], "6.0.0-alpha1"),
"Assume master for versions newer than latest alternative available")
self.assertEqual("5", versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "master"], "5.1.0-SNAPSHOT"),
"Best match for specific version")
self.assertEqual("master", versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "master"], None),
"Assume master on unknown version")
self.assertIsNone(versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "master"], "0.4"), "Reject versions that are too old")
|
#coding: utf-8
'''
Machinery needed to register 'UCS' encoding as a valid python encoding.
To use UCS encoding, call `ucs_codec.register_UCS()` first.
Created on Feb 4, 2016
@author: mike
'''
from __future__ import print_function, unicode_literals
import codecs
from cslavonic.ucs_decode import ucs_encode, ucs_decode
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input_, errors='strict'):
return ucs_encode(input_, errors)
def decode(self, input_, errors='strict'):
return ucs_decode(input_, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input_, final=False):
return ucs_encode(input_, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input_, final=False):
return ucs_decode(input_, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
_ucs_codec_info = codecs.CodecInfo(
name='UCS',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
def register_UCS():
def search(encoding):
if encoding in ('UCS', 'ucs'):
return _ucs_codec_info
codecs.register(search)
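# Minimal usage sketch (not part of the original module): register the codec,
# then round-trip a string. The sample text is illustrative; the exact result
# depends on ucs_encode/ucs_decode in cslavonic.ucs_decode.
if __name__ == '__main__':
    register_UCS()
    encoded = codecs.encode('example text', 'UCS')
    decoded = codecs.decode(encoded, 'UCS')
    print(decoded)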
|
import pytest
from tests.mocks import MockOktaClient
from okta.models import DomainResponse
class TestDomainResource:
"""
Integration Tests for the Domain Resource
"""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_list_domains(self, fs):
client = MockOktaClient(fs)
domains_list_resp, _, err = await client.list_domains()
for domain in domains_list_resp.domains:
assert isinstance(domain, DomainResponse)
assert domain.domain
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_create_and_delete_domain(self, fs):
client = MockOktaClient(fs)
domain_config = {
"domain": "login.example.com",
"certificateSourceType": "MANUAL"
}
domain, _, err = await client.create_domain(domain_config)
assert err is None
try:
assert isinstance(domain, DomainResponse)
assert domain.id
finally:
_, err = await client.delete_domain(domain.id)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_domain(self, fs):
client = MockOktaClient(fs)
domain_config = {
"domain": "login.example.com",
"certificateSourceType": "MANUAL"
}
domain, _, err = await client.create_domain(domain_config)
assert err is None
try:
get_domain, _, err = await client.get_domain(domain.id)
assert isinstance(domain, DomainResponse)
assert domain.id == get_domain.id
assert domain.domain == get_domain.domain
finally:
_, err = await client.delete_domain(domain.id)
assert err is None
|
"""
Database Input / Output functions
"""
import pandas as pd
import pymysql
import IEDC_pass
def db_conn(fn):
"""
Decorator function to provide a connection to a function. This was originally inspired by
http://initd.org/psycopg/articles/2010/10/22/passing-connections-functions-using-decorator/
This ensures that the connection is closed after the function completes, whether it finishes gracefully or
raises an exception. This is good practice so that there are not too many open connections on the server.
It also does a rollback() in case of an exception.
Possibly this could also be solved more simply using `with pymysql.connect()`.
"""
def db_conn_(*args, **kwargs):
conn = pymysql.connect(host=IEDC_pass.IEDC_server,
port=int(IEDC_pass.IEDC_port),
user=IEDC_pass.IEDC_user,
passwd=IEDC_pass.IEDC_pass,
db=IEDC_pass.IEDC_database,
charset='utf8')
try:
rv = fn(conn, *args, **kwargs)
except (KeyboardInterrupt, SystemExit):
conn.rollback()
# conn.close()
print("Keyboard interupt - don't worry connection was closed")
raise
except BaseException as e:
conn.rollback()
# conn.close()
print("Exception: %s" % e)
print("Something went wrong! But I was smart and closed the connection!")
raise
finally:
pass
conn.close()
return rv
return db_conn_
def db_cursor_write(fn):
"""
Decorator function for the database cursor (writing)
http://initd.org/psycopg/articles/2010/10/22/passing-connections-functions-using-decorator/
"""
def db_cursor_write_(*args, **kwargs):
conn = pymysql.connect(host=IEDC_pass.IEDC_server,
port=int(IEDC_pass.IEDC_port),
user=IEDC_pass.IEDC_user,
passwd=IEDC_pass.IEDC_pass,
db=IEDC_pass.IEDC_database,
charset='utf8')
curs = conn.cursor()
try:
#print curs, args, kwargs
rv = fn(curs, *args, **kwargs)
except (KeyboardInterrupt, SystemExit):
#print args, kwargs
conn.rollback()
conn.close()
print("Keyboard interupt - don't worry connection was closed")
raise
except BaseException as error:
#print args, kwargs
conn.rollback()
conn.close()
print("Exception: %s" % error)
print ("But I was smart and closed the connection!")
raise
else:
conn.commit()
curs.close()
return rv
return db_cursor_write_
@db_conn
def get_sql_table_as_df(conn, table, columns=['*'], db=IEDC_pass.IEDC_database,
index='id', addSQL=''):
"""
Download a table from the SQL database and return it as a nice dataframe.
:param conn: Database connection. No need to worry. The decorator takes care of this.
:param table: table name
:param columns: List of columns to get from the SQL table
:param db: database name
:param index: Column name to be used as dataframe index. String.
:param addSQL: Add more arguments to the SQL query, e.g. "WHERE classification_id = 1"
:return: Dataframe of SQL table
"""
# Don't show this to anybody, please. SQL injections are a big nono...
# https://www.w3schools.com/sql/sql_injection.asp
columns = ', '.join(c for c in columns if c not in "'[]")
df = pd.read_sql("SELECT %s FROM %s.%s %s;" % (columns, db, table, addSQL),
conn, index_col=index)
return df
@db_cursor_write
def run_this_command(curs, sql_cmd):
curs.execute(sql_cmd)
@db_cursor_write
def dict_sql_insert(curs, table, d):
# https://stackoverflow.com/a/14834646/2075003
placeholder = ", ".join(["%s"] * len(d))
sql = "INSERT INTO `{table}` ({columns}) VALUES ({values});".format(table=table, columns=",".join(d.keys()),
values=placeholder)
curs.execute(sql, list(d.values()))
@db_cursor_write
def bulk_sql_insert(curs, table, cols, data):
"""
:param curs:
:param table:
:param cols:
:param data: data as list
:return:
"""
sql = """
INSERT INTO %s
(%s)
VALUES (%s);
""" % (table, ', '.join(cols), ','.join([' %s' for _ in cols]))
curs.executemany(sql, data)
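# Usage sketch (hypothetical table and column names; requires the MySQL server
# configured in IEDC_pass to be reachable). The decorators inject the
# connection/cursor, so callers simply omit that first argument.
if __name__ == '__main__':
    df = get_sql_table_as_df('datasets', columns=['id', 'name'], addSQL="WHERE id < 100")
    dict_sql_insert('import_log', {'action': 'export', 'rows': int(len(df))})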
|
"""Pytest conftest fixtures."""
import os
import sys
import pytest
from prometheuspvesd.utils import Singleton
@pytest.fixture(autouse=True)
def reset_singletons():
Singleton._instances = {}
@pytest.fixture(autouse=True)
def reset_os_environment():
os.environ = {}
@pytest.fixture(autouse=True)
def reset_sys_argv():
sys.argv = ["prometheus-pve-sd"]
|
import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation
g = 9.80665 # m per sec^2
### Utilities functions ###
# check for axis of specified length and return its position
def matchAxis(shape,l=3):
if l not in shape:
print("No axis of length ",l)
return None
# find matching axis
ax = np.where(np.array(shape)==l)[0][0]
return ax
# magnitude calculations for triaxial signals
def getMagnitude(signal):
ax = matchAxis(signal.shape)
return signal.apply(lambda x: (x.iloc[0]**2+x.iloc[1]**2+x.iloc[2]**2)**.5,axis=ax)
### User-facing processing functions for use with data classes ###
def vectorsToRotation(vec1, vec2):
''' Find the rotation that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return Rotation: scipy Rotation object from rotation matrix
https://www.theochem.ru.nl/~pwormer/Knowino/knowino.org/wiki/Rotation_matrix.html#Vector_rotation
'''
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return Rotation.from_matrix(rotation_matrix)
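# Illustrative check (not part of the original module): applying the returned
# rotation to the normalized source vector should reproduce the normalized
# destination vector.
#   r = vectorsToRotation(np.array([0.0, 0.0, 1.0]), np.array([1.0, 1.0, 0.0]))
#   r.apply([0.0, 0.0, 1.0])            # ~ [0.7071, 0.7071, 0.0]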
# calculate euler angles from accel data, when magnitude close to 1g (within tolerance bounds)
def getInclinations(accelTri, accelMag = None, tol=None, gAxis = 0, eAxis = 'XYZ'):
'''
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_euler.html
'''
ax = matchAxis(accelTri.shape)
refVector = np.zeros((3,1))
refVector[gAxis] = 1
# function for apply method
f = lambda x: pd.Series(vectorsToRotation(refVector,x.values).as_euler(eAxis,True),
index=[c for c in eAxis])
I = accelTri.apply(f,axis=ax)
if tol is not None:
if accelMag is None:
accelMag = getMagnitude(accelTri)
dropInds = (accelMag>(1+tol)*g) | (accelMag<(1-tol)*g)
I.loc[dropInds,:] = np.nan
return I
def getSpectrogram(signal):
pass
|
from django.urls import path
# Create your views here.
from . import views
urlpatterns = [
path("signup/", views.SignUpView.as_view(), name="signup"),
]
|
# coding:utf-8
import json
import pandas as pd
import time
def read_acc_value(f) :
with open(f) as fp:
    data = fp.read()
accValues = json.loads(data)['accValues']
X = []
Y = []
for x, y in accValues:
t = time.localtime(x / 1000)
x = time.strftime("%Y-%m-%d", t)
X.append(x)
Y.append(y)
df = pd.DataFrame({
'date': X,
'value': Y
})
return df
|
# https://leetcode.com/problems/sort-colors/
class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
# for i in range(0,len(nums)):
i = 0
iterator = 0
while iterator < len(nums):
if nums[i] == 0:
element = nums[i]
nums.pop(i)
nums.insert(0, element)
# i += 1
elif nums[i] == 2:
element = nums[i]
nums.pop(i)
nums.append(element)
i -= 1
i += 1
iterator += 1
# numList = [2, 2, 1, 0, 2, 1, 0]
# numList = [2,0,2,1,1,0]
numList = []
Solution().sortColors(numList)
print('**********')
print(numList)
# print (Solution().sortColors([3, 3, 2, 1, 3, 2, 1]))
|
''' orchestration module '''
''' imports '''
# functions
#from .funcs import func
import numpy as np
'''
'''
''' orchestration functions '''
def func(var):
''' var
Args:
var (np.array): track buffer
Returns:
(np.array): var
'''
# return
return var
|
import numpy as np
import awkward
from awkward import JaggedArray
#for later
#func = numbaize(formula,['p%i'%i for i in range(nParms)]+[varnames[i] for i in range(nEvalVars)])
def convert_jec_txt_file(jecFilePath):
jec_f = open(jecFilePath,'r')
layoutstr = jec_f.readline().strip().strip('{}')
jec_f.close()
name = jecFilePath.split('/')[-1].split('.')[0]
layout = layoutstr.split()
if not layout[0].isdigit():
raise Exception('First column of JEC descriptor must be a digit!')
#setup the file format
nBinnedVars = int(layout[0])
nBinColumns = 2*nBinnedVars
nEvalVars = int(layout[nBinnedVars+1])
formula = layout[nBinnedVars+nEvalVars+2]
nParms = 0
while( formula.count('[%i]'%nParms) ):
formula = formula.replace('[%i]'%nParms,'p%i'%nParms)
nParms += 1
#protect function names with vars in them
funcs_to_cap = ['max','exp']
for f in funcs_to_cap:
formula = formula.replace(f,f.upper())
templatevars = ['x','y','z','w','t','s']
varnames = [layout[i+nBinnedVars+2] for i in range(nEvalVars)]
for find,replace in zip(templatevars,varnames):
formula = formula.replace(find,replace)
#restore max
for f in funcs_to_cap:
formula = formula.replace(f.upper(),f)
nFuncColumns = 2*nEvalVars + nParms
nTotColumns = nFuncColumns + 1
#parse the columns
minMax = ['Min','Max']
columns = []
dtypes = []
offset = 1
for i in range(nBinnedVars):
columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
dtypes.extend(['<f8','<f8'])
columns.append('NVars')
dtypes.append('<i8')
offset += nBinnedVars + 1
for i in range(nEvalVars):
columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
dtypes.extend(['<f8','<f8'])
for i in range(nParms):
columns.append('p%i'%i)
dtypes.append('<f8')
pars = np.genfromtxt(jecFilePath,
dtype=tuple(dtypes),
names=tuple(columns),
skip_header=1,
unpack=True,
encoding='ascii'
)
#the first bin is always usual for JECs
#the next bins may vary in number, so they're jagged arrays... yay
bins = {}
offset_col = 0
offset_name = 1
bin_order = []
for i in range(nBinnedVars):
binMins = None
binMaxs = None
if i == 0:
binMins = np.unique(pars[columns[0]])
binMaxs = np.unique(pars[columns[1]])
bins[layout[i+offset_name]] = np.union1d(binMins,binMaxs)
else:
counts = np.zeros(0,dtype=np.int)
allBins = np.zeros(0,dtype=np.double)
for binMin in bins[bin_order[0]][:-1]:
binMins = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col]])
binMaxs = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col+1]])
theBins = np.union1d(binMins,binMaxs)
allBins = np.append(allBins,theBins)
counts = np.append(counts,theBins.size)
bins[layout[i+offset_name]] = JaggedArray.fromcounts(counts,allBins)
bin_order.append(layout[i+offset_name])
offset_col += 1
#skip nvars to the variable columns
#the columns here define clamps for the variables defined in columns[]
# ----> clamps can be different from bins
# ----> if there is more than one binning variable this array is jagged
# ----> just make it jagged all the time
binshapes = tuple([bins[thebin].size-1 for thebin in bin_order])
clamp_mins = {}
clamp_maxs = {}
var_order = []
offset_col = 2*nBinnedVars+1
offset_name = nBinnedVars + 2
jagged_counts = np.ones(bins[bin_order[0]].size-1,dtype=np.int)
if len(bin_order) > 1:
jagged_counts = np.maximum(bins[bin_order[1]].counts - 1,0) #need counts-1 since we only care about Nbins
for i in range(nEvalVars):
clamp_mins[layout[i+offset_name]] = JaggedArray.fromcounts(jagged_counts,np.atleast_1d(pars[columns[i+offset_col]]))
clamp_maxs[layout[i+offset_name]] = JaggedArray.fromcounts(jagged_counts,np.atleast_1d(pars[columns[i+offset_col+1]]))
var_order.append(layout[i+offset_name])
offset_col += 1
#now get the parameters, which we will look up with the clamps
parms = []
parm_order = []
offset_col = 2*nBinnedVars+1 + 2*nEvalVars
for i in range(nParms):
parms.append(JaggedArray.fromcounts(jagged_counts,pars[columns[i+offset_col]]))
parm_order.append('p%i'%(i))
wrapped_up = {}
wrapped_up[(name,'jet_energy_corrector')] = (formula,
(bins,bin_order),
(clamp_mins,clamp_maxs,var_order),
(parms,parm_order))
return wrapped_up
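# Usage sketch (the file name below is hypothetical; any single JEC correction
# level text file with the layout parsed above should work). The returned dict
# is keyed by (file name stem, 'jet_energy_corrector').
#   corrections = convert_jec_txt_file('Summer16_23Sep2016V4_MC_L2Relative_AK4PFchs.txt')
#   formula, bins, clamps, parms = corrections[('Summer16_23Sep2016V4_MC_L2Relative_AK4PFchs',
#                                               'jet_energy_corrector')]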
|
import types
__docformat__ = "restructuredtext"
def walk_recursive_generators(generator):
"""Walk a tree of generators without yielding things recursively.
Let's suppose you have this:
>>> def generator0():
... yield 3
... yield 4
...
>>> def generator1():
... yield 2
... for i in generator0():
... yield i
... yield 5
...
>>> def generator2():
... yield 1
... for i in generator1():
... yield i
... yield 6
...
>>> for i in generator2():
... print i
...
1
2
3
4
5
6
Notice the way the generators are recursively yielding values. This
library uses a technique called "bounce" that is usually used to
implement stackless interpreters. It lets you write:
>>> def generator0():
... yield 3
... yield 4
...
>>> def generator1():
... yield 2
... yield generator0()
... yield 5
...
>>> def generator2():
... yield 1
... yield generator1()
... yield 6
...
>>> for i in walk_recursive_generators(generator2()):
... print i
...
1
2
3
4
5
6
Look Ma! No recursive yields!
"""
stack = [generator]
while stack:
for x in stack[-1]:
if isinstance(x, types.GeneratorType):
stack.append(x) # Recurse.
break
else:
yield x
else:
stack.pop()
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
import os
import random
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import time
import cv2
##################################################################
from dataset import UnalignedDataset
from model_base import ResNetBlock, Generator, Discriminator
from model_cyclegan import CycleGAN
##################################################################
def train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,
num_epoch, num_epoch_resume, save_epoch_freq):
model = CycleGAN(log_dir=log_dir, device=device, lr=lr, beta1=beta1,
lambda_idt=lambda_idt, lambda_A=lambda_A, lambda_B=lambda_B, lambda_mask=lambda_mask)
if num_epoch_resume != 0:
model.log_dir = 'logs'
print('load model {}'.format(num_epoch_resume))
model.load('epoch' + str(num_epoch_resume))
writer = SummaryWriter(log_dir)
for epoch in range(num_epoch):
print('epoch {} started'.format(epoch + 1 + num_epoch_resume))
t1 = time.perf_counter()
losses = model.train(train_loader)
t2 = time.perf_counter()
get_processing_time = t2 - t1
print('epoch: {}, elapsed_time: {} sec losses: {}'
.format(epoch + 1 + num_epoch_resume, get_processing_time, losses))
writer.add_scalar('loss_G_A', losses[0], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_D_A', losses[1], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_G_B', losses[2], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_D_B', losses[3], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_cycle_A', losses[4], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_cycle_B', losses[5], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_idt_A', losses[6], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_idt_B', losses[7], epoch + 1 + num_epoch_resume)
writer.add_scalar('loss_mask', losses[8], epoch + 1 + num_epoch_resume)
if (epoch + 1 + num_epoch_resume) % save_epoch_freq == 0:
model.save('epoch%d' % (epoch + 1 + num_epoch_resume))
if __name__ == '__main__':
# random seeds
torch.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
# image
height = 128
width = 256
# training details
batch_size = 1
lr = 0.0002 # initial learning rate for adam
beta1 = 0.5 # momentum term of adam
num_epoch = 100
num_epoch_resume = 0
save_epoch_freq = 1
# weights of loss function
# lambda_idt = 5
# lambda_A = 10.0
# lambda_B = 10.0
# lambda_mask = 10.0
lambda_idt = 5.0
lambda_A = 10.0
lambda_B = 10.0
lambda_mask = 0
# files, dirs
log_dir = 'logs'
# gpu
device = torch.device("cuda:0" if torch.cuda.is_available else "cpu")
print('device {}'.format(device))
# dataset
train_dataset = UnalignedDataset(is_train=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# train
train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,
num_epoch, num_epoch_resume, save_epoch_freq)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Exercise building subvectors out of existing vectors
"""
def test():
# package access
import gsl
# pick a size
n = 20
# make one
v = gsl.vector(shape=n)
# fill it
for i in range(n): v[i] = i
# show me
# print('v:')
# v.print(format='{:6.2f}', indent=' '*4)
# pick some parameters
start = int(n/4)
shape = n - start
# make a subvector
s = v.view(start=start, shape=shape)
# show me
# print('s:')
# s.print(format='{:6.2f}', indent=' '*4)
# check the length
assert len(s) == shape
# check the contents
for i in range(shape):
assert s[i] == start + i
# now modify
s.fill(0)
# and check
for i in range(shape):
assert v[start+i] == 0
# all done
return
# main
if __name__ == '__main__':
test()
# end of file
|
import argparse
import json
from pathlib import Path
from PIL import Image
INFO = {
"contributor": "",
"date_created": "",
"description": "",
"url": "",
"version": "",
"year": ""
}
LICENSES = [
{
"name": "",
"id": 0,
"url": ""
}
]
CATEGORIES = [
{
'id': 1,
'name': 'person',
'supercategory': 'person',
},
{
'id': 2,
'name': 'ignore',
'supercategory': 'ignore',
},
]
def extract_txt_info(annots):
for annot in annots:
name_ann = annot.split(' ')
img_name = name_ann[0]
img_annots = [int(i) for i in name_ann[1:]]
yield img_name, img_annots
ap = argparse.ArgumentParser()
ap.add_argument('--bbox_txt', help='bbox annotations path', required=True)
ap.add_argument('--ignore_txt', help='ignore annotations path', required=True)
ap.add_argument('--output_json', help='path of output annotation file in coco format', required=True)
args = ap.parse_args()
input_txt = Path(args.bbox_txt)
assert input_txt.is_file(), 'input txt does not exist.'
with open(input_txt, 'r') as file:
lines = file.readlines()
input_stripped = [line.strip('\n') for line in lines]
ignore_txt = Path(args.ignore_txt)
assert ignore_txt.is_file(), 'ignore txt does not exist.'
with open(ignore_txt, 'r') as file:
lines = file.readlines()
ignore_stripped = [line.strip('\n') for line in lines]
ignore_dict = dict(extract_txt_info(ignore_stripped))
json_dict = {"info": INFO, "licenses": LICENSES, "categories": CATEGORIES, "images": [], "annotations": []}
output_images = []
output_annotations = []
for img_name, img_annots in extract_txt_info(input_stripped):
ignore_annots = ignore_dict.get(img_name, [])
if len(ignore_annots):
continue
# images info
if img_name.startswith('ad'):
img = Image.open("/media/data/datasets/PersDet/WiderPedestrian_street/images/" + img_name)
else:
img = Image.open("/media/data/datasets/PersDet/WiderPedestrian_cctv/images/" + img_name)
image_id = len(output_images)+1
image_info = {'id': image_id,
'file_name': img_name,
'height': img.size[1],
'width': img.size[0]
}
output_images.append(image_info)
# bbox annotations info
for i in range(0, len(img_annots), 4):
bbox_annots = img_annots[i:i+4]
area = bbox_annots[2] * bbox_annots[3]
annotation_info = {"id": len(output_annotations)+1,
"image_id": image_id,
"category_id": 1,
"segmentation": [],
"area": area,
"bbox": bbox_annots,
"iscrowd": 0,
"attributes": {"occluded": False}}
output_annotations.append(annotation_info)
# print(output_images)
# print(output_annotations)
json_dict["images"] = output_images
json_dict["annotations"] = output_annotations
with open(args.output_json, 'w') as outfile:
json.dump(json_dict, outfile)
|
# SPDX-License-Identifier: Apache-2.0
from io import BytesIO
import numpy as np
import onnx # noqa
from onnx import shape_inference, TensorProto
from onnx.numpy_helper import from_array, to_array
from onnx.helper import make_tensor
from ..proto.onnx_helper_modified import (
make_node, make_tensor_value_info, make_graph,
make_model, ValueInfoProto
)
from ..proto import get_latest_tested_opset_version
from onnx import onnx_pb as onnx_proto
from ..common._topology import Variable
def load_onnx_model(onnx_file_or_bytes):
"""
Loads an *ONNX* file.
:param onnx_file_or_bytes: *ONNX* file or bytes
:return: *ONNX* model
"""
if isinstance(onnx_file_or_bytes, str):
with open(onnx_file_or_bytes, "rb") as f:
return onnx.load(f)
elif hasattr(onnx_file_or_bytes, 'read'):
return onnx.load(onnx_file_or_bytes)
else:
b = BytesIO(onnx_file_or_bytes)
return onnx.load(b)
def save_onnx_model(model, filename=None):
"""
Saves a model as a file or bytes.
:param model: *ONNX* model
:param filename: filename or None to return bytes
:return: bytes
"""
content = model.SerializeToString()
if filename is not None:
if hasattr(filename, 'write'):
filename.write(content)
else:
with open(filename, "wb") as f:
f.write(content)
return content
def enumerate_model_node_outputs(model, add_node=False):
"""
Enumerates all the nodes of a model.
:param model: ONNX graph
:param add_node: if False, the function enumerates
all output names from every node, otherwise, it
enumerates tuple (output name, node)
:return: enumerator
"""
if not hasattr(model, "graph"):
raise TypeError("Parameter model is not an ONNX model but "
"{}".format(type(model)))
for node in model.graph.node:
for out in node.output:
yield (out, node) if add_node else out
def enumerate_model_initializers(model, add_node=False):
"""
Enumerates all the initializers of a model.
:param model: ONNX graph
:param add_node: if False, the function enumerates
all output names from every node, otherwise, it
enumerates tuple (output name, node)
:return: enumerator
"""
for node in model.graph.initializer:
yield (node.name, node) if add_node else node.name
def select_model_inputs_outputs(model, outputs=None, inputs=None):
"""
Takes a model and changes its outputs.
:param model: *ONNX* model
:param inputs: new inputs
:param outputs: new outputs
:return: modified model
The function removes unneeded nodes.
"""
if inputs is not None:
raise NotImplementedError("Parameter inputs is not supported yet.")
if outputs is None:
raise RuntimeError("Parameter outputs cannot be None.")
if not isinstance(outputs, list):
outputs = [outputs]
mark_var = {}
for out in enumerate_model_node_outputs(model):
mark_var[out] = 0
for inp in model.graph.input:
mark_var[inp.name] = 0
for out in outputs:
if out not in mark_var:
raise ValueError("Output '{}' not found in model.".format(out))
mark_var[out] = 1
nodes = model.graph.node[::-1]
mark_op = {}
for node in nodes:
mark_op[node.name] = 0
# We mark all the nodes we need to keep.
nb = 1
while nb > 0:
nb = 0
for node in nodes:
if mark_op[node.name] == 1:
continue
mod = False
for out in node.output:
if mark_var[out] == 1:
mark_op[node.name] = 1
mod = True
break
if not mod:
continue
nb += 1
for inp in node.input:
if mark_var.get(inp, 0) == 1:
continue
mark_var[inp] = 1
nb += 1
# All nodes verifies mark_op[node.name] == 1
keep_nodes = [node for node in nodes if mark_op[node.name] == 1]
var_out = []
for out in outputs:
value_info = ValueInfoProto()
value_info.name = out
var_out.append(value_info)
graph = make_graph(keep_nodes, model.graph.name, model.graph.input,
var_out, model.graph.initializer)
onnx_model = make_model(graph)
onnx_model.ir_version = model.ir_version
onnx_model.producer_name = model.producer_name
onnx_model.producer_version = model.producer_version
onnx_model.domain = model.domain
onnx_model.model_version = model.model_version
onnx_model.doc_string = model.doc_string
if len(model.metadata_props) > 0:
values = {p.key: p.value for p in model.metadata_props}
onnx.helper.set_model_props(onnx_model, values)
if len(onnx_model.graph.input) != len(model.graph.input):
raise RuntimeError("Input mismatch {} != {}".format(
len(onnx_model.graph.input), len(model.graph.input)))
# fix opset import
del onnx_model.opset_import[:]
for oimp in model.opset_import:
op_set = onnx_model.opset_import.add()
op_set.domain = oimp.domain
op_set.version = oimp.version
return onnx_model
def infer_outputs(op_type, inputs, outputs=None, initializer=None,
target_opset=None, **atts):
"""
Infers outputs type and shapes given an ONNX operator.
"""
if isinstance(op_type, str):
required_outputs = []
if outputs:
for o in outputs:
if hasattr(o, 'onnx_name'):
required_outputs.append(o.onnx_name)
elif isinstance(o, str):
required_outputs.append(o)
else:
raise TypeError("Unable to require output {}.".format(o))
node = make_node(op_type, [i.onnx_name for i in inputs],
required_outputs, **atts)
node = [node]
elif hasattr(op_type, 'nodes'):
node = op_type.nodes
else:
raise RuntimeError("Unable to build ONNX nodes from type {}.".format(
type(op_type)))
input_init = inputs.copy()
if initializer:
input_init.extend(initializer)
onnx_inputs = []
for input in input_init:
if isinstance(input, Variable):
onnx_type = input.type.to_onnx_type()
tensor_type = onnx_type.tensor_type
shape = [tensor_type.shape.dim[i].dim_value
for i in range(len(tensor_type.shape.dim))]
inp = make_tensor_value_info(input.onnx_name,
tensor_type.elem_type,
tuple(shape))
onnx_inputs.append(inp)
elif isinstance(input, onnx.TensorProto):
v = make_tensor_value_info(
input.name, input.data_type.real,
list(d for d in input.dims))
onnx_inputs.append(v)
elif isinstance(input, onnx.AttributeProto):
value_info = ValueInfoProto()
value_info.name = input.name
onnx_type = onnx_proto.TypeProto()
onnx_type.tensor_type.elem_type = input.type
value_info.type.CopyFrom(onnx_type)
onnx_inputs.append(value_info)
else:
onnx_inputs.append(input)
graph = make_graph(node, 'infer_shapes',
onnx_inputs, [])
original_model = make_model(graph, producer_name='skl2onnx')
domains = {}
for n in node:
domains[n.domain] = max(domains.get(n.domain, 1),
getattr(n, 'op_version', 1))
for i, (k, v) in enumerate(domains.items()):
if i == 0 and len(original_model.opset_import) == 1:
op_set = original_model.opset_import[0]
else:
op_set = original_model.opset_import.add()
op_set.domain = k
if target_opset:
if isinstance(target_opset, dict):
op_set.version = target_opset.get(
k, get_latest_tested_opset_version())
else:
op_set.version = target_opset
else:
op_set.version = get_latest_tested_opset_version()
try:
inferred_model = shape_inference.infer_shapes(original_model)
except RuntimeError as e:
raise RuntimeError(
"Unable to infer shape of node '{}'\n{}".format(
op_type, original_model)) from e
shapes = Variable.from_pb(inferred_model.graph.value_info)
if len(shapes) == 0:
raise RuntimeError("Shape inference fails.\n"
"*Inputs*\n{}\n*Model*\n{}'".format(
onnx_inputs, original_model))
return shapes
def change_onnx_domain(model, ops):
"""
Takes a model and changes the domain of selected operators.
:param model: *ONNX* model
:param ops: dictionary { optype: ('optype', 'new domain') }
:return: modified model
The function also updates the opset imports for the new domains.
"""
nodes = model.graph.node
for node in nodes:
rep = ops.get(node.op_type, None)
if rep is None:
continue
node.op_type = rep[0]
node.domain = rep[1]
graph = make_graph(nodes, model.graph.name, model.graph.input,
model.graph.output, model.graph.initializer)
onnx_model = make_model(graph)
onnx_model.ir_version = model.ir_version
onnx_model.producer_name = model.producer_name
onnx_model.producer_version = model.producer_version
onnx_model.domain = model.domain
onnx_model.model_version = model.model_version
onnx_model.doc_string = model.doc_string
if len(model.metadata_props) > 0:
values = {p.key: p.value for p in model.metadata_props}
onnx.helper.set_model_props(onnx_model, values)
if len(onnx_model.graph.input) != len(model.graph.input):
raise RuntimeError("Input mismatch {} != {}".format(
len(onnx_model.graph.input), len(model.graph.input)))
# fix opset import
domain_set = set()
has_domain = False
del onnx_model.opset_import[:]
for oimp in model.opset_import:
op_set = onnx_model.opset_import.add()
op_set.domain = oimp.domain
op_set.version = oimp.version
domain_set.add(oimp.domain)
if not has_domain:
has_domain = oimp.domain in domain_set
for v in ops.values():
if v[1] not in domain_set:
op_set = onnx_model.opset_import.add()
op_set.domain = v[1]
op_set.version = 1
return onnx_model
def add_output_initializer(model_onnx, name, value, suffix='_init'):
"""
Add a constant and link it to one output.
It allows the user to store arrays into the graph
and retrieve them when using it.
The initializer is named `name + suffix`, the output
is named `name`.
:param model_onnx: ONNX graph
:param name: name of the output; the initializer is named `name + suffix`
:param value: array to store
:param suffix: suffix appended to `name` to build the initializer name
:return: new model
It is possible to add multiple constant by using list:
``add_output_initializer(model_onnx, ['name1', 'name2'], [v1, v2])``.
"""
if isinstance(name, str):
name_list = [name]
value_list = [value]
else:
name_list = name
value_list = value
if len(name_list) != len(value_list):
raise ValueError(
"Mismatched names and values. There are %d names and %d values."
"" % (len(name_list), len(value_list)))
nodes = list(model_onnx.graph.node)
inits = list(model_onnx.graph.initializer)
outputs = list(model_onnx.graph.output)
for name, value in zip(name_list, value_list):
name_output = name
name_init = name + suffix
names = set(i.name for i in model_onnx.graph.initializer)
if name_output in names or name_init in names:
raise ValueError(
"Names %r or %r is already taken by an initializer: %r." % (
name_output, name_init, ", ".join(sorted(names))))
names = set(i.name for i in model_onnx.graph.output)
if name_output in names or name_init in names:
raise ValueError(
"Names %r or %r is already taken by an output: %r." % (
name_output, name_init, ", ".join(sorted(names))))
names = set(i.name for i in model_onnx.graph.input)
if name_output in names or name_init in names:
raise ValueError(
"Names %r or %r is already taken by an output: %r." % (
name_output, name_init, ", ".join(sorted(names))))
try:
cst = from_array(value, name=name_init)
except RuntimeError as e:
st = str(value.dtype).lower()
if st.startswith('u') or st.startswith("<u"):
cst_value = np.array([s.encode('utf-8') for s in value])
cst = make_tensor(
name_init, data_type=TensorProto.STRING,
dims=value.shape, vals=list(cst_value))
else:
raise e
inits.append(cst)
outputs.append(make_tensor_value_info(
name_output, cst.data_type, cst.dims))
nodes.append(make_node('Identity', [name_init], [name_output]))
graph = make_graph(
nodes, model_onnx.graph.name, model_onnx.graph.input,
outputs, inits)
onnx_model = make_model(graph)
onnx_model.ir_version = model_onnx.ir_version
onnx_model.producer_name = model_onnx.producer_name
onnx_model.producer_version = model_onnx.producer_version
onnx_model.domain = model_onnx.domain
onnx_model.model_version = model_onnx.model_version
onnx_model.doc_string = model_onnx.doc_string
if len(model_onnx.metadata_props) > 0:
values = {p.key: p.value for p in model_onnx.metadata_props}
onnx.helper.set_model_props(onnx_model, values)
if len(onnx_model.graph.input) != len(model_onnx.graph.input):
raise RuntimeError("Input mismatch {} != {}".format(
len(onnx_model.graph.input), len(model_onnx.graph.input)))
# fix opset import
del onnx_model.opset_import[:]
for oimp in model_onnx.opset_import:
op_set = onnx_model.opset_import.add()
op_set.domain = oimp.domain
op_set.version = oimp.version
return onnx_model
def get_initializers(model_onnx):
"""
Retrieves the list of initializers in a model in a
dictionary `{ name: value }`.
"""
res = {}
for init in model_onnx.graph.initializer:
res[init.name] = to_array(init)
return res
def update_onnx_initializers(model_onnx, new_inits):
"""
Updates initializer in a ONNX model.
:param model_onnx: ONNX model
:param new_inits: new initializers
:return: list of updated initializers
"""
updated = []
replace_weights = []
replace_indices = []
for i, w in enumerate(model_onnx.graph.initializer):
if w.name in new_inits:
replace_weights.append(from_array(new_inits[w.name], w.name))
replace_indices.append(i)
updated.append(w.name)
replace_indices.sort(reverse=True)
for w_i in replace_indices:
del model_onnx.graph.initializer[w_i]
model_onnx.graph.initializer.extend(replace_weights)
return updated
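# Usage sketch (hypothetical file and initializer names): load a model,
# overwrite one initializer in place, then save the result.
#   model = load_onnx_model("model.onnx")
#   updated = update_onnx_initializers(
#       model, {"coef": np.zeros((3, 1), dtype=np.float32)})
#   save_onnx_model(model, "model_updated.onnx")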
|
import glob
import os
import shutil
target_path = "/observations/solarnet-campaign/homogenization/catania"
os.makedirs(target_path, exist_ok=True)
for f in glob.glob("/observations/solarnet-campaign/ftp.oact.inaf.it/Romano/SOLARNET_SPRING/**/*.fts", recursive=True):
shutil.move(f, os.path.join(target_path, os.path.basename(f)))
|
# Also solved with binary search
class Solution:
def mySqrt(self, x: int) -> int:
if x < 2:
return x
left, right = 2, x // 2 + 1
while left <= right:
mid = left + (right - left) // 2
if mid * mid > x:
right = mid - 1
elif mid * mid < x:
left = mid + 1
else:
return mid
return right
if __name__ == '__main__':
s = Solution()
res = s.mySqrt(0)
print(res)
|
"""
You are given a data structure of employee information, which includes the employee's unique id, their importance value and their direct subordinates' id.
For example, employee 1 is the leader of employee 2, and employee 2 is the leader of employee 3. They have importance value 15, 10 and 5, respectively. Then employee 1 has a data structure like [1, 15, [2]], and employee 2 has [2, 10, [3]], and employee 3 has [3, 5, []]. Note that although employee 3 is also a subordinate of employee 1, the relationship is not direct.
Now given the employee information of a company, and an employee id, you need to return the total importance value of this employee and all their subordinates.
Example 1:
Input: [[1, 5, [2, 3]], [2, 3, []], [3, 3, []]], 1
Output: 11
Explanation:
Employee 1 has importance value 5, and he has two direct subordinates: employee 2 and employee 3. They both have importance value 3. So the total importance value of employee 1 is 5 + 3 + 3 = 11.
"""
#BFS
"""
# Definition for Employee.
class Employee(object):
def __init__(self, id, importance, subordinates):
#################
:type id: int
:type importance: int
:type subordinates: List[int]
#################
self.id = id
self.importance = importance
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
emps = {employee.id: employee for employee in employees}
queue = [(emps[id])]
res = 0
while queue:
e = queue.pop(0)
res += e.importance
if e.subordinates:
for i in e.subordinates:
queue.append((emps[i]))
return res
#DFS
"""
# Definition for Employee.
class Employee(object):
def __init__(self, id, importance, subordinates):
#################
:type id: int
:type importance: int
:type subordinates: List[int]
#################
self.id = id
self.importance = importance
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
emps = {employee.id: employee for employee in employees}
def dfs(id):
sum_Imp = emps[id].importance
for sub_id in emps[id].subordinates:
sum_Imp += dfs(sub_id)
return sum_Imp
return dfs(id)
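# Illustrative check using the example from the problem statement. A minimal
# Employee stand-in is defined here because the real class is normally provided
# by the judge's harness.
if __name__ == '__main__':
    class Employee(object):
        def __init__(self, id, importance, subordinates):
            self.id = id
            self.importance = importance
            self.subordinates = subordinates
    emps = [Employee(1, 5, [2, 3]), Employee(2, 3, []), Employee(3, 3, [])]
    print(Solution().getImportance(emps, 1))  # expected: 11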
|
"""
Move data from ingestion to production
"""
import requests
from b2stage.endpoints.commons.b2handle import B2HandleEndpoint
from restapi import decorators
from restapi.connectors import celery
from restapi.utilities.logs import log
from seadata.endpoints.commons.cluster import ClusterContainerEndpoint
from seadata.endpoints.commons.seadatacloud import EndpointsInputSchema
from seadata.endpoints.commons.seadatacloud import Metadata as md
#################
# REST CLASS
class MoveToProductionEndpoint(B2HandleEndpoint, ClusterContainerEndpoint):
labels = ["ingestion"]
@decorators.auth.require()
@decorators.use_kwargs(EndpointsInputSchema)
@decorators.endpoint(
path="/ingestion/<batch_id>/approve",
summary="Approve files in a batch that are passing all qcs",
responses={200: "Registration executed"},
)
def post(self, batch_id, **json_input):
params = json_input.get("parameters", {})
if len(params) < 1:
return self.send_errors("parameters is empty", code=400)
files = params.get("pids", {})
if len(files) < 1:
return self.send_errors("pids' parameter is empty list", code=400)
filenames = []
for data in files:
if not isinstance(data, dict):
return self.send_errors(
"File list contains at least one wrong entry", code=400,
)
# print("TEST", data)
for key in md.keys: # + [md.tid]:
value = data.get(key)
if value is None:
error = f"Missing parameter: {key}"
return self.send_errors(error, code=400)
error = None
value_len = len(value)
if value_len > md.max_size:
error = f"Param '{key}': exceeds size {md.max_size}"
elif value_len < 1:
error = f"Param '{key}': empty"
if error is not None:
return self.send_errors(error, code=400)
filenames.append(data.get(md.tid))
################
# 1. check if irods path exists
try:
imain = self.get_main_irods_connection()
batch_path = self.get_irods_batch_path(imain, batch_id)
log.debug("Batch path: {}", batch_path)
if not imain.is_collection(batch_path):
return self.send_errors(
f"Batch '{batch_id}' not enabled (or no permissions)", code=404,
)
################
# 2. make batch_id directory in production if not existing
prod_path = self.get_irods_production_path(imain, batch_id)
log.debug("Production path: {}", prod_path)
obj = self.init_endpoint()
imain.create_collection_inheritable(prod_path, obj.username)
################
# ASYNC
log.info("Submit async celery task")
c = celery.get_instance()
task = c.celery_app.send_task(
"move_to_production_task",
args=[batch_id, batch_path, prod_path, json_input],
queue="ingestion",
routing_key="ingestion",
)
log.info("Async job: {}", task.id)
return self.return_async_id(task.id)
except requests.exceptions.ReadTimeout:
return self.send_errors("B2SAFE is temporarily unavailable", code=503)
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
try:
import mimerpy
except:
pass
import logging
from lib.core.common import getSafeExString
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: https://github.com/mimersql/MimerPy
User guide: https://github.com/mimersql/MimerPy/blob/master/README.rst
API: https://www.python.org/dev/peps/pep-0249/
License: MIT
"""
def connect(self):
self.initConnection()
try:
self.connector = mimerpy.connect(hostname=self.hostname, username=self.user, password=self.password, database=self.db, port=self.port, connect_timeout=conf.timeout)
except mimerpy.OperationalError as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except mimerpy.ProgrammingError as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
return None
def execute(self, query):
try:
self.cursor.execute(query)
except (mimerpy.OperationalError, mimerpy.ProgrammingError) as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
except mimerpy.InternalError as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import argparse
from datetime import datetime
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, transforms
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
#from PIL import Image
import time
import os
# from model import ft_net, ft_net_dense, ft_net_NAS, PCB
from model import ft_net, ft_net_dense, ft_net_NAS
from model import PCB_dense as PCB
from random_erasing import RandomErasing
import yaml
import math
import random
import numpy as np
from shutil import copyfile
from log import *
from util import *
from reid_metric import compute_mAP
def train_model(model, criterion, optimizer, scheduler, log_file, stage, num_epochs=25):
since = time.time()
best_model_wts = model.state_dict()
best_acc = 0.0
best_epoch = 0
last_model_wts = model.state_dict()
warm_up = 0.1 # We start from the 0.1*lrRate
warm_iteration = round(dataset_sizes['train']/opt.batchsize)*opt.warm_epoch # first 5 epoch
for epoch in range(num_epochs):
logger.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
running_loss = 0.0
running_corrects = 0.0
scheduler.step()
model.train(True) # Set model to training mode
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
outputs, _, _ = model(inputs)
if opt.PCB == 'none':
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
else:
part = {}
sm = nn.Softmax(dim=1)
num_part = 6
for i in range(num_part):
part[i] = outputs[i]
score = sm(part[0]) + sm(part[1]) + sm(part[2]) + sm(part[3]) + sm(part[4]) + sm(part[5])
_, preds = torch.max(score.data, 1)
loss = criterion(part[0], labels)
for i in range(num_part-1):
loss += criterion(part[i+1], labels)
# backward + optimize only if in training phase
if epoch < opt.warm_epoch:
warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
loss *= warm_up
if opt.fp16: # we use the optimizer to backpropagate the loss
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
# statistics
version = torch.__version__
if int(version[0])>0 or int(version[2]) > 3: # for the new version like 0.4.0, 0.5.0 and 1.0.0
running_loss += loss.item() * inputs.size(0)
else : # for the old version like 0.3.0 and 0.3.1
running_loss += loss.data[0] * inputs.size(0)
running_corrects += float(torch.sum(preds == labels.data))
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
if epoch_acc > best_acc:
best_acc = epoch_acc
best_epoch = epoch
best_model_wts = model.state_dict()
logger.info('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
log_file.write('{} epoch : {} Loss: {:.4f} Acc: {:.4f}'.format(epoch, phase, epoch_loss, epoch_acc) + '\n')
# deep copy the model
else: # phase = 'val'
model.train(False) # Set model to evaluate mode
with torch.no_grad():
query_feature, query_label = extract_feature_and_label(opt, model, dataloaders['valid_query'])
gallery_feature, gallery_label = extract_feature_and_label(opt, model, dataloaders['valid_gallery'])
cmc, mAP = compute_mAP(query_feature, query_label, gallery_feature, gallery_label)
logger.info('Validation Results')
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
last_model_wts = model.state_dict()
if epoch%10 == 9:
save_network(opt, model, epoch, stage)
time_elapsed = time.time() - since
logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
logger.info('Best val acc: {:4f}'.format(best_acc))
model.load_state_dict(best_model_wts)
save_network(opt, model, best_epoch, stage)
save_network(opt, model, 'best', stage)
model.load_state_dict(last_model_wts)
save_network(opt, model, 'last', stage)
return model
######################################################################
# PCB train
# ------------------
# Step1 : train the PCB model
# According to the original paper, we set different learning rates for different layers.
def pcb_train(model, criterion, log_file, stage, num_epoch):
ignored_params = list(map(id, model.classifiers.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
optimizer_ft = optim.SGD([
# {'params': base_params, 'lr': 0.01},
# {'params': model.classifiers.parameters(), 'lr': 0.1},
{'params': base_params, 'lr': 0.1 * opt.lr},
{'params': model.classifiers.parameters(), 'lr': opt.lr}
], weight_decay=5e-4, momentum=0.9, nesterov=True)
# Decay LR by a factor of 0.1 every 40 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=40, gamma=0.1)
model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,
log_file, stage, num_epochs=num_epoch)
return model
######################################################################
# RPP train
# ------------------
# Step 2 & 3: train the RPP layers
# According to the original paper, we set the learning rate to 0.01 for the RPP layers.
def rpp_train(model, criterion, log_file, stage, num_epoch):
# ignored_params = list(map(id, get_net(opt, model).avgpool.parameters()))
# base_params = filter(lambda p: id(p) not in ignored_params, get_net(opt, model).parameters())
# optimizer_ft = optim.SGD([
# {'params': base_params, 'lr': 0.00},
# {'params': get_net(opt, model).avgpool.parameters(), 'lr': 0.01},
# ], weight_decay=5e-4, momentum=0.9, nesterov=True)
# optimizer_ft = optim.SGD(model.avgpool.parameters(), lr=0.01,
# weight_decay=5e-4, momentum=0.9, nesterov=True)
optimizer_ft = optim.SGD(model.avgpool.parameters(), lr=0.1 * opt.lr,
weight_decay=5e-4, momentum=0.9, nesterov=True)
# Decay LR by a factor of 0.1 every 100 epochs (never use)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)
model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,
log_file, stage, num_epochs=num_epoch)
return model
######################################################################
# full train
# ------------------
# Step 4: train the whole net
# According to the original paper, we set different learning rates for the whole net
def full_train(model, criterion, log_file, stage, num_epoch):
ignored_params = list(map(id, model.classifiers.parameters()))
ignored_params += list(map(id, model.avgpool.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
optimizer_ft = optim.SGD([
# {'params': base_params, 'lr': 0.001},
# {'params': model.classifiers.parameters(), 'lr': 0.01},
# {'params': model.avgpool.parameters(), 'lr': 0.01},
{'params': base_params, 'lr': 0.01 * opt.lr},
{'params': model.classifiers.parameters(), 'lr': 0.1 * opt.lr},
{'params': model.avgpool.parameters(), 'lr': 0.1 * opt.lr},
], weight_decay=5e-4, momentum=0.9, nesterov=True)
# Decay LR by a factor of 0.1 every 100 epochs (never use)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)
model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,
log_file, stage, num_epochs=num_epoch)
return model
if __name__ == '__main__':
try:
from apex.fp16_utils import *
from apex import amp, optimizers
except ImportError: # will be 3.x series
logger.warning('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
#### Options ####
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu_ids',default='0', type=str, help='gpu_ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--data_dir',default='../Market/pytorch',type=str, help='training dir path')
parser.add_argument('--model_dir',default='./model', type=str, help='output model dir')
parser.add_argument('--log_dir',default='./logs/train', type=str, help='log dir')
parser.add_argument('--train_all', action='store_true', help='use all training data' )
parser.add_argument('--color_jitter', action='store_true', help='use color jitter in training' )
parser.add_argument('--batchsize', default=32, type=int, help='batchsize')
parser.add_argument('--stride', default=2, type=int, help='stride')
parser.add_argument('--erasing_p', default=0, type=float, help='Random Erasing probability, in [0,1]')
parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
parser.add_argument('--use_NAS', action='store_true', help='use NAS' )
parser.add_argument('--warm_epoch', default=0, type=int, help='the first K epoch that needs warm up')
parser.add_argument('--lr', default=0.05, type=float, help='learning rate')
parser.add_argument('--droprate', default=0.5, type=float, help='drop rate')
parser.add_argument('--PCB', default='none', choices=['none', 'resnet', 'densenet'], help='use PCB')
parser.add_argument('--RPP', action='store_true', help='use RPP')
parser.add_argument('--fp16', action='store_true', help='use float16 instead of float32, which will save about 50% memory' )
parser.add_argument('--scales', default='1', type=str, help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')
opt = parser.parse_args()
#### log ####
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(opt.log_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
set_logger(logger, log_dir)
#### seed ####
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
#### gpu ####
str_ids = opt.gpu_ids.split(',')
opt.gids = []
for str_id in str_ids:
gid = int(str_id)
if gid >=0:
opt.gids.append(gid)
if len(opt.gids)>0:
torch.cuda.set_device(opt.gids[0])
use_gpu = torch.cuda.is_available()
#### multi scale ####
logger.info('We use the scale: %s' % opt.scales)
str_ms = opt.scales.split(',')
opt.ms = []
for s in str_ms:
s_f = float(s)
opt.ms.append(math.sqrt(s_f))
#### Load Data ####
transform_train_list = [
#transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)
transforms.Resize((256,128), interpolation=3),
transforms.Pad(10),
transforms.RandomCrop((256,128)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
transform_val_list = [
transforms.Resize(size=(256,128),interpolation=3), #Image.BICUBIC
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
if opt.PCB != 'none':
transform_train_list = [
transforms.Resize((384,192), interpolation=3),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
transform_val_list = [
transforms.Resize(size=(384,192),interpolation=3), #Image.BICUBIC
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
if opt.erasing_p>0:
transform_train_list = transform_train_list + [RandomErasing(probability = opt.erasing_p, mean=[0.0, 0.0, 0.0])]
if opt.color_jitter:
transform_train_list = [transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0)] + transform_train_list
logger.info('transform {0}'.format(transform_train_list))
data_transforms = {
'train': transforms.Compose(transform_train_list),
'val': transforms.Compose(transform_val_list),
}
train_all = ''
if opt.train_all:
train_all = '_all'
image_datasets = {}
image_datasets['train'] = datasets.ImageFolder(os.path.join(opt.data_dir, 'train' + train_all), data_transforms['train'])
image_datasets['valid_query'] = datasets.ImageFolder(os.path.join(opt.data_dir, 'valid_query'), data_transforms['val'])
image_datasets['valid_gallery'] = datasets.ImageFolder(os.path.join(opt.data_dir, 'valid_gallery'), data_transforms['val'])
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
                                              shuffle=True, num_workers=8, pin_memory=True)
               for x in ['train', 'valid_query', 'valid_gallery']}  # 8 workers may work faster
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid_query', 'valid_gallery']}
class_names = image_datasets['train'].classes
since = time.time()
inputs, classes = next(iter(dataloaders['train']))
logger.debug('dataloaders cost time {0} s'.format(time.time()-since))
#### model ####
if opt.use_dense:
    model = ft_net_dense(len(class_names), opt.droprate)
elif opt.use_NAS:
    model = ft_net_NAS(len(class_names), opt.droprate)
else:
    model = ft_net(len(class_names), opt.droprate, opt.stride)
if opt.PCB != 'none':
    model = PCB(len(class_names))
opt.nclasses = len(class_names)
# logger.debug(str(model))
#### save args and model ####
if not os.path.isdir(opt.model_dir):
    os.mkdir(opt.model_dir)
with open(os.path.join(opt.model_dir, 'opts.yaml'), 'w') as fp:
    yaml.dump(vars(opt), fp, default_flow_style=False)
log_file = open(os.path.join(opt.model_dir, 'train.log'), 'w')
copyfile('./main_train.py', os.path.join(opt.model_dir, 'main_train.py'))
copyfile('./model.py', os.path.join(opt.model_dir, 'model.py'))
#### start train ####
# step1: PCB training #
stage = 'pcb'
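# step 1 always trains a PCB model, replacing whichever backbone was selected above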
model = PCB(len(class_names))
if use_gpu:
    model = model.cuda()
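# the PCB stage uses a fixed 5-epoch warm-up, overriding any --warm_epoch value from the CLI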
opt.warm_epoch = 5
criterion = nn.CrossEntropyLoss()
model = pcb_train(model, criterion, log_file, stage, 120)
# step2&3: RPP training #
if opt.RPP:
    stage = 'rpp'
    model = model.convert_to_rpp()
    if use_gpu:
        model = model.cuda()
    opt.warm_epoch = 0
    model = rpp_train(model, criterion, log_file, stage, 10)
# step4: whole net training #
stage = 'full'
opt.warm_epoch = 0
full_train(model, criterion, log_file, stage, 20)
log_file.close()
|
'''
Rolien and Naej are the developers of a large programming portal. To help with the site's new registration system, they have asked for your assistance. Your job is to write a program that validates the passwords registered on the portal, according to the following requirements:
The password must contain at least one uppercase letter, one lowercase letter and one digit;
It must not contain any punctuation, accented characters or spaces;
In addition, the password must be 6 to 32 characters long.
Input
The input contains several test cases and ends at end of file. Each line holds a string S, the password entered by the user at registration time.
Output
For each password, print one line: "Senha valida." (valid password) if every requirement above is met, or "Senha invalida." (invalid password) if one or more requirements are not satisfied.
'''
while True:
    try:
        senha = input()
    except EOFError:
        break
    maiuscula = minuscula = numero = 0
    # isalnum() rejects punctuation and spaces; isascii() (Python 3.7+) also rejects accented letters
    if senha.isalnum() and senha.isascii():
        for s in senha:
            if s.isupper():
                maiuscula += 1
            if s.islower():
                minuscula += 1
            if s.isdigit():
                numero += 1
    if maiuscula > 0 and minuscula > 0 and numero > 0 and 6 <= len(senha) <= 32:
        print('Senha valida.')
    else:
        print('Senha invalida.')
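# A minimal alternative sketch (not part of the original submission), assuming the same rules:
# 6-32 ASCII letters/digits with at least one uppercase letter, one lowercase letter and one digit.
import re

def senha_valida(senha: str) -> bool:
    return bool(re.fullmatch(r'(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])[A-Za-z0-9]{6,32}', senha))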
|
"""Db tools."""
import csv
import json
import os
from datetime import datetime
from typing import Dict, List
import boto3
from botocore import config
from covid_api.core.config import DT_FORMAT, INDICATOR_BUCKET
from covid_api.models.static import IndicatorObservation
s3_params = dict(service_name="s3")
lambda_params = dict(
    service_name="lambda",
    region_name="us-east-1",
    config=config.Config(
        read_timeout=900, connect_timeout=900, retries={"max_attempts": 0}
    ),
)
if os.environ.get("AWS_ENDPOINT_URL"):
    print("Loading from local")
    s3_params["endpoint_url"] = os.environ["AWS_ENDPOINT_URL"]
    lambda_params["endpoint_url"] = os.environ["AWS_ENDPOINT_URL"]
s3 = boto3.client(**s3_params)
_lambda = boto3.client(**lambda_params)
def invoke_lambda(
    lambda_function_name: str, payload: dict = None, invocation_type="RequestResponse"
):
    """Invoke a lambda function using the boto3 lambda client.
    Params:
    -------
    lambda_function_name (str): name of the lambda to invoke
    payload (Optional[dict]): data to invoke the lambda function with (accessible
        in the lambda handler function under the `event` param)
    invocation_type (Optional[str] = ["RequestResponse", "Event", "DryRun"]):
        RequestResponse runs the lambda synchronously (holding up the thread
        until the lambda responds),
        Event runs it asynchronously,
        DryRun only verifies that the user/role has the correct permissions to invoke
        the lambda function
    Returns:
    --------
    (dict) Lambda invocation response, see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.invoke
    - NOTE:
        The current configuration specifies a RequestResponse invocation, which does
        indeed run synchronously, but returns a status of 202 (Accepted) when it
        should return 200. A 202 status is expected from the `Event` invocation
        type (indicating the lambda was initiated but its status is unknown).
    - NOTE:
        The current configuration should directly return the lambda output under
        response["Payload"]: StreamingBody; however, the byte string currently returned
        also contains lambda invocation/runtime details from the logs, e.g.:
        ```
        START RequestId: 7c61eb52-735d-1ce4-0df2-a975197924eb Version: 1
        END RequestId: 7c61eb52-735d-1ce4-0df2-a975197924eb
        REPORT RequestId: 7c61eb52-735d-1ce4-0df2-a975197924eb Init Duration: 232.54 ms Duration: 3.02 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 33 MB
        {"result":"success","input":"test"}
        ```
        when only the JSON object {"result":"success", "input":"test"} is expected.
    To load just the lambda output use:
        ```
        response = r["Payload"].read().decode("utf-8")
        lambda_output = json.loads(
            response[response.index("{") : (response.index("}") + 1)]
        )
        ```
        where r is the output of this function.
    """
    lambda_invoke_params = dict(
        FunctionName=lambda_function_name, InvocationType=invocation_type
    )
    if payload:
        lambda_invoke_params.update(dict(Payload=json.dumps(payload)))
    return _lambda.invoke(**lambda_invoke_params)
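# Hedged usage sketch (not part of the original module): "example-function" is a hypothetical
# lambda name; the payload parsing follows the NOTE in the docstring above.
def _example_invoke_lambda():
    r = invoke_lambda("example-function", payload={"input": "test"})
    response = r["Payload"].read().decode("utf-8")
    # keep only the trailing JSON object, stripping the runtime log preamble
    return json.loads(response[response.index("{"): response.index("}") + 1])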
def s3_get(bucket: str, key: str):
    """Get AWS S3 Object."""
    response = s3.get_object(Bucket=bucket, Key=key)
    return response["Body"].read()
def get_indicator_site_metadata(identifier: str, folder: str) -> Dict:
    """Get Indicator metadata for a specific site."""
    try:
        key = f"indicators/{folder}/{identifier}.json"
        return json.loads(s3_get(INDICATOR_BUCKET, key))
    except Exception:
        return {}
def indicator_folders() -> List:
    """Get Indicator folders."""
    response = s3.list_objects_v2(
        Bucket=INDICATOR_BUCKET, Prefix="indicators/", Delimiter="/",
    )
    return [obj["Prefix"].split("/")[1] for obj in response.get("CommonPrefixes", [])]
def indicator_exists(identifier: str, indicator: str):
    """Check if an indicator exists for a site."""
    try:
        s3.head_object(
            Bucket=INDICATOR_BUCKET, Key=f"indicators/{indicator}/{identifier}.csv",
        )
        return True
    except Exception:
        try:
            s3.head_object(
                Bucket=INDICATOR_BUCKET,
                Key=f"indicators/{indicator}/{identifier}.json",
            )
            return True
        except Exception:
            return False
def get_indicators(identifier) -> List:
    """Return indicators info."""
    indicators = []
    for folder in indicator_folders():
        if indicator_exists(identifier, folder):
            indicator = dict(id=folder)
            try:
                data = []
                # metadata for reading the data and converting to a consistent format
                metadata_json = s3_get(
                    INDICATOR_BUCKET, f"indicators/{folder}/metadata.json"
                )
                metadata_dict = json.loads(metadata_json.decode("utf-8"))
                # read the actual indicator data
                indicator_csv = s3_get(
                    INDICATOR_BUCKET, f"indicators/{folder}/{identifier}.csv"
                )
                indicator_lines = indicator_csv.decode("utf-8").split("\n")
                reader = csv.DictReader(indicator_lines)
                # top level metadata is added directly to the response
                top_level_fields = {
                    k: v for k, v in metadata_dict.items() if isinstance(v, str)
                }
                # for each row (observation), format the data correctly
                for row in reader:
                    date = datetime.strptime(
                        row[metadata_dict["date"]["column"]],
                        metadata_dict["date"]["format"],
                    ).strftime(DT_FORMAT)
                    other_fields = {
                        k: row.get(v["column"], None)
                        for k, v in metadata_dict.items()
                        if isinstance(v, dict) and v.get("column") and k != "date"
                    }
                    # validate and parse the row
                    i = IndicatorObservation(**other_fields)
                    data.append(dict(date=date, **i.dict(exclude_none=True)))
                # add to the indicator dictionary
                indicator["domain"] = dict(
                    date=[
                        min(data, key=lambda x: datetime.strptime(x["date"], DT_FORMAT))["date"],
                        max(data, key=lambda x: datetime.strptime(x["date"], DT_FORMAT))["date"],
                    ],
                    indicator=[
                        min(data, key=lambda x: x["indicator"])["indicator"],
                        max(data, key=lambda x: x["indicator"])["indicator"],
                    ],
                )
                indicator["data"] = data
                indicator.update(top_level_fields)
            except Exception as e:
                print(e)
            try:
                site_metadata = get_indicator_site_metadata(identifier, folder)
                # this will, intentionally, overwrite the name from the data if present
                if "name" in site_metadata:
                    indicator["name"] = site_metadata.get("name")
                indicator["notes"] = site_metadata.get("notes", None)
                indicator["highlight_bands"] = site_metadata.get(
                    "highlight_bands", None
                )
            except Exception as e:
                print(e)
            indicators.append(indicator)
    return indicators
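# Illustrative sketch (an assumption, not shipped with this module) of the metadata.json shape
# that get_indicators expects: string values become top-level response fields, while dict values
# map observation fields to CSV columns.
_EXAMPLE_METADATA = {
    "units": "vessels/day",  # hypothetical top-level field copied onto the indicator
    "date": {"column": "date", "format": "%Y-%m-%d"},
    "indicator": {"column": "value"},  # hypothetical column holding the observed value
}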
|