content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
"""
Fine-grained Classification based on textual cues
"""
# Python modules
import torch
import torch.nn as nn
import time
import torch
import numpy as np
import glob
import os
import json
from PIL import Image, ImageDraw, ImageFile
import torchvision
from torch.autograd import Variable
from torchvision import transforms
from sklearn.metrics import average_precision_score
import pdb
import sys
from tqdm import tqdm
import pickle
# Own modules
from utils import *
from options import *
from data.data_generator import *
from models.models import load_model
from custom_optim import *
# __author__ = "Andres Mafla Delgado;
# __email__ = "amafla@cvc.uab.cat;
# READS A LIST OF IMAGES BELONGING TO CON-TEXT OR BOTTLES AND EXTRACTS THE FEATURES AND PROBS - blur experiments
def test(args, net, cuda, num_classes, gt_annotations, text_embedding, local_feats, image_name2features_index, text_bboxes, local_bboxes, images_to_process):
    """Evaluate `net` on the given image list and print per-class mAP.

    Reads a list of images belonging to CON-TEXT or Drink-Bottle, builds the
    visual/textual inputs per image (optionally blanking one modality for the
    blur experiments controlled by `args.blur`), runs the network and
    accumulates average-precision per class.

    :param net: classification model; called as
        net(data, text_features, sample_size, local_feats, text_bboxes, local_bboxes).
    :param cuda: move tensors to GPU when True.
    :param gt_annotations: dict with a 'test' split mapping image name -> class.
    :param text_embedding: dict mapping image name -> list of word embeddings.
    :param local_feats: array of Faster R-CNN features, indexed via
        image_name2features_index.
    :param images_to_process: dict mapping image name -> class id (1-based).
    """
    processed_imgs = 0
    # Switch to evaluation mode
    net.eval()
    # Standard ImageNet preprocessing.
    test_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    with torch.no_grad():
        images_to_proc = images_to_process.keys()
        gt_annotations_test = gt_annotations['test'].keys()
        # Only evaluate images that are in the test split.
        image_list = [i for i in images_to_proc if i in gt_annotations_test]
        print('Processing %d images\n' % (len(image_list)))
        # For Precision Metric
        precision_per_class = [0.0] * num_classes
        # Tiny epsilon avoids division by zero for classes with no test image.
        class_total = [0.00001] * num_classes
        for image in tqdm(image_list):
            sample_size = 1  # images are processed one at a time
            if args.dataset == 'context':
                img_path = args.base_dir + 'Context/data/JPEGImages/' + image
                relative_path = '/Context/old_fisher_vectors/'
                label = np.zeros(28)
            else:
                img_path = args.base_dir + 'Drink_Bottle/' + image
                # NOTE(review): no leading '/' here, unlike the context path —
                # confirm args.base_dir ends with '/' for both datasets.
                relative_path = 'Drink_Bottle/old_fisher_vectors/'
                label = np.zeros(20)
            img = Image.open(img_path).convert('RGB')
            data = test_transform(img)
            data = data.view(-1, 3, 224, 224)
            # Labels (img_class is 1-based; one-hot for average_precision_score)
            img_class = images_to_process[image]
            label[int(img_class) - 1] = 1
            # Textual Features: real features unless text is blurred out.
            if args.blur == 'none' or args.blur == 'visual':
                if args.ocr == 'yolo_phoc' and args.embedding == 'fisher':
                    # Pre-computed fisher vector stored as <image>.json
                    with open(args.base_dir + relative_path + image.replace('images/', '')[:-3] + 'json', 'r') as fp:
                        fisher_vector = json.load(fp)
                    text_features = np.resize(fisher_vector, (1, 38400))
                else:
                    if args.dataset == 'bottles':
                        image = image.replace('images/', '')
                    text_embedding_sample = np.asarray(text_embedding[image])
                    # Pad/truncate to a fixed number of word embeddings.
                    text_features = np.zeros((args.max_textual, 300))
                    if np.shape(text_embedding_sample)[0] == 0:
                        text_embedding_sample = np.zeros((1, 300))
                    elif np.shape(text_embedding_sample)[0] > args.max_textual:
                        text_embedding_sample = text_embedding_sample[0:args.max_textual]
                    text_features[:len(text_embedding_sample)] = text_embedding_sample
            elif args.blur == 'text':
                # Text modality blurred: all-zero embeddings.
                text_features = np.zeros((15, 300))
            text_features = torch.from_numpy(text_features)
            text_features = text_features.type(torch.FloatTensor)
            if args.embedding == 'fisher':
                text_features = text_features.view(sample_size, 38400)
            else:
                text_features = text_features.view(sample_size, 15, 300)
            # SCENE TEXT BBOXES ONLY FOR GOOGLE OCR
            if args.blur == 'none' or args.blur == 'visual':
                text_bboxes_sample = np.asarray(text_bboxes[image.replace('images/', '')])
                text_bboxes_features = np.zeros((args.max_textual, 4))
                if np.shape(text_bboxes_sample)[0] == 0:
                    text_bboxes_sample = np.zeros((1, 4))
                elif np.shape(text_bboxes_sample)[0] > args.max_textual:
                    text_bboxes_sample = text_bboxes_sample[0:args.max_textual]
                text_bboxes_features[:len(text_bboxes_sample)] = text_bboxes_sample
            elif args.blur == 'text':
                text_bboxes_features = np.zeros((15, 4))
            text_bboxes_features = torch.from_numpy(text_bboxes_features)
            text_bboxes_features = text_bboxes_features.type(torch.FloatTensor)
            text_bboxes_features = text_bboxes_features.view(sample_size, 15, 4)
            # LOCAL VISUAL FEATURES (zeroed when the visual modality is blurred)
            if args.blur == 'none' or args.blur == 'text':
                if args.dataset == 'bottles':
                    image = 'images/' + image
                local_features_index = image_name2features_index[image]
                local_feats_sample = local_feats[int(local_features_index)]
                local_feats_sample = torch.from_numpy(local_feats_sample[:int(args.max_visual)][:])
            elif args.blur == 'visual':
                local_feats_sample = np.zeros((36, 2048))
                local_feats_sample = torch.from_numpy(local_feats_sample)
            local_feats_sample = local_feats_sample.type(torch.FloatTensor)
            local_feats_sample = local_feats_sample.view(sample_size, 36, 2048)
            # LOCAL VISUAL BBOXES
            if args.blur == 'none' or args.blur == 'text':
                local_bboxes_features = local_bboxes[int(local_features_index)]
                local_bboxes_features = torch.from_numpy(local_bboxes_features[:int(args.max_visual)][:])
            elif args.blur == 'visual':
                local_bboxes_features = np.zeros((36, 4))
                local_bboxes_features = torch.from_numpy(local_bboxes_features)
            local_bboxes_features = local_bboxes_features.type(torch.FloatTensor)
            local_bboxes_features = local_bboxes_features.view(sample_size, 36, 4)
            # pdb.set_trace()
            # Move to GPU
            if cuda:
                data, text_features, local_feats_sample, text_bboxes_features, local_bboxes_features = data.cuda(), text_features.cuda(),\
                    local_feats_sample.cuda(), text_bboxes_features.cuda(), local_bboxes_features.cuda()
            # NOTE(review): Variable() is a no-op since PyTorch 0.4 and deprecated.
            data = Variable(data)
            output, attn_mask, affinity_matrix = net(data, text_features, sample_size, local_feats_sample, text_bboxes_features,
                                                     local_bboxes_features)
            class_total[int(img_class) - 1] += 1
            # Precision: AP of this single sample, accumulated per class.
            softmax = nn.Softmax(dim=1)
            predicted = softmax(output)
            predicted = predicted.data.cpu().numpy()
            y_true = label
            precision_per_class[int(img_class) - 1] += average_precision_score(y_true, predicted.reshape(num_classes, ))
            processed_imgs += 1
    # Average AP per class, then mean over classes (mAP).
    total_precision = [0.0] * num_classes
    for ix, value in enumerate(precision_per_class):
        total_precision[ix] = precision_per_class[ix] / class_total[ix]
        #print ('Average Precision for %d class: %.4f' % (ix + 1, total_precision[ix] ))
    total_mAP = sum(total_precision) / num_classes
    print('Mean Average Precision (mAP) is: %.4f' % (100 * total_mAP))
    print('Process Completed - %d Processed Images' % (processed_imgs))
    return
def main():
    """Load dataset annotations, features and model weights, then run the
    blur-experiment evaluation twice: on images with and without scene text.

    Uses the global `args` (set in __main__). All paths are resolved relative
    to args.base_dir; weight files are hard-coded per dataset/embedding.
    """
    print('Preparing data')
    data_path = args.base_dir
    if args.dataset == 'context':
        num_classes = 28
        # Checkpoint depends on the embedding used at training time.
        if args.embedding == 'fasttext':
            weight_file = '/SSD/GCN_classification/best/context_fullGCN_bboxes_fasttext_google_ocr_concat_mean_split1/checkpoint_context.weights'
        elif args.embedding == 'fisher':
            weight_file = '/SSD/GCN_classification/backup/context_orig_fisherNet_fisher_yolo_phoc_concat_mean/checkpoint_context.weights'
        elif args.embedding == 'glove':
            weight_file = '/SSD/GCN_classification/backup/context_lenet_glove_e2e_mlt_concat_mean/checkpoint_context.weights'
        else:
            # NOTE(review): execution continues with weight_file undefined,
            # which raises NameError at load_checkpoint below.
            print('Embedding not implemented for Performance eval')
        with open(data_path + '/Context/data/split_1.json', 'r') as fp:
            gt_annotations = json.load(fp)
        # Text embeddings: JSON for fasttext/fisher, pickle for glove.
        if args.embedding != 'glove':
            with open(data_path + '/Context/google_ocr/text_embeddings/Context_fasttext.json', 'r') as fp:
                text_embedding = json.load(fp)
        else:
            with open(data_path + '/Context/' + args.ocr + '/text_embeddings/Context_' + args.embedding + '.pickle', 'rb') as fp:
                text_embedding = pickle.load(fp)
        # Load Local features from Faster R-CNN VG
        with open(data_path + '/Context/context_local_feats.npy', 'rb') as fp:
            local_feats = np.load(fp, encoding='bytes')
        # Create img_name to index of local features
        with open(data_path + '/Context/context_local_feats_image_ids.txt', 'r') as fp:
            image_ids = fp.readlines()
        image_name2features_index = {}
        for item in image_ids:
            # Each line looks like a ('path', index) tuple dumped as text.
            img_name = item.strip().split(',')[0].split('/')[-1].replace('\'', '')
            idx = item.strip().split(',')[1].replace(')', '').replace(' ', '')
            image_name2features_index[img_name] = idx
        # BBOXES LOADING FOR TEXT FEATURES
        # Load BBOXES of Scene Text
        with open(data_path + '/Context/google_ocr/bboxes/Context_bboxes.json', 'r') as fp:
            text_bboxes = json.load(fp)
        # Load BBOXES of Local Visual Features
        with open(data_path + '/Context/context_bboxes.npy', 'rb') as fp:
            local_bboxes = np.load(fp, encoding='bytes')
        # Images with and without text
        with open(data_path + '/Context/data/images_with_text.json', 'r') as fp:
            images_with_text = json.load(fp)
        with open(data_path + '/Context/data/images_no_text.json', 'r') as fp:
            images_no_text = json.load(fp)
    else:
        # Drink_Bottle dataset: same loading pipeline, different paths/splits.
        num_classes = 20
        if args.embedding == 'fasttext':
            weight_file = '/SSD/GCN_classification/best/bottles_fullGCN_bboxes_fasttext_google_ocr_concat_mean_split2/checkpoint_bottles.weights'
        elif args.embedding == 'fisher':
            weight_file = '/SSD/GCN_classification/backup/bottles_orig_fisherNet_fisher_yolo_phoc_concat_mean/checkpoint_bottles.weights'
        elif args.embedding == 'glove':
            weight_file = '/SSD/GCN_classification/backup/bottles_lenet_glove_e2e_mlt_concat_mean/checkpoint_bottles.weights'
        else:
            print('Embedding not implemented for Performance eval')
        with open(data_path + '/Drink_Bottle/split_2.json', 'r') as fp:
            gt_annotations = json.load(fp)
        if args.embedding != 'glove':
            with open(data_path + '/Drink_Bottle/google_ocr/text_embeddings/Drink_Bottle_fasttext.json', 'r') as fp:
                text_embedding = json.load(fp)
        else:
            with open(data_path + '/Drink_Bottle/' + args.ocr + '/text_embeddings/Drink_Bottle_' + args.embedding + '.pickle', 'rb') as fp:
                text_embedding = pickle.load(fp)
        # Load Local features from Faster R-CNN VG
        with open(data_path + '/Drink_Bottle/bottles_local_feats.npy', 'rb') as fp:
            local_feats = np.load(fp, encoding='bytes')
        # Create img_name to index of local features
        with open(data_path + '/Drink_Bottle/bottles_local_feats_image_ids.txt', 'r') as fp:
            image_ids = fp.readlines()
        image_name2features_index = {}
        for item in image_ids:
            # Sample: ('/SSD/Datasets/Drink_Bottle/images/14/982.jpg', 0)
            # Keep the last three path components: images/<class>/<file>.jpg
            img_name = item.strip().split(',')[0].replace('\'', '').split('/')[-3:]
            img_name = img_name[0] + '/' + img_name[1] + '/' + img_name[2]
            idx = item.strip().split(',')[1].replace(')', '').replace(' ', '')
            image_name2features_index[img_name] = idx
        # BBOXES LOADING FOR TEXT FEATURES
        # Load BBOXES of Scene Text
        with open(data_path + '/Drink_Bottle/google_ocr/bboxes/Drink_Bottle_bboxes.json', 'r') as fp:
            text_bboxes = json.load(fp)
        # Load BBOXES of Local Visual Features
        with open(data_path + '/Drink_Bottle/bottles_bboxes.npy', 'rb') as fp:
            local_bboxes = np.load(fp, encoding='bytes')
        # Images with and without text
        with open(data_path + '/Drink_Bottle/images_with_text.json', 'r') as fp:
            images_with_text = json.load(fp)
        with open(data_path + '/Drink_Bottle/images_no_text.json', 'r') as fp:
            images_no_text = json.load(fp)
    embedding_size = get_embedding_size(args.embedding)
    print('Loading Model')
    net = load_model(args, num_classes, embedding_size)
    checkpoint = load_checkpoint(weight_file)
    net.load_state_dict(checkpoint)
    print('Checking CUDA')
    if args.cuda and args.ngpu > 1:
        print('\t* Data Parallel **NOT TESTED**')
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    if args.cuda:
        print('\t* CUDA ENABLED!')
        net = net.cuda()
    print('\n*** TEST ***\n')
    print('\n*** Evaluating Performance on Images WITH Scene Text ***\n')
    test(args, net, args.cuda, num_classes, gt_annotations, text_embedding, local_feats, image_name2features_index, text_bboxes, local_bboxes, images_with_text)
    print('\n*** Evaluating Performance on Images WITHOUT Scene Text ***\n')
    test(args, net, args.cuda, num_classes, gt_annotations, text_embedding, local_feats, image_name2features_index, text_bboxes, local_bboxes, images_no_text)
    print('\nProcess Completed!')
    sys.exit()
if __name__ == '__main__':
    # Parse options (global `args` is read by main() and test()).
    args = Options_Test().parse()
    print('Parameters:\t' + str(args))
    # Check cuda & Set random seed
    args.cuda = args.ngpu > 0 and torch.cuda.is_available()
    main()
# Compare a super-resolution result against ground truth with PSNR and SSIM:
# "bicubic" = real LR input vs. real HR, "synth" = generated HR vs. real HR.
import os

import cv2
import numpy as np

# BUGFIX: compare_ssim/compare_psnr were removed from skimage.measure in
# scikit-image >= 0.18; the canonical names now live in skimage.metrics.
# Fall back to the legacy import on older installs.
try:
    from skimage.metrics import peak_signal_noise_ratio as compare_psnr
    from skimage.metrics import structural_similarity as compare_ssim
except ImportError:
    from skimage.measure import compare_psnr, compare_ssim

#root = '/home/mengqi/fileserver/results/cross_scale/train-L_HR-L_LR-pix2pix/results_test/1215_0100_nc3/test_latest/images'
root = '/home/mengqi/fileserver/results/cross_scale/train-L_HR-L_LR-cyclegan/results_test/1215_0100_nc3/test_3000epoch/images'

# Load real HR target, generated HR, and real LR input (BGR uint8 from cv2).
R_real_HR = cv2.imread(os.path.join(root, 'R_real_B.png'))
R_fake_HR = cv2.imread(os.path.join(root, 'R_fake_B.png'))
R_real_LR = cv2.imread(os.path.join(root, 'R_real_A.png'))

# Positional arguments work for both the old (im_true, im_test) and the new
# (image_true, image_test) signatures.
bicubic_psnr = compare_psnr(R_real_HR, R_real_LR)
synth_psnr = compare_psnr(R_real_HR, R_fake_HR)
print("PSNR: bicubic {}, synth {}".format(bicubic_psnr, synth_psnr))

# SSIM over color images: newer scikit-image uses channel_axis, older
# versions use the multichannel flag.
try:
    bicubic_ssim = compare_ssim(R_real_HR, R_real_LR, channel_axis=-1)
    synth_ssim = compare_ssim(R_real_HR, R_fake_HR, channel_axis=-1)
except TypeError:
    bicubic_ssim = compare_ssim(R_real_HR, R_real_LR, multichannel=True)
    synth_ssim = compare_ssim(R_real_HR, R_fake_HR, multichannel=True)
print("SSIM: bicubic {}, synth {}".format(bicubic_ssim, synth_ssim))
|
"""
Implementation of the ABY3 framework.
"""
from __future__ import absolute_import
from typing import Tuple, List, Union, Optional, Callable
import abc
import sys
from math import log2, ceil
from functools import reduce
import numpy as np
import tensorflow as tf
from ...tensor.factory import (
AbstractFactory,
AbstractTensor,
AbstractConstant,
)
from ...tensor.helpers import inverse
from ...tensor.fixed import FixedpointConfig, _validate_fixedpoint_config
from ...tensor import fixed64, fixed64_ni
from ...tensor.native import native_factory
from ...tensor.boolfactory import bool_factory
from ...player import Player
from ...config import get_config
from ..protocol import Protocol, memoize
from ...operations import secure_random as crypto
# Type of an inputter callback: returns one tf.Tensor or a list of them.
TFEInputter = Callable[[], Union[List[tf.Tensor], tf.Tensor]]
# TF dtypes that may back encoded ring elements (checked in _encode).
TF_NATIVE_TYPES = [tf.bool, tf.int8, tf.int16, tf.int32, tf.int64]
_THISMODULE = sys.modules[__name__]
# ===== Share types =====
ARITHMETIC = 0
BOOLEAN = 1
# ===== Factory =====
i64_factory = native_factory(tf.int64)  # int64-backed tensor factory
b_factory = bool_factory()  # boolean-backed tensor factory
class ABY3(Protocol):
"""ABY3 framework."""
def __init__(
self,
server_0=None,
server_1=None,
server_2=None,
use_noninteractive_truncation=True,
):
self._initializers = list()
config = get_config()
self.servers = [None, None, None]
self.servers[0] = config.get_player(server_0 if server_0 else "server0")
self.servers[1] = config.get_player(server_1 if server_1 else "server1")
self.servers[2] = config.get_player(server_2 if server_2 else "server2")
int_factory = i64_factory
if use_noninteractive_truncation:
fixedpoint_config = fixed64_ni
else:
fixedpoint_config = fixed64
self.fixedpoint_config = fixedpoint_config
self.int_factory = int_factory
self.bool_factory = b_factory
self.pairwise_keys, self.pairwise_nonces = self.setup_pairwise_randomness()
self.b2a_keys_1, self.b2a_keys_2, self.b2a_nonce = self.setup_b2a_generator()
@property
def nbits(self):
return self.int_factory.nbits
def setup_pairwise_randomness(self):
"""
Initial setup for pairwise randomness: Every two parties hold a shared key.
"""
if not crypto.supports_seeded_randomness():
raise NotImplementedError("Secure randomness implementation is not available.")
keys = [[None, None], [None, None], [None, None]]
with tf.device(self.servers[0].device_name):
seed_0 = crypto.secure_seed()
with tf.device(self.servers[1].device_name):
seed_1 = crypto.secure_seed()
with tf.device(self.servers[2].device_name):
seed_2 = crypto.secure_seed()
# Replicated keys
# NOTE: The following `with` contexts do NOT have any impact for the Python-only operations.
# We use them here only for indicating "which server has which seed".
# In other words, `keys[0][1] = seed_1` only stores the TF graph node `seed_1` in the
# Python list `keys`, but does NOT actually "send" `seed_1` to server 0, which only happens
# when a future TF operation on server 0 uses `keys[0][1]`.
# The same NOTE applies to other places where we use Python list to store TF graph nodes in the
# `with` context.
with tf.device(self.servers[0].device_name):
keys[0][0] = seed_0
keys[0][1] = seed_1
with tf.device(self.servers[1].device_name):
keys[1][0] = seed_1
keys[1][1] = seed_2
with tf.device(self.servers[2].device_name):
keys[2][0] = seed_2
keys[2][1] = seed_0
# nonces[0] for server 0 and 1, nonces[1] for server 1 and 2, nonces[2] for server 2 and 0
nonces = np.array([0, 0, 0], dtype=np.int)
return keys, nonces
    def setup_b2a_generator(self):
        """
        Initial setup for generating shares during the conversion
        from boolean sharing to arithmetic sharing.

        Returns (b2a_keys_1, b2a_keys_2, b2a_nonce):
          b2a_keys_1: server 0 and 1 hold all three seeds, server 2 holds two
                      (missing seed_1 at index [2][1]).
          b2a_keys_2: server 1 and 2 hold all three seeds, server 0 holds two
                      (missing seed_2 at index [0][2]).
          b2a_nonce:  counter mixed into the seeds on every use.
        """
        if not crypto.supports_seeded_randomness():
            raise NotImplementedError("Secure randomness implementation is not available.")
        # Type 1: Server 0 and 1 hold three keys, while server 2 holds two
        b2a_keys_1 = [[None, None, None], [None, None, None], [None, None, None]]
        with tf.device(self.servers[0].device_name):
            seed_0 = crypto.secure_seed()
        with tf.device(self.servers[1].device_name):
            seed_1 = crypto.secure_seed()
        with tf.device(self.servers[2].device_name):
            seed_2 = crypto.secure_seed()
        # As in setup_pairwise_randomness, the device contexts only document
        # ownership; nothing is sent until a TF op consumes the seed.
        with tf.device(self.servers[0].device_name):
            b2a_keys_1[0][0] = seed_0
            b2a_keys_1[0][1] = seed_1
            b2a_keys_1[0][2] = seed_2
        with tf.device(self.servers[1].device_name):
            b2a_keys_1[1][0] = seed_0
            b2a_keys_1[1][1] = seed_1
            b2a_keys_1[1][2] = seed_2
        with tf.device(self.servers[2].device_name):
            b2a_keys_1[2][0] = seed_0
            b2a_keys_1[2][2] = seed_2
        # Type 2: Server 1 and 2 hold three keys, while server 0 holds two
        b2a_keys_2 = [[None, None, None], [None, None, None], [None, None, None]]
        with tf.device(self.servers[0].device_name):
            seed_0 = crypto.secure_seed()
        with tf.device(self.servers[1].device_name):
            seed_1 = crypto.secure_seed()
        with tf.device(self.servers[2].device_name):
            seed_2 = crypto.secure_seed()
        with tf.device(self.servers[0].device_name):
            b2a_keys_2[0][0] = seed_0
            b2a_keys_2[0][1] = seed_1
        with tf.device(self.servers[1].device_name):
            b2a_keys_2[1][0] = seed_0
            b2a_keys_2[1][1] = seed_1
            b2a_keys_2[1][2] = seed_2
        with tf.device(self.servers[2].device_name):
            b2a_keys_2[2][0] = seed_0
            b2a_keys_2[2][1] = seed_1
            b2a_keys_2[2][2] = seed_2
        b2a_nonce = 0
        return b2a_keys_1, b2a_keys_2, b2a_nonce
def define_constant(
self,
value: Union[np.ndarray, int, float],
apply_scaling: bool = True,
share_type=ARITHMETIC,
name: Optional[str] = None,
factory: Optional[AbstractFactory] = None,
):
"""
Define a constant to use in computation.
.. code-block:: python
x = prot.define_constant(np.array([1,2,3,4]), apply_scaling=False)
:See: tf.constant
:param bool apply_scaling: Whether or not to scale the value.
:param str name: What name to give to this node in the graph.
:param AbstractFactory factory: Which tensor type to represent this value with.
"""
assert isinstance(value, (np.ndarray, int, float))
if isinstance(value, (int, float)):
value = np.array([value])
factory = factory or self.int_factory
value = self._encode(value, apply_scaling)
with tf.name_scope("constant{}".format("-" + name if name else "")):
with tf.device(self.servers[0].device_name):
x_on_0 = factory.constant(value)
with tf.device(self.servers[1].device_name):
x_on_1 = factory.constant(value)
with tf.device(self.servers[2].device_name):
x_on_2 = factory.constant(value)
return ABY3Constant(self, [x_on_0, x_on_1, x_on_2], apply_scaling, share_type)
def define_private_variable(
self,
initial_value,
apply_scaling: bool = True,
share_type=ARITHMETIC,
name: Optional[str] = None,
factory: Optional[AbstractFactory] = None,
):
"""
Define a private variable.
This will take the passed value and construct shares that will be split up
between those involved in the computation.
For example, in a three party replicated sharing, this will split the value into
three shares and transfer two shares to each party in a secure manner.
:see tf.Variable
:param Union[np.ndarray,tf.Tensor,ABY3PublicTensor] initial_value: The initial value.
:param bool apply_scaling: Whether or not to scale the value.
:param str name: What name to give to this node in the graph.
:param AbstractFactory factory: Which tensor type to represent this value with.
"""
init_val_types = (np.ndarray, tf.Tensor, ABY3PrivateTensor)
assert isinstance(initial_value, init_val_types), type(initial_value)
factory = factory or self.int_factory
suffix = "-" + name if name else ""
with tf.name_scope("private-var{}".format(suffix)):
if isinstance(initial_value, np.ndarray):
initial_value = self._encode(initial_value, apply_scaling)
v = factory.tensor(initial_value)
shares = self._share(v, share_type=share_type)
elif isinstance(initial_value, tf.Tensor):
initial_value = self._encode(initial_value, apply_scaling)
v = factory.tensor(initial_value)
shares = self._share(v, share_type=share_type)
elif isinstance(initial_value, ABY3PrivateTensor):
shares = initial_value.unwrapped
else:
raise TypeError(("Don't know how to turn {} "
"into private variable").format(type(initial_value)))
# The backing factory for the shares might have changed after the sharing step
factory = shares[0][0].factory
x = [[None, None], [None, None], [None, None]]
with tf.device(self.servers[0].device_name):
x[0][0] = factory.variable(shares[0][0])
x[0][1] = factory.variable(shares[0][1])
with tf.device(self.servers[1].device_name):
x[1][0] = factory.variable(shares[1][0])
x[1][1] = factory.variable(shares[1][1])
with tf.device(self.servers[2].device_name):
x[2][0] = factory.variable(shares[2][0])
x[2][1] = factory.variable(shares[2][1])
x = ABY3PrivateVariable(self, x, apply_scaling, share_type)
self._initializers.append(x.initializer)
return x
    def define_local_computation(
        self,
        player,
        computation_fn,
        arguments=None,
        apply_scaling=True,
        share_type=ARITHMETIC,
        name=None,
        factory=None,
    ):
        """
        Define a local computation that happens on plaintext tensors.

        Private/public inputs are reconstructed/decoded on `player`'s device,
        `computation_fn` runs there in plaintext, and tensor results are
        re-shared as ABY3 private tensors.

        :param player: Who performs the computation and gets to see the values in plaintext.
        :param computation_fn: Callable executed on the plaintext inputs.
        :param arguments: Optional tf.Tensor / ABY3 tensor argument(s).
        :param apply_scaling: Whether or not to scale the outputs.
        :param name: Optional name to give to this node in the graph.
        :param factory: Backing tensor type to use for outputs.
        """
        factory = factory or self.int_factory
        if isinstance(player, str):
            player = get_config().get_player(player)
        assert isinstance(player, Player)

        def share_output(v: tf.Tensor):
            # Encode a plaintext result and secret-share it back to the servers.
            assert v.shape.is_fully_defined(), (
                "Shape of return value '{}' on '{}' not fully defined".format(
                    name if name else "",
                    player.name,
                ))
            v = self._encode(v, apply_scaling)
            w = factory.tensor(v)
            x = self._share_and_wrap(w, apply_scaling, share_type, player)
            return x

        def reconstruct_input(x, player):
            # Bring one argument into plaintext on `player`'s device.
            if isinstance(x, tf.Tensor):
                return x
            if isinstance(x, ABY3PublicTensor):
                w, _ = x.unwrapped
                v = self._decode(w, x.is_scaled)
                return v
            if isinstance(x, ABY3PrivateTensor):
                shares = x.unwrapped
                w = self._reconstruct(shares, player, share_type)
                v = self._decode(w, x.is_scaled)
                return v
            raise TypeError(("Don't know how to process input argument "
                             "of type {}").format(type(x)))

        with tf.name_scope(name if name else "local-computation"):
            with tf.device(player.device_name):
                if arguments is None:
                    inputs = []
                else:
                    # Accept a single argument or a list/tuple of arguments.
                    if not isinstance(arguments, (list, tuple)):
                        arguments = [arguments]
                    inputs = [reconstruct_input(x, player) for x in arguments]
                outputs = computation_fn(*inputs)
                # tf.Operation results (pure side effects) are returned as-is;
                # tensor results are re-shared.
                if isinstance(outputs, tf.Operation):
                    return outputs
                if isinstance(outputs, tf.Tensor):
                    return share_output(outputs)
                if isinstance(outputs, (list, tuple)):
                    return [share_output(output) for output in outputs]
                raise TypeError("Don't know how to handle results of "
                                "type {}".format(type(outputs)))
def define_private_input(
self,
player,
inputter_fn,
apply_scaling: bool = True,
share_type=ARITHMETIC,
name: Optional[str] = None,
factory: Optional[AbstractFactory] = None,
):
"""
Define a private input.
This represents a `private` input owned by the specified player into the graph.
:param Union[str,Player] player: Which player owns this input.
:param bool apply_scaling: Whether or not to scale the value.
:param str name: What name to give to this node in the graph.
:param AbstractFactory factory: Which backing type to use for this input
(e.g. `int100` or `int64`).
"""
suffix = "-" + name if name else ""
return self.define_local_computation(player=player,
computation_fn=inputter_fn,
arguments=[],
apply_scaling=apply_scaling,
share_type=share_type,
name="private-input{}".format(suffix),
factory=factory)
def define_public_input(
self,
player: Union[str, Player],
inputter_fn: TFEInputter,
apply_scaling: bool = True,
share_type=ARITHMETIC,
name: Optional[str] = None,
factory: Optional[AbstractFactory] = None,
):
"""
Define a public input.
This represents a `public` input owned by the specified player into the
graph.
:param Union[str,Player] player: Which player owns this input.
:param bool apply_scaling: Whether or not to scale the value.
:param str name: What name to give to this node in the graph.
"""
if isinstance(player, str):
player = get_config().get_player(player)
assert isinstance(player, Player)
factory = factory or self.int_factory
suffix = "-" + name if name else ""
def helper(v: tf.Tensor) -> "ABY3PublicTensor":
assert v.shape.is_fully_defined(), (
"Shape of input '{}' on '{}' is not fully defined".format(
name if name else "",
player.name,
))
v = self._encode(v, apply_scaling)
w = factory.tensor(v)
return ABY3PublicTensor(self, [w, w, w], apply_scaling, share_type)
with tf.name_scope("public-input{}".format(suffix)):
with tf.device(player.device_name):
inputs = inputter_fn()
if isinstance(inputs, tf.Tensor):
# single input -> single output
v = inputs
return helper(v)
if isinstance(inputs, (list, tuple)):
# multiple inputs -> multiple outputs
return [helper(v) for v in inputs]
raise TypeError(
("Don't know how to handle inputs of type {}").format(type(inputs)))
def define_public_tensor(
self,
tensor: tf.Tensor,
apply_scaling: bool = True,
share_type=ARITHMETIC,
name: Optional[str] = None,
factory: Optional[AbstractFactory] = None,
):
"""
Convert a tf.Tensor to an ABY3PublicTensor.
"""
assert isinstance(tensor, tf.Tensor)
assert tensor.shape.is_fully_defined(), (
"Shape of input '{}' is not fully defined".format(name if name else ""))
factory = factory or self.int_factory
with tf.name_scope("public-tensor"):
tensor = self._encode(tensor, apply_scaling)
w = factory.tensor(tensor)
return ABY3PublicTensor(self, [w, w, w], apply_scaling, share_type)
def define_output(
self,
player,
arguments,
outputter_fn,
name=None,
):
"""
Define an output for this graph.
:param player: Which player this output will be sent to.
"""
def result_wrapper(*args):
op = outputter_fn(*args)
# wrap in tf.group to prevent sending back any tensors (which might hence
# be leaked)
return tf.group(op)
return self.define_local_computation(
player=player,
computation_fn=result_wrapper,
arguments=arguments,
name="output{}".format("-" + name if name else ""),
)
@property
def initializer(self) -> tf.Operation:
return tf.group(*self._initializers)
def add_initializers(self, *initializers):
self._initializers.append(tf.group(*initializers))
def clear_initializers(self) -> None:
del self._initializers[:]
    def _encode(
        self,
        rationals: Union[tf.Tensor, np.ndarray],
        apply_scaling: bool,
        factory=None,
    ) -> Union[tf.Tensor, np.ndarray]:
        """
        Encode tensor of rational numbers into tensor of ring elements. Output is
        of same type as input to allow function to be used for constants.

        :param rationals: plaintext values (numpy array or tf.Tensor).
        :param apply_scaling: multiply by the fixed-point scaling factor first.
        :param factory: target factory for tf.Tensor inputs (defaults to int_factory).
        """
        with tf.name_scope("encode"):
            # we first scale as needed
            if apply_scaling:
                scaled = rationals * self.fixedpoint_config.scaling_factor
            else:
                scaled = rationals
            # and then we round to integers
            if isinstance(scaled, np.ndarray):
                # object dtype keeps Python ints, avoiding int64 overflow on
                # large encoded values
                integers = scaled.astype(int).astype(object)
            elif isinstance(scaled, tf.Tensor):
                factory = factory or self.int_factory
                tf_native_type = factory.native_type
                assert tf_native_type in TF_NATIVE_TYPES
                integers = tf.cast(scaled, dtype=tf_native_type)
            else:
                raise TypeError("Don't know how to encode {}".format(type(rationals)))
            # output type mirrors input type (ndarray in -> ndarray out, etc.)
            assert type(rationals) == type(integers)
            return integers
@memoize
def _decode(self, elements: AbstractTensor, is_scaled: bool) -> tf.Tensor:
"""Decode tensor of ring elements into tensor of rational numbers."""
with tf.name_scope("decode"):
scaled = elements.to_native()
if not is_scaled:
return scaled
return scaled / self.fixedpoint_config.scaling_factor
def _share(self, secret: AbstractTensor, share_type: str, player=None):
"""Secret-share an AbstractTensor.
Args:
secret: `AbstractTensor`, the tensor to share.
Returns:
A pair of `AbstractTensor`, the shares.
"""
with tf.name_scope("share"):
if share_type == ARITHMETIC or share_type == BOOLEAN:
share0 = secret.factory.sample_uniform(secret.shape)
share1 = secret.factory.sample_uniform(secret.shape)
if share_type == ARITHMETIC:
share2 = secret - share0 - share1
elif share_type == BOOLEAN:
share2 = secret ^ share0 ^ share1
# Replicated sharing
shares = ((share0, share1), (share1, share2), (share2, share0))
return shares
else:
raise NotImplementedError("Unknown share type.")
def _share_and_wrap(
self,
secret: AbstractTensor,
is_scaled: bool,
share_type: str,
player=None,
) -> "ABY3PrivateTensor":
shares = self._share(secret, share_type, player)
return ABY3PrivateTensor(self, shares, is_scaled, share_type)
    def _reconstruct(self, shares, player, share_type):
        """
        Reconstruct the plaintext value at a specified player.

        The shares might locate at three different players, so we need the 'player' argument
        in order to optimally use two local shares and one (probably) remote share to
        minimize communication.

        :param shares: replicated shares as produced by _share:
            shares[0]=(s0,s1), shares[1]=(s1,s2), shares[2]=(s2,s0).
        :param player: Where to reconstruct
        :return: the plaintext tensor
        """
        def helper(s0, s1, s2):
            # Combine the three distinct shares back into the plaintext.
            if share_type == ARITHMETIC:
                return s0 + s1 + s2
            elif share_type == BOOLEAN:
                return s0 ^ s1 ^ s2
            else:
                raise NotImplementedError("Only arithmetic and boolean sharings are supported.")
        with tf.name_scope("reconstruct"):
            if share_type == ARITHMETIC or share_type == BOOLEAN:
                # Each server already holds two shares; fetch only the one it
                # is missing from the next server.
                if player == self.servers[0]:
                    return helper(shares[0][0], shares[0][1], shares[2][0])
                elif player == self.servers[1]:
                    return helper(shares[1][0], shares[1][1], shares[0][0])
                elif player == self.servers[2]:
                    return helper(shares[2][0], shares[2][1], shares[1][0])
                else:
                    # The player is not any of the three ABY3 servers, so
                    # we just let each server give one share to this player
                    # in order to have a fair communication cost for each server
                    return helper(shares[0][0], shares[1][0], shares[2][0])
            else:
                raise NotImplementedError("Unknown share type.")
    def _gen_zero_sharing(self, shape, share_type=ARITHMETIC, factory=None):
        """
        Generate a fresh sharing of zero without communication: each server
        derives two streams from the seeds it shares pairwise with its
        neighbours and returns their difference (ARITHMETIC) or XOR (BOOLEAN).
        Because each seeded stream is sampled by exactly two servers with the
        same (key + nonce), the three results cancel: a0 + a1 + a2 == 0
        (resp. a0 ^ a1 ^ a2 == 0). The shared nonces are bumped afterwards so
        the next call produces fresh randomness.
        """
        def helper(f0, f1):
            if share_type == ARITHMETIC:
                return f0 - f1
            elif share_type == BOOLEAN:
                return f0 ^ f1
            else:
                raise NotImplementedError("Only arithmetic and boolean sharings are supported.")
        factory = factory or self.int_factory
        with tf.name_scope("zero-sharing"):
            with tf.device(self.servers[0].device_name):
                f00 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[0][0] + self.pairwise_nonces[2])  # yapf: disable
                f01 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[0][1] + self.pairwise_nonces[0])  # yapf: disable
                a0 = helper(f00, f01)
            with tf.device(self.servers[1].device_name):
                f10 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[1][0] + self.pairwise_nonces[0])  # yapf: disable
                f11 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[1][1] + self.pairwise_nonces[1])  # yapf: disable
                a1 = helper(f10, f11)
            with tf.device(self.servers[2].device_name):
                f20 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[2][0] + self.pairwise_nonces[1])  # yapf: disable
                f21 = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[2][1] + self.pairwise_nonces[2])  # yapf: disable
                a2 = helper(f20, f21)
        # Advance all pairwise nonces so seeds are never reused.
        self.pairwise_nonces = self.pairwise_nonces + 1
        return a0, a1, a2
    def _gen_random_sharing(self, shape, share_type=ARITHMETIC, factory=None):
        """
        Generate a replicated sharing of a uniformly random value without
        communication: neighbouring servers sample matching shares from their
        pairwise seeds (r[0][1] == r[1][0], r[1][1] == r[2][0],
        r[2][1] == r[0][0] by seed construction), then bump the nonces.

        Returns an ABY3PrivateTensor (is_scaled=True) over the given shape.
        """
        r = [[None] * 2 for _ in range(3)]
        factory = factory or self.int_factory
        with tf.name_scope("random-sharing"):
            with tf.device(self.servers[0].device_name):
                r[0][0] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[0][0] + self.pairwise_nonces[2])  # yapf: disable
                r[0][1] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[0][1] + self.pairwise_nonces[0])  # yapf: disable
            with tf.device(self.servers[1].device_name):
                r[1][0] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[1][0] + self.pairwise_nonces[0])  # yapf: disable
                r[1][1] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[1][1] + self.pairwise_nonces[1])  # yapf: disable
            with tf.device(self.servers[2].device_name):
                r[2][0] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[2][0] + self.pairwise_nonces[1])  # yapf: disable
                r[2][1] = factory.sample_seeded_uniform(
                    shape=shape,
                    seed=self.pairwise_keys[2][1] + self.pairwise_nonces[2])  # yapf: disable
        # Advance all pairwise nonces so seeds are never reused.
        self.pairwise_nonces = self.pairwise_nonces + 1
        return ABY3PrivateTensor(self, r, True, share_type)
def _gen_b2a_sharing(self, shape, b2a_keys):
    """Sample the seeded shares used by the boolean-to-arithmetic (B2A) conversion.

    Server i derives the two shares it is entitled to from its b2a keys.
    When a server additionally holds the third key (the corresponding
    `b2a_keys[i][j]` is not None), it can re-derive all three shares and
    compute the full value x_on_i = share_0 ^ share_1 ^ share_2 locally.

    :param shape: Shape of the shares to sample.
    :param b2a_keys: 3x3 key matrix; entry [i][j] is server i's key for share j
        (None if server i must not learn share j).
    :returns: Tuple `(x_on_0, x_on_1, x_on_2, shares)`; each `x_on_i` is the
        XOR of all three shares on server i, or None when server i lacks the
        extra key.
    """
    shares = [[None, None], [None, None], [None, None]]
    with tf.device(self.servers[0].device_name):
        shares[0][0] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[0][0] + self.b2a_nonce)  # yapf: disable
        shares[0][1] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[0][1] + self.b2a_nonce)  # yapf: disable
        x_on_0 = None
        if b2a_keys[0][2] is not None:
            share_2 = self.int_factory.sample_seeded_uniform(
                shape=shape,
                seed=b2a_keys[0][2] + self.b2a_nonce)  # yapf: disable
            x_on_0 = shares[0][0] ^ shares[0][1] ^ share_2
    with tf.device(self.servers[1].device_name):
        shares[1][0] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[1][1] + self.b2a_nonce)  # yapf: disable
        shares[1][1] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[1][2] + self.b2a_nonce)  # yapf: disable
        x_on_1 = None
        if b2a_keys[1][0] is not None:
            share_0 = self.int_factory.sample_seeded_uniform(
                shape=shape,
                seed=b2a_keys[1][0] + self.b2a_nonce)  # yapf: disable
            x_on_1 = share_0 ^ shares[1][0] ^ shares[1][1]
    with tf.device(self.servers[2].device_name):
        shares[2][0] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[2][2] + self.b2a_nonce)  # yapf: disable
        shares[2][1] = self.int_factory.sample_seeded_uniform(
            shape=shape,
            seed=b2a_keys[2][0] + self.b2a_nonce)  # yapf: disable
        x_on_2 = None
        if b2a_keys[2][1] is not None:
            share_1 = self.int_factory.sample_seeded_uniform(
                shape=shape,
                seed=b2a_keys[2][1] + self.b2a_nonce)  # yapf: disable
            x_on_2 = share_1 ^ shares[2][0] ^ shares[2][1]
    # Advance the nonce so the next conversion samples fresh randomness.
    self.b2a_nonce = self.b2a_nonce + 1
    return x_on_0, x_on_1, x_on_2, shares
def _ot(
    self,
    sender,
    receiver,
    helper,
    m0,
    m1,
    c_on_receiver,
    c_on_helper,
    key_on_sender,
    key_on_helper,
    nonce,
):
    """
    Three-party OT protocol.

    'm0' and 'm1' are the two messages located on the sender.
    'c_on_receiver' and 'c_on_helper' should be the same choice bit, located on receiver and helper respectively.
    'key_on_sender' and 'key_on_helper' should be the same key, located on sender and helper respectively.
    'nonce' is a non-repeating ID for this call of the OT protocol.

    :returns: The chosen message `m_c`, located on the receiver.
    """
    assert m0.shape == m1.shape, "m0 shape {}, m1 shape {}".format(m0.shape, m1.shape)
    assert m0.factory == self.int_factory
    assert m1.factory == self.int_factory
    assert c_on_receiver.factory == self.bool_factory
    assert c_on_helper.factory == self.bool_factory
    with tf.name_scope("OT"):
        int_factory = self.int_factory
        with tf.device(sender.device_name):
            # Sender derives two one-time masks from the shared key + nonce
            # and hides both messages behind them.
            w_on_sender = int_factory.sample_seeded_uniform(shape=[2] + m0.shape.as_list(),
                                                            seed=key_on_sender + nonce)
            masked_m0 = m0 ^ w_on_sender[0]
            masked_m1 = m1 ^ w_on_sender[1]
        with tf.device(helper.device_name):
            # Helper re-derives the same masks (same key + nonce) and selects
            # the one matching the choice bit, learning nothing about m0/m1.
            w_on_helper = int_factory.sample_seeded_uniform(shape=[2] + m0.shape.as_list(),
                                                            seed=key_on_helper + nonce)
            w_c = int_factory.where(c_on_helper.value,
                                    w_on_helper[1],
                                    w_on_helper[0],
                                    v2=False)
        with tf.device(receiver.device_name):
            # Receiver picks the masked message for its choice bit and unmasks
            # it with the helper-provided mask.
            masked_m_c = int_factory.where(c_on_receiver.value,
                                           masked_m1,
                                           masked_m0,
                                           v2=False)
            m_c = masked_m_c ^ w_c
    return m_c
@memoize
def assign(self, variable: "ABY3PrivateVariable", value) -> tf.Operation:
    """See tf.assign.

    Assign the shares of `value` into the backing variables of `variable`,
    share by share on each owning server.

    :param variable: The private variable to update.
    :param value: The private tensor whose shares are written in.
    :returns: A single grouped tf.Operation performing all six assignments.
    """
    assert isinstance(variable, ABY3PrivateVariable), type(variable)
    assert isinstance(value, ABY3PrivateTensor), type(value)
    assert variable.is_scaled == value.is_scaled, ("Scaling must match: {}, {}".format(
        variable.is_scaled,
        value.is_scaled,
    ))
    var_shares = variable.unwrapped
    val_shares = value.unwrapped
    with tf.name_scope("assign"):
        # Having this control_dependencies is important in order to avoid that
        # computationally-dependent shares are updated in different pace
        # (e.g., share0 is computed from share1, and we need to make sure that
        # share1 is NOT already updated).
        with tf.control_dependencies([
                val_shares[0][0].value, val_shares[0][1].value, val_shares[1][0].value,
                val_shares[1][1].value, val_shares[2][0].value, val_shares[2][1].value
        ]):
            with tf.device(self.servers[0].device_name):
                op00 = var_shares[0][0].assign_from_same(val_shares[0][0])
                op01 = var_shares[0][1].assign_from_same(val_shares[0][1])
            with tf.device(self.servers[1].device_name):
                op10 = var_shares[1][0].assign_from_same(val_shares[1][0])
                op11 = var_shares[1][1].assign_from_same(val_shares[1][1])
            with tf.device(self.servers[2].device_name):
                op20 = var_shares[2][0].assign_from_same(val_shares[2][0])
                op21 = var_shares[2][1].assign_from_same(val_shares[2][1])
            op = tf.group(op00, op01, op10, op11, op20, op21)
    return op
@memoize
def add(self, x, y):
    """Element-wise addition of `x` and `y`, lifting plain values first.

    :param x: The first operand.
    :param y: The second operand.
    """
    lhs, rhs = self.lift(x, y)
    return self.dispatch("add", lhs, rhs)
def lift(self, x, y=None, share_type=ARITHMETIC):
    """
    Convenience method for working with mixed typed tensors in programs:
    combining any of the ABY3 objects together with e.g. ints and floats
    will automatically lift the latter into ABY3 objects.

    Lifting will guarantee the two outputs are both scaled or unscaled if at
    least one of them is lifted from int or float.

    :param x: First (or only) value to lift.
    :param y: Optional second value; when given, both are lifted pairwise.
    :param share_type: Share type used when encoding a plain value.
    :raises TypeError: For input types that cannot be lifted.
    """
    # Single-argument form: encode raw values, wrap TF tensors, pass ABY3 through.
    if y is None:
        if isinstance(x, (np.ndarray, int, float)):
            return self.define_constant(x, share_type=share_type)

        if isinstance(x, tf.Tensor):
            return self.define_public_tensor(x, share_type=share_type)

        if isinstance(x, ABY3Tensor):
            return x

        raise TypeError("Don't know how to lift {}".format(type(x)))

    # Pairwise form: nine (type(x), type(y)) combinations. Whenever one side
    # is already an ABY3Tensor, the other side inherits its scaling and
    # backing dtype so the pair is consistent.
    if isinstance(x, (np.ndarray, int, float)):
        if isinstance(y, (np.ndarray, int, float)):
            x = self.define_constant(x, share_type=share_type)
            y = self.define_constant(y, share_type=share_type)
            return x, y

        if isinstance(y, tf.Tensor):
            x = self.define_constant(x, share_type=share_type)
            y = self.define_public_tensor(y, share_type=share_type)
            return x, y

        if isinstance(y, ABY3Tensor):
            x = self.define_constant(
                x,
                apply_scaling=y.is_scaled,
                share_type=share_type,
                factory=y.backing_dtype,
            )
            return x, y

        raise TypeError(("Don't know how to lift " "{}, {}").format(type(x), type(y)))

    if isinstance(x, tf.Tensor):
        if isinstance(y, (np.ndarray, int, float)):
            x = self.define_public_tensor(x, share_type=share_type)
            y = self.define_constant(y, share_type=share_type)
            return x, y

        if isinstance(y, tf.Tensor):
            x = self.define_public_tensor(x, share_type=share_type)
            y = self.define_public_tensor(y, share_type=share_type)
            return x, y

        if isinstance(y, ABY3Tensor):
            x = self.define_public_tensor(
                x,
                apply_scaling=y.is_scaled,
                share_type=share_type,
                factory=y.backing_dtype,
            )
            return x, y

        raise TypeError(("Don't know how to lift " "{}, {}").format(type(x), type(y)))

    if isinstance(x, ABY3Tensor):
        if isinstance(y, (np.ndarray, int, float)):
            y = self.define_constant(
                y,
                apply_scaling=x.is_scaled,
                share_type=share_type,
                factory=x.backing_dtype,
            )
            return x, y

        if isinstance(y, tf.Tensor):
            y = self.define_public_tensor(
                y,
                apply_scaling=x.is_scaled,
                share_type=share_type,
                factory=x.backing_dtype,
            )
            return x, y

        if isinstance(y, ABY3Tensor):
            return x, y

        raise TypeError(("Don't know how to lift " "{}, {}").format(type(x), type(y)))
@memoize
def add_n(self, tensors):
    """Sum an iterable of tensors via a left fold over `+`."""
    # TODO(Morten) we could optimize by doing lazy reductions, potentially
    # segmenting as needed
    return reduce(lambda acc, t: acc + t, tensors)
@memoize
def sub(self, x, y):
    """Element-wise subtraction `x - y`, lifting plain values first."""
    minuend, subtrahend = self.lift(x, y)
    return self.dispatch("sub", minuend, subtrahend)
@memoize
def negative(self, x):
    """Element-wise negation of `x`."""
    operand = self.lift(x)
    return self.dispatch("negative", operand)
@memoize
def mul(self, x, y):
    """Element-wise multiplication of `x` and `y`."""
    lhs, rhs = self.lift(x, y)
    return self.dispatch("mul", lhs, rhs)
@memoize
def mul_trunc2(self, x, y):
    """Element-wise multiplication using the ABY3 "trunc2" truncation variant."""
    lhs, rhs = self.lift(x, y)
    return self.dispatch("mul_trunc2", lhs, rhs)
@memoize
def div(self, x, y):
    """
    Performs a true division of `x` by `y` where `y` is public.

    No flooring is performing if `y` is an integer type as it is implicitly
    treated as a float.
    """
    assert isinstance(x, ABY3Tensor)

    # Division is implemented as multiplication by the public reciprocal;
    # an int divisor is implicitly promoted to float.
    if isinstance(y, (float, int)):
        reciprocal = 1.0 / y
    elif isinstance(y, ABY3PublicTensor):
        reciprocal = 1.0 / y.decode()
    else:
        raise TypeError("Don't know how to divide by type {}".format(type(y)))

    return self.mul(x, reciprocal)
@memoize
def pow(self, x, p):
    """Raise `x` to the public power `p`."""
    base = self.lift(x)
    return self.dispatch("pow", base, p)
@memoize
def matmul(self, x, y):
    """Matrix multiplication of `x` and `y`."""
    lhs, rhs = self.lift(x, y)
    return self.dispatch("matmul", lhs, rhs)
def gather_bit(self, x, even):
    """Gather alternating bits of a boolean sharing; `even` presumably selects
    the even- or odd-indexed bit positions — confirm in the dispatched kernel."""
    assert x.share_type is BOOLEAN
    return self.dispatch("gather_bit", x, even)
def xor_indices(self, x):
    """Apply the dispatched "xor_indices" kernel to a boolean sharing
    (semantics defined by the backing factory's kernel)."""
    assert x.share_type is BOOLEAN
    return self.dispatch("xor_indices", x)
@memoize
def transpose(self, x, perm=None):
    """Transpose `x`, optionally with an explicit axis permutation `perm`."""
    operand = self.lift(x)
    return self.dispatch("transpose", operand, perm)
def indexer(self, x: "ABY3Tensor", slc) -> "ABY3Tensor":
    """Apply the slice/index object `slc` to `x` (backs `ABY3Tensor.__getitem__`)."""
    return self.dispatch("indexer", x, slc)
def reshape(self, x: "ABY3Tensor", axe) -> "ABY3Tensor":
    """Reshape `x` to the shape given by `axe`."""
    return self.dispatch("reshape", x, axe)
@memoize
def concat(self, xs, axis):
    """Concatenate tensors along `axis`; inputs must be all-public or all-private."""
    if all(isinstance(x, ABY3PublicTensor) for x in xs):
        return _concat_public(self, xs, axis=axis)

    if all(isinstance(x, ABY3PrivateTensor) for x in xs):
        return _concat_private(self, xs, axis=axis)

    raise TypeError("Don't know how to do a concat {}".format(type(xs)))
@memoize
def reduce_sum(self, x, axis=None, keepdims=False):
    """Sum the elements of `x` along `axis` (over all elements when None)."""
    operand = self.lift(x)
    return self.dispatch("reduce_sum", operand, axis=axis, keepdims=keepdims)
@memoize
def truncate(self, x: "ABY3Tensor"):
    """Truncate `x` — used elsewhere in this module to rescale products of
    two scaled (fixed-point) tensors after multiplication."""
    return self.dispatch("truncate", x)
@memoize
def reveal(self, x):
    """Reconstruct the plaintext of `x` on all three servers as a public tensor."""
    return self.dispatch("reveal", x)
@memoize
def B_xor(self, x, y):
    """Bitwise XOR on boolean sharings."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_xor", lhs, rhs)
@memoize
def B_and(self, x, y):
    """Bitwise AND on boolean sharings."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_and", lhs, rhs)
@memoize
def B_or(self, x, y):
    """Bitwise OR on boolean sharings."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_or", lhs, rhs)
@memoize
def B_not(self, x):
    """Bitwise NOT on a boolean sharing."""
    operand = self.lift(x, share_type=BOOLEAN)
    return self.dispatch("B_not", operand)
@memoize
def B_ppa(self, x, y, n_bits=None, topology="kogge_stone"):
    """Binary addition on boolean sharings via a parallel-prefix adder
    with the given circuit topology."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_ppa", lhs, rhs, n_bits, topology)
@memoize
def B_add(self, x, y):
    """Binary addition on boolean sharings."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_add", lhs, rhs)
@memoize
def B_sub(self, x, y):
    """Binary subtraction on boolean sharings."""
    lhs, rhs = self.lift(x, y, share_type=BOOLEAN)
    return self.dispatch("B_sub", lhs, rhs)
@memoize
def lshift(self, x, steps):
    """Shift the bits of `x` left by `steps` positions."""
    return self.dispatch("lshift", x, steps)
@memoize
def rshift(self, x, steps):
    """Shift the bits of `x` right by `steps` positions
    (see `logical_rshift` for the zero-filling variant)."""
    return self.dispatch("rshift", x, steps)
@memoize
def logical_rshift(self, x, steps):
    """Logical (zero-filling) right shift of `x` by `steps` positions."""
    return self.dispatch("logical_rshift", x, steps)
@memoize
def A2B(self, x, nbits=None):
    """Convert an arithmetic sharing to a boolean sharing."""
    return self.dispatch("A2B", x, nbits)
@memoize
def B2A(self, x, nbits=None):
    """Convert a boolean sharing to an arithmetic sharing."""
    return self.dispatch("B2A", x, nbits)
@memoize
def mul_AB(self, x, y):
    """
    Multiply an arithmetic sharing `x` by a boolean sharing `y`.

    Callers should make sure y is boolean sharing whose backing TF native type is `tf.bool`.
    There is no automatic lifting for boolean sharing in the mixed-protocol multiplication.
    """
    arith = self.lift(x)
    return self.dispatch("mul_AB", arith, y)
@memoize
def bit_extract(self, x, i):
    """Extract bit `i` of `x` as a boolean sharing."""
    if x.share_type not in (BOOLEAN, ARITHMETIC):
        raise ValueError("unsupported share type: {}".format(x.share_type))
    return self.dispatch("bit_extract", x, i)
@memoize
def msb(self, x):
    """Extract the most significant bit of `x` (bit index `nbits - 1`)."""
    return self.bit_extract(x, self.nbits - 1)
@memoize
def polynomial(self, x, coeffs):
    """Evaluate the polynomial defined by public coefficients `coeffs` at `x`."""
    point = self.lift(x)
    return self.dispatch("polynomial", point, coeffs)
@memoize
def polynomial_piecewise(self, x, c, coeffs):
    """Evaluate a piecewise polynomial at `x`; `c` presumably holds the
    breakpoints and `coeffs` the per-piece coefficients — confirm layout in
    the dispatched kernel."""
    return self.dispatch("polynomial_piecewise", x, c, coeffs)
@memoize
def sigmoid(self, x, approx_type="piecewise_linear"):
    """Approximate the logistic sigmoid of `x` using the named approximation."""
    return self.dispatch("sigmoid", x, approx_type)
@memoize
def gather(self, x, indices, axis):
    """Gather slices of `x` at `indices` along `axis`."""
    # NOTE(review): stub — this unconditionally raises for every share type.
    raise NotImplementedError("Unsupported share type: {}".format(x.share_type))
@memoize
def stack(self, xs, axis):
    """Stack tensors `xs` along a new `axis`."""
    # NOTE(review): stub — this unconditionally raises for every input type.
    raise TypeError("Don't know how to do a stack {}".format(type(xs)))
def write(self, x, filename_prefix):
    """Persist the shares of a private tensor to disk under `filename_prefix`."""
    if isinstance(x, ABY3PrivateTensor):
        return self.dispatch("write", x, filename_prefix)
    raise TypeError("Only support writing ABY3PrivateTensor to disk.")
def read(self, filename_prefix, batch_size, n_columns):
    """Read shares previously written under `filename_prefix` back as batched
    private tensors (counterpart to `write`)."""
    return self.dispatch("read", filename_prefix, batch_size, n_columns)
def iterate(
    self,
    tensor: "ABY3PrivateTensor",
    batch_size: int,
    repeat=True,
    shuffle=True,
    seed: int = None,
):
    """Iterate over `tensor` in batches of `batch_size`.

    :param tensor: The private tensor to iterate over.
    :param batch_size: Number of rows per batch.
    :param repeat: Whether to cycle over the data indefinitely.
    :param shuffle: Whether to shuffle the rows.
    :param seed: Optional seed controlling the shuffle.
    :raises TypeError: If `tensor` is not an ABY3PrivateTensor.
    """
    if not isinstance(tensor, ABY3PrivateTensor):
        raise TypeError("Only support iterating ABY3PrivateTensor.")
    return self.dispatch("iterate", tensor, batch_size, repeat, shuffle, seed)
def blinded_shuffle(self, tensor: "ABY3PrivateTensor"):
    """
    Shuffle the rows of the given tensor privately.

    After the shuffle, none of the share holder could know the exact shuffle order.
    """
    if not isinstance(tensor, ABY3PrivateTensor):
        raise TypeError(("Only support blindly shuffle ABY3PrivateTensor. "
                         "For public tensor, use the shuffle() method"))
    return self.dispatch("blinded_shuffle", tensor)
def dispatch(self, base_name, *args, container=None, **kwargs):
    """
    Finds the correct protocol logic to perform based on the dispatch_id
    attribute of the input tensors in args.

    :param base_name: Operation name, e.g. "mul".
    :param container: Namespace searched for the handler; defaults to this module.
    :raises TypeError: When no handler matches the operand types.
    """
    # Build e.g. "_mul_private_public" from the operands' dispatch ids.
    suffix = "_".join([arg.dispatch_id for arg in args if hasattr(arg, "dispatch_id")])
    func_name = "_{}_{}".format(base_name, suffix)
    if container is None:
        container = _THISMODULE
    func = getattr(container, func_name, None)
    if func is not None:
        return func(self, *args, **kwargs)  # pylint: disable=not-callable
    raise TypeError(("Don't know how to {}: {}").format(base_name,
                                                        [type(arg) for arg in args]))
#
# Classes representing the base values in the ABY3 protocol.
#
class ABY3Tensor(abc.ABC):
    """
    This class functions mostly as a convenient way of exposing operations
    directly on the various tensor objects, ie allowing one to write `x + y`
    instead of `prot.add(x, y)`. Since this functionality is shared among all
    tensors we put it in this superclass.

    This class should never be instantiated on its own.
    Instead you should use your chosen protocols factory methods::

        x = prot.define_private_input(tf.constant(np.array([1,2,3,4])))
        y = prot.define_public_input(tf.constant(np.array([4,5,6,7])))

        z = x + y

        with config.Session() as sess:
            answer = z.reveal().eval(sess)

            print(answer) # => [5, 7, 9, 11]
    """

    def __init__(self, prot, is_scaled, share_type):
        self.prot = prot
        self.is_scaled = is_scaled
        self.share_type = share_type

    @property
    @abc.abstractmethod
    def shape(self) -> List[int]:
        """
        :rtype: List[int]
        :returns: The shape of this tensor.
        """
        pass

    @property
    @abc.abstractmethod
    def unwrapped(self) -> Tuple[AbstractTensor, ...]:
        pass

    def add(self, other):
        """
        Add `other` to this ABY3Tensor. This can be another tensor with the same
        backing or a primitive.

        This function returns a new ABY3Tensor and does not modify this one.

        :param ABY3Tensor other: a tensor or primitive (e.g. a float)
        :return: A new ABY3Tensor with `other` added.
        :rtype: ABY3Tensor
        """
        if self.share_type == ARITHMETIC:
            return self.prot.add(self, other)
        else:
            raise ValueError("unsupported share type for add: {}".format(self.share_type))

    def __add__(self, other):
        """
        See :meth:`~tf_encrypted.protocol.aby3.ABY3Tensor.add`
        """
        return self.add(other)

    def __radd__(self, other):
        return self + other

    def reduce_sum(self, axis=None, keepdims=False):
        """
        Like :meth:`tensorflow.reduce_sum`

        :param int axis: The axis to reduce along
        :param bool keepdims: If true, retains reduced dimensions with length 1.
        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.reduce_sum(self, axis, keepdims)

    def sum(self, axis=None, keepdims=False):
        """
        See :meth:`ABY3Tensor.reduce_sum`
        """
        return self.reduce_sum(axis, keepdims)

    def sub(self, other):
        """
        Subtract `other` from this tensor.

        :param ABY3Tensor other: to subtract
        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        if self.share_type == ARITHMETIC:
            return self.prot.sub(self, other)
        else:
            raise ValueError("unsupported share type for sub: {}".format(self.share_type))

    def __sub__(self, other):
        return self.sub(other)

    def __rsub__(self, other):
        if self.share_type == ARITHMETIC:
            return self.prot.sub(other, self)
        else:
            raise ValueError("unsupported share type for sub: {}".format(self.share_type))

    def mul(self, other):
        """
        Multiply this tensor with `other`

        :param ABY3Tensor other: to multiply
        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.mul(self, other)

    def __mul__(self, other):
        return self.prot.mul(self, other)

    def __rmul__(self, other):
        return self.prot.mul(other, self)

    def __truediv__(self, other):
        return self.prot.div(self, other)

    def __mod__(self, other):
        return self.prot.mod(self, other)

    def __pow__(self, p):
        return self.prot.pow(self, p)

    def square(self):
        """
        Square this tensor.

        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.square(self)

    def matmul(self, other):
        """
        MatMul this tensor with `other`. This will perform matrix multiplication,
        rather than elementwise like
        :meth:`~tf_encrypted.protocol.aby3.ABY3Tensor.mul`

        :param ABY3Tensor other: to mul
        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.matmul(self, other)

    def dot(self, other):
        """
        Alias for :meth:`ABY3Tensor.matmul`.

        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.matmul(other)

    def __getitem__(self, slc):
        return self.prot.indexer(self, slc)

    def transpose(self, perm=None):
        """
        Transpose this tensor.

        See :meth:`tensorflow.transpose`

        :param List[int] perm: A permutation of the dimensions of this tensor.
        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.transpose(self, perm)

    def truncate(self):
        """
        Truncate this tensor.

        `TODO`

        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.truncate(self)

    def expand_dims(self, axis=None):
        """
        :See: tf.expand_dims

        :return: A new ABY3Tensor
        :rtype: ABY3Tensor
        """
        return self.prot.expand_dims(self, axis=axis)

    def reshape(self, shape: List[int]) -> "ABY3Tensor":
        """
        :See: tf.reshape

        :param List[int] shape: The new shape of the tensor.
        :rtype: ABY3Tensor
        :returns: A new tensor with the contents of this tensor, but with the new
            specified shape.
        """
        return self.prot.reshape(self, shape)

    def __neg__(self):
        return self.prot.negative(self)

    def negative(self) -> "ABY3Tensor":
        """
        :See: tf.negative

        :rtype: ABY3Tensor
        :returns: A new tensor with numerical negative value element-wise computed.
        """
        return self.prot.negative(self)

    def reduce_max(self, axis: int) -> "ABY3Tensor":
        """
        :See: tf.reduce_max

        :param int axis: The axis to take the max along
        :rtype: ABY3Tensor
        :returns: A new ABY3 tensor with the max value from each axis.
        """
        return self.prot.reduce_max(self, axis)

    def bitwise_xor(self, other):
        if self.share_type == BOOLEAN:
            return self.prot.B_xor(self, other)
        else:
            raise ValueError("Unsupported share type for xor: {}".format(self.share_type))

    def __xor__(self, other):
        return self.bitwise_xor(other)

    def bitwise_and(self, other):
        if self.share_type == BOOLEAN:
            return self.prot.B_and(self, other)
        else:
            raise ValueError("unsupported share type for and: {}".format(self.share_type))

    def __and__(self, other):
        return self.bitwise_and(other)

    def bitwise_or(self, other):
        if self.share_type == BOOLEAN:
            return self.prot.B_or(self, other)
        else:
            # Fixed copy-paste error: message previously said "for and".
            raise ValueError("unsupported share type for or: {}".format(self.share_type))

    def __or__(self, other):
        return self.bitwise_or(other)

    def invert(self):
        if self.share_type == BOOLEAN:
            return self.prot.B_not(self)
        else:
            # Fixed copy-paste error: message previously said "for and".
            raise ValueError("unsupported share type for not: {}".format(self.share_type))

    def __invert__(self):
        return self.invert()

    def __lshift__(self, steps):
        return self.prot.lshift(self, steps)

    def lshift(self, steps):
        return self.prot.lshift(self, steps)

    def __rshift__(self, steps):
        return self.prot.rshift(self, steps)

    def rshift(self, steps):
        return self.prot.rshift(self, steps)

    def arith_rshift(self, steps):
        # Arithmetic shift is the default `rshift`.
        return self.rshift(steps)

    def logical_rshift(self, steps):
        return self.prot.logical_rshift(self, steps)

    def write(self, filename_prefix):
        return self.prot.write(self, filename_prefix)
class ABY3PublicTensor(ABY3Tensor):
    """
    This class represents a public tensor, known by at least by the three servers
    but potentially known by more. Although there is only a single value we
    replicate it on both servers to avoid sending it from one to the other
    in the operations where it's needed by both (eg multiplication).
    """

    dispatch_id = "public"

    def __init__(self, prot: ABY3, values: List[AbstractTensor], is_scaled: bool, share_type) -> None:
        assert all(isinstance(v, AbstractTensor) for v in values)
        assert all((v.shape == values[0].shape) for v in values)

        super(ABY3PublicTensor, self).__init__(prot, is_scaled, share_type)
        # One replica of the same plaintext value per server.
        self.values = values

    def __repr__(self) -> str:
        return "ABY3PublicTensor(shape={}, share_type={})".format(self.shape, self.share_type)

    @property
    def shape(self) -> List[int]:
        return self.values[0].shape

    @property
    def backing_dtype(self):
        return self.values[0].factory

    @property
    def unwrapped(self) -> Tuple[AbstractTensor, ...]:
        """
        Unwrap the tensor.

        This will return the value for each of the parties that collectively own
        the tensor.

        In most cases, this will be the same value on each device.

        .. code-block:: python

            x_0, y_0, z_0 = tensor.unwrapped
            # x_0 == 10 with the value pinned to player_0's device.
            # y_0 == 10 with the value pinned to player_1's device.
            # z_0 == 10 with the value pinned to player_2's device.

        In most cases you will want to work on this data on the specified device.

        .. code-block:: python

            x_0, y_0, z_0= tensor.unwrapped

            with tf.device(prot.player_0.device_name):
                # act on x_0

            with tf.device(prot.player_1.device_name):
                # act on y_0

            with tf.device(prot.player_2.device_name):
                # act on z_0

        In most cases you will not need to use this method. All funtions
        will hide this functionality for you (e.g. `add`, `mul`, etc).
        """
        return self.values

    def decode(self) -> Union[np.ndarray, tf.Tensor]:
        """Decode the first replica back to a plaintext value via the protocol."""
        return self.prot._decode(self.values[0], self.is_scaled)  # pylint: disable=protected-access

    def to_native(self):
        """Alias for :meth:`decode`."""
        return self.decode()
class ABY3Constant(ABY3PublicTensor):
    """
    This class essentially represents a public value, however it additionally
    records the fact that the underlying value was declared as a constant.
    """

    def __init__(self, prot, constants, is_scaled, share_type):
        assert all(isinstance(c, AbstractConstant) for c in constants)
        assert all(c.shape == constants[0].shape for c in constants)

        super(ABY3Constant, self).__init__(prot, constants, is_scaled, share_type)
        self.constants = constants

    def __repr__(self) -> str:
        return "ABY3Constant(shape={}, share_type={})".format(self.shape, self.share_type)
class ABY3PrivateTensor(ABY3Tensor):
    """
    This class represents a private value that may be unknown to everyone.
    """

    dispatch_id = "private"

    def __init__(self, prot, shares, is_scaled, share_type):
        # `shares` is a 3x2 nested structure: shares[i] holds the two
        # replicated shares kept by server i.
        assert len(shares) == 3
        assert all((ss.shape == shares[0][0].shape) for s in shares for ss in s), "Shares have different shapes."

        super(ABY3PrivateTensor, self).__init__(prot, is_scaled, share_type)
        self.shares = shares

    def __repr__(self) -> str:
        return "ABY3PrivateTensor(shape={}, share_type={})".format(self.shape,
                                                                   self.share_type)

    @property
    def shape(self) -> List[int]:
        return self.shares[0][0].shape

    @property
    def backing_dtype(self):
        return self.shares[0][0].factory

    @property
    def unwrapped(self):
        return self.shares

    def reveal(self) -> ABY3PublicTensor:
        """Reconstruct this tensor's plaintext as a public tensor."""
        return self.prot.reveal(self)
class ABY3PrivateVariable(ABY3PrivateTensor):
    """
    This class essentially represents a private value, however it additionally
    records the fact that the backing tensor was declared as a variable in
    order to allow treating it as a variable itself.
    """

    def __init__(self, prot, shares, is_scaled, share_type):
        super(ABY3PrivateVariable, self).__init__(prot, shares, is_scaled, share_type)
        self.shares = shares
        # Group the six backing-variable initializers into a single op.
        initializers = [var.initializer for share in shares for var in share]
        self.initializer = tf.group(*initializers)

    def __repr__(self) -> str:
        return "ABY3PrivateVariable(shape={}, share_type={})".format(
            self.shape, self.share_type)
#
# reveal helpers
#
def _reveal_private(prot, x):
    """Reconstruct the plaintext of a private tensor on every server."""
    assert isinstance(x, ABY3PrivateTensor), type(x)

    with tf.name_scope("reveal"):
        shares = x.unwrapped
        reconstructed = []
        for server in prot.servers:
            with tf.device(server.device_name):
                reconstructed.append(prot._reconstruct(shares, server, x.share_type))

    return ABY3PublicTensor(prot, reconstructed, x.is_scaled, x.share_type)
#
# add helpers
#
def _add_private_private(prot, x, y):
    """Share-wise addition of two private tensors; purely local on each server."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)

    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("add"):
        for i, server in enumerate(prot.servers):
            with tf.device(server.device_name):
                z[i][0] = x.shares[i][0] + y.shares[i][0]
                z[i][1] = x.shares[i][1] + y.shares[i][1]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _add_private_public(prot, x, y):
    """Add a public tensor into a private sharing.

    Only one logical share absorbs the public value; servers 0 and 2 apply it
    to their replicated copies of that share, server 1 passes its shares through.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)
    assert x.is_scaled == y.is_scaled, ("Cannot mix different encodings: "
                                        "{} {}").format(x.is_scaled, y.is_scaled)

    shares = x.unwrapped
    y_on_0, _, y_on_2 = y.unwrapped
    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("add"):
        with tf.device(prot.servers[0].device_name):
            z[0] = [shares[0][0] + y_on_0, shares[0][1]]
        with tf.device(prot.servers[1].device_name):
            z[1] = [shares[1][0], shares[1][1]]
        with tf.device(prot.servers[2].device_name):
            z[2] = [shares[2][0], shares[2][1] + y_on_2]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _add_public_private(prot, x, y):
    """Add a private sharing into a public tensor (mirror of private+public)."""
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.is_scaled == y.is_scaled, ("Cannot mix different encodings: "
                                        "{} {}").format(x.is_scaled, y.is_scaled)

    x_on_0, _, x_on_2 = x.unwrapped
    shares = y.unwrapped
    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("add"):
        with tf.device(prot.servers[0].device_name):
            z[0] = [shares[0][0] + x_on_0, shares[0][1]]
        with tf.device(prot.servers[1].device_name):
            z[1] = [shares[1][0], shares[1][1]]
        with tf.device(prot.servers[2].device_name):
            z[2] = [shares[2][0], shares[2][1] + x_on_2]
    return ABY3PrivateTensor(prot, z, x.is_scaled, y.share_type)
def _add_public_public(prot, x, y):
    """Add two public tensors replica-by-replica."""
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)
    assert x.is_scaled == y.is_scaled, "Cannot add tensors with different scales"

    with tf.name_scope("add"):
        z = [xi + yi for xi, yi in zip(x.unwrapped, y.unwrapped)]
    return ABY3PublicTensor(prot, z, x.is_scaled, x.share_type)
#
# sub helpers
#
def _sub_private_private(prot, x, y):
    """Share-wise subtraction of two private tensors; purely local on each server."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.is_scaled == y.is_scaled

    x_shares = x.unwrapped
    y_shares = y.unwrapped
    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("sub"):
        for i, server in enumerate(prot.servers):
            with tf.device(server.device_name):
                z[i] = [x_shares[i][0] - y_shares[i][0],
                        x_shares[i][1] - y_shares[i][1]]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _sub_private_public(prot, x, y):
    """Subtract a public tensor from a private sharing; servers 0 and 2 apply
    the public value to their copies of one share, server 1 passes through."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)
    assert x.is_scaled == y.is_scaled

    shares = x.unwrapped
    y_on_0, _, y_on_2 = y.unwrapped
    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("sub"):
        with tf.device(prot.servers[0].device_name):
            z[0] = [shares[0][0] - y_on_0, shares[0][1]]
        with tf.device(prot.servers[1].device_name):
            z[1] = [shares[1][0], shares[1][1]]
        with tf.device(prot.servers[2].device_name):
            z[2] = [shares[2][0], shares[2][1] - y_on_2]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _sub_public_private(prot, x, y):
    """Compute `x - y` for public `x` and private `y`.

    Every share of `y` is negated locally; the public value is then added to
    one logical share on its two holders (servers 0 and 2).
    """
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    # Consistency fix: all sibling add/sub dispatch helpers require matching
    # encodings; this variant silently accepted a scale mismatch, which would
    # produce a numerically wrong result.
    assert x.is_scaled == y.is_scaled, ("Cannot mix different encodings: "
                                        "{} {}").format(x.is_scaled, y.is_scaled)

    x_on_0, _, x_on_2 = x.unwrapped
    shares = y.unwrapped
    z = [[None] * 2 for _ in range(3)]
    with tf.name_scope("sub"):
        with tf.device(prot.servers[0].device_name):
            z[0][0] = x_on_0 - shares[0][0]
            z[0][1] = -shares[0][1]
        with tf.device(prot.servers[1].device_name):
            z[1][0] = -shares[1][0]
            z[1][1] = -shares[1][1]
        with tf.device(prot.servers[2].device_name):
            z[2][0] = -shares[2][0]
            z[2][1] = x_on_2 - shares[2][1]
    return ABY3PrivateTensor(prot, z, x.is_scaled, y.share_type)
#
# negative helpers
#
def _negative_private(prot, x):
    """Negate a private tensor by negating every share locally."""
    assert isinstance(x, ABY3PrivateTensor), type(x)

    x_shares = x.unwrapped
    neg_shares = [[None, None] for _ in range(3)]
    with tf.name_scope("negative"):
        for i, server in enumerate(prot.servers):
            with tf.device(server.device_name):
                neg_shares[i] = [-x_shares[i][0], -x_shares[i][1]]
    return ABY3PrivateTensor(prot, neg_shares, x.is_scaled, x.share_type)
def _negative_public(prot, x):
    """Negate a public tensor on each server's replica."""
    assert isinstance(x, ABY3PublicTensor), type(x)

    negated = []
    with tf.name_scope("negative"):
        for server, value in zip(prot.servers, x.unwrapped):
            with tf.device(server.device_name):
                negated.append(-value)
    return ABY3PublicTensor(prot, negated, x.is_scaled, x.share_type)
#
# mul helpers
#
def _mul_public_private(prot, x, y):
    """Multiply a private sharing by a public tensor; each server scales its
    two shares by its local replica, truncating once if both inputs are scaled."""
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)

    x_replicas = x.unwrapped
    shares = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("mul"):
        for i, server in enumerate(prot.servers):
            with tf.device(server.device_name):
                z[i][0] = shares[i][0] * x_replicas[i]
                z[i][1] = shares[i][1] * x_replicas[i]
        product = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, y.share_type)
        if x.is_scaled and y.is_scaled:
            product = prot.truncate(product)
    return product
def _mul_private_public(prot, x, y):
    """Multiply a private sharing by a public tensor (mirror of public*private)."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)

    shares = x.unwrapped
    y_replicas = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("mul"):
        for i, server in enumerate(prot.servers):
            with tf.device(server.device_name):
                z[i][0] = shares[i][0] * y_replicas[i]
                z[i][1] = shares[i][1] * y_replicas[i]
        product = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, x.share_type)
        if x.is_scaled and y.is_scaled:
            product = prot.truncate(product)
    return product
def _mul_private_private(prot, x, y):
    """Multiply two private tensors with the 3-party replicated-sharing protocol.

    Each server computes one 3-out-of-3 additive share of the product from the
    shares it holds, re-randomized with a zero sharing; the shares are then
    re-replicated so each server again holds two of them.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)

    x_shares = x.unwrapped
    y_shares = y.unwrapped
    z = [[None, None], [None, None], [None, None]]
    with tf.name_scope("mul"):
        # a0, a1, a2 re-randomize the product shares (presumably a0+a1+a2 == 0,
        # per the helper's name — confirm in _gen_zero_sharing).
        a0, a1, a2 = prot._gen_zero_sharing(x.shape)
        with tf.device(prot.servers[0].device_name):
            z0 = x_shares[0][0] * y_shares[0][0] \
                 + x_shares[0][0] * y_shares[0][1] \
                 + x_shares[0][1] * y_shares[0][0] \
                 + a0
        with tf.device(prot.servers[1].device_name):
            z1 = x_shares[1][0] * y_shares[1][0] \
                 + x_shares[1][0] * y_shares[1][1] \
                 + x_shares[1][1] * y_shares[1][0] \
                 + a1
        with tf.device(prot.servers[2].device_name):
            z2 = x_shares[2][0] * y_shares[2][0] \
                 + x_shares[2][0] * y_shares[2][1] \
                 + x_shares[2][1] * y_shares[2][0] \
                 + a2
        # Re-sharing: server i keeps its own z_i plus the next server's z_{i+1},
        # restoring the replicated 2-out-of-3 layout.
        with tf.device(prot.servers[0].device_name):
            z[0][0] = z0
            z[0][1] = z1
        with tf.device(prot.servers[1].device_name):
            z[1][0] = z1
            z[1][1] = z2
        with tf.device(prot.servers[2].device_name):
            z[2][0] = z2
            z[2][1] = z0
        z = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, x.share_type)
        # One truncation rescales the fixed-point product when both inputs are scaled.
        z = prot.truncate(z) if x.is_scaled and y.is_scaled else z
    return z
def _mul_trunc2_private_private(prot, x, y):
    """
    Multiplication with the Trunc2 protocol in the ABY3 paper.

    This is more efficient (in terms of communication rounds)
    than `mul` in the online phase only when pre-computation
    is left out of consideration.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    # If there will not be any truncation, then just call the simple multiplication protocol.
    if not (x.is_scaled and y.is_scaled):
        return _mul_private_private(prot, x, y)
    x_shares = x.unwrapped
    y_shares = y.unwrapped
    shape = x_shares[0][0].shape
    amount = prot.fixedpoint_config.precision_fractional
    with tf.name_scope("mul_trunc2"):
        # Step 1: Generate a Random Truncation Pair (r, r >> amount).
        # If TF is smart enough, this part is supposed to be pre-computation.
        r = prot._gen_random_sharing(shape, share_type=BOOLEAN)
        r_trunc = r.arith_rshift(amount)
        r = prot.B2A(r)
        r_trunc = prot.B2A(r_trunc)
        # Step 2: Compute 3-out-of-3 sharing of (x*y - r); each server also
        # subtracts its first share of r so the sum reveals x*y - r.
        a0, a1, a2 = prot._gen_zero_sharing(x.shape)
        with tf.device(prot.servers[0].device_name):
            z0 = x_shares[0][0] * y_shares[0][0] \
                 + x_shares[0][0] * y_shares[0][1] \
                 + x_shares[0][1] * y_shares[0][0] \
                 + a0 - r.shares[0][0]
        with tf.device(prot.servers[1].device_name):
            z1 = x_shares[1][0] * y_shares[1][0] \
                 + x_shares[1][0] * y_shares[1][1] \
                 + x_shares[1][1] * y_shares[1][0] \
                 + a1 - r.shares[1][0]
        with tf.device(prot.servers[2].device_name):
            z2 = x_shares[2][0] * y_shares[2][0] \
                 + x_shares[2][0] * y_shares[2][1] \
                 + x_shares[2][1] * y_shares[2][0] \
                 + a2 - r.shares[2][0]
        # Step 3: Reveal (x*y - r) / 2^d. Revealing is safe because r masks x*y.
        # xy_minus_r = z0 + z1 + z2
        # xy_minus_r_trunc = xy_minus_r.right_shift(amount)
        # z = ABY3PublicTensor(prot, [xy_minus_r_trunc, xy_minus_r_trunc, xy_minus_r_trunc], True, ARITHMETIC)
        xy_minus_r_trunc = [None] * 3
        for i in range(3):
            with tf.device(prot.servers[i].device_name):
                # Each server reconstructs and truncates locally.
                xy_minus_r_trunc[i] = z0 + z1 + z2
                xy_minus_r_trunc[i] = xy_minus_r_trunc[i].right_shift(amount)
        z = ABY3PublicTensor(prot, xy_minus_r_trunc, True, ARITHMETIC)
        # Step 4: Final addition of the truncated mask restores the secret.
        z = z + r_trunc
        return z
def _matmul_public_private(prot, x, y):
    """Matrix product of a public tensor with a private tensor.

    Local computation only: each server multiplies its copy of the public
    matrix with both of its replicated shares. Truncates once if both
    operands are scaled.
    """
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    x_copies = x.unwrapped
    y_shares = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("matmul"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = x_copies[idx].matmul(y_shares[idx][0])
                z[idx][1] = x_copies[idx].matmul(y_shares[idx][1])
    result = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, y.share_type)
    if x.is_scaled and y.is_scaled:
        result = prot.truncate(result)
    return result
def _matmul_private_public(prot, x, y):
    """Matrix product of a private tensor with a public tensor.

    Local computation only: each server multiplies both of its replicated
    shares with its copy of the public matrix. Truncates once if both
    operands are scaled.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)
    x_shares = x.unwrapped
    y_copies = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("matmul"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = x_shares[idx][0].matmul(y_copies[idx])
                z[idx][1] = x_shares[idx][1].matmul(y_copies[idx])
    result = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, x.share_type)
    if x.is_scaled and y.is_scaled:
        result = prot.truncate(result)
    return result
def _matmul_private_private(prot, x, y):
    """Matrix product of two private tensors (one round of resharing).

    Same structure as private-private multiplication, but with `matmul`
    cross terms; the zero-sharing is generated with the *result* shape.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    x_shares = x.unwrapped
    y_shares = y.unwrapped
    # Tensorflow supports matmul for more than 2 dimensions,
    # with the inner-most 2 dimensions specifying the 2-D matrix multiplication
    result_shape = tf.TensorShape((*x.shape[:-1], y.shape[-1]))
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("matmul"):
        alphas = prot._gen_zero_sharing(result_shape)
        partials = [None] * 3
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                partials[idx] = (x_shares[idx][0].matmul(y_shares[idx][0])
                                 + x_shares[idx][0].matmul(y_shares[idx][1])
                                 + x_shares[idx][1].matmul(y_shares[idx][0])
                                 + alphas[idx])
        # Re-sharing: server i replicates (t_i, t_{i+1}).
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = partials[idx]
                z[idx][1] = partials[(idx + 1) % 3]
    result = ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, x.share_type)
    if x.is_scaled and y.is_scaled:
        result = prot.truncate(result)
    return result
def _truncate_private(prot: ABY3, x: ABY3PrivateTensor) -> ABY3PrivateTensor:
    """Dispatch truncation to the variant selected by the fixedpoint config."""
    assert isinstance(x, ABY3PrivateTensor)
    truncator = (_truncate_private_noninteractive
                 if prot.fixedpoint_config.use_noninteractive_truncation
                 else _truncate_private_interactive)
    return truncator(prot, x)
def _truncate_private_noninteractive(
    prot: ABY3,
    x: ABY3PrivateTensor,
) -> ABY3PrivateTensor:
    """Probabilistic truncation where new shares are derived locally.

    Server 0 truncates its own share; server 1 truncates (x1 + x2) and
    re-randomizes it with a PRF mask that server 2 derives independently
    from the key shared between servers 1 and 2.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    base = prot.fixedpoint_config.scaling_base
    amount = prot.fixedpoint_config.precision_fractional
    shares = x.unwrapped
    y = [[None, None], [None, None], [None, None]]
    with tf.name_scope("truncate"):
        # First step: compute new shares
        with tf.device(prot.servers[2].device_name):
            # Same seed as r_on_1 below: derived from the pairwise key shared
            # by servers 1 and 2, so no communication is needed for r.
            r_on_2 = prot.int_factory.sample_seeded_uniform(
                shares[2][0].shape, prot.pairwise_keys[2][0] + prot.pairwise_nonces[1])
        with tf.device(prot.servers[0].device_name):
            y0 = shares[0][0].truncate(amount, base)
        with tf.device(prot.servers[1].device_name):
            r_on_1 = prot.int_factory.sample_seeded_uniform(
                shares[1][0].shape, prot.pairwise_keys[1][1] + prot.pairwise_nonces[1])
            t = shares[1][0] + shares[1][1]
            # tmp = 0 - (0 - t).truncate(amount, base)
            tmp = t.truncate(amount, base)
            y1 = tmp - r_on_1
        # Advance the nonce so the next PRF invocation yields fresh randomness.
        prot.pairwise_nonces[1] = prot.pairwise_nonces[1] + 1
        # Second step: replicate shares so server i holds (y_i, y_{i+1}).
        with tf.device(prot.servers[0].device_name):
            y[0][0] = y0
            y[0][1] = y1
        with tf.device(prot.servers[1].device_name):
            y[1][0] = y1
            y[1][1] = r_on_1
        with tf.device(prot.servers[2].device_name):
            y[2][0] = r_on_2
            y[2][1] = y0
    return ABY3PrivateTensor(prot, y, x.is_scaled, x.share_type)
def _truncate_private_interactive(prot: ABY3, a: ABY3PrivateTensor) -> ABY3PrivateTensor:
    """
    See protocol TruncPr (3.1) in
    "Secure Computation With Fixed-Point Numbers" by Octavian Catrina and Amitabh
    Saxena, FC'10.

    We call it "interactive" to keep consistent with the 2pc setting,
    but in fact, our protocol uses only one round communication, exactly the same as
    that in the "non-interactive" one.
    """
    assert isinstance(a, ABY3PrivateTensor), type(a)
    with tf.name_scope("truncate-i"):
        scaling_factor = prot.fixedpoint_config.scaling_factor
        scaling_factor_inverse = inverse(prot.fixedpoint_config.scaling_factor,
                                         prot.int_factory.modulus)
        # we first rotate `a` to make sure reconstructed values fall into
        # a non-negative interval `[0, 2B)` for some bound B; this uses an
        # assumption that the values originally lie in `[-B, B)`, and will
        # leak private information otherwise
        # 'a + bound' will automatically lift 'bound' by another scaling factor,
        # so we should first divide bound by the scaling factor if we want to
        # use this convenient '+' operation.
        bound = prot.fixedpoint_config.bound_double_precision
        b = a + (bound / scaling_factor)
        # next step is for servers to add a statistical mask to `b`, reveal
        # it to server1 and server2, and compute the lower part
        trunc_gap = prot.fixedpoint_config.truncation_gap
        mask_bitlength = ceil(log2(bound)) + 2 + trunc_gap
        b_shares = b.unwrapped
        a_shares = a.unwrapped
        shape = a.shape
        # NOTE: The following algorithm has an assumption to ensure the correctness:
        # c = a + bound + r0 + r1 SHOULD be positively smaller than
        # the max int64 number 2^{63} - 1. This is necessary to ensure the correctness of
        # the modulo operation 'c % scaling_factor'.
        # As a simple example, consider a 4-bit number '1111', when we think of it as a signed
        # number, it is '-1', and '-1 % 3 = 2'. But when we think of it as an unsigned number,
        # then '15 % 3 = 0'. AND the following works only if c is a positive number that is within
        # 63-bit, because 64-bit becomes a negative number.
        # Therefore, 'mask_bitlength' is better <= 61 if we use int64 as the underlying type, because
        # r0 is 61-bit, r1 is 61-bit, bound is much smaller, and (assuming) a is much smaller than bound.
        d = [[None] * 2 for _ in range(3)]
        with tf.device(prot.servers[0].device_name):
            # r0 is shared with server 2, r1 with server 1 (seeded PRF masks).
            r0_on_0 = prot.int_factory.sample_seeded_bounded(
                shape, prot.pairwise_keys[0][0] + prot.pairwise_nonces[2], mask_bitlength)
            r1_on_0 = prot.int_factory.sample_seeded_bounded(
                shape, prot.pairwise_keys[0][1] + prot.pairwise_nonces[0], mask_bitlength)
            c0_on_0 = b_shares[0][0] + r0_on_0
            # NOTE(review): c1_on_0 appears unused below — presumably a leftover;
            # confirm against upstream before removing.
            c1_on_0 = b_shares[0][1] + r1_on_0
            r0_lower_on_0 = r0_on_0 % scaling_factor
            r1_lower_on_0 = r1_on_0 % scaling_factor
            # The lower part of the mask is subtracted out, then the result is
            # divided by the scaling factor via multiplication by its inverse.
            a_lower0_on_0 = -r0_lower_on_0
            a_lower1_on_0 = -r1_lower_on_0
            d[0][0] = (a_shares[0][0] - a_lower0_on_0) * scaling_factor_inverse
            d[0][1] = (a_shares[0][1] - a_lower1_on_0) * scaling_factor_inverse
        with tf.device(prot.servers[1].device_name):
            r1_on_1 = prot.int_factory.sample_seeded_bounded(
                shape, prot.pairwise_keys[1][0] + prot.pairwise_nonces[0], mask_bitlength)
            c1_on_1 = b_shares[1][0] + r1_on_1
            c2_on_1 = b_shares[1][1]
            # server0 sends c0 to server1, revealing c to server1
            c_on_1 = c0_on_0 + c1_on_1 + c2_on_1
            r1_lower_on_1 = r1_on_1 % scaling_factor
            a_lower1_on_1 = -r1_lower_on_1
            # Lower part of the masked, rotated value.
            a_lower2_on_1 = c_on_1 % scaling_factor
            d[1][0] = (a_shares[1][0] - a_lower1_on_1) * scaling_factor_inverse
            d[1][1] = (a_shares[1][1] - a_lower2_on_1) * scaling_factor_inverse
        with tf.device(prot.servers[2].device_name):
            r0_on_2 = prot.int_factory.sample_seeded_bounded(
                shape, prot.pairwise_keys[2][1] + prot.pairwise_nonces[2], mask_bitlength)
            c0_on_2 = b_shares[2][1] + r0_on_2
            c2_on_2 = b_shares[2][0]
            # server1 sends c1 to server2, revealing c to server2
            c_on_2 = c0_on_2 + c1_on_1 + c2_on_2
            r0_lower_on_2 = r0_on_2 % scaling_factor
            a_lower0_on_2 = -r0_lower_on_2
            a_lower2_on_2 = c_on_2 % scaling_factor
            d[2][0] = (a_shares[2][0] - a_lower2_on_2) * scaling_factor_inverse
            d[2][1] = (a_shares[2][1] - a_lower0_on_2) * scaling_factor_inverse
        # Advance both nonces that seeded masks in this call.
        prot.pairwise_nonces[0] += 1
        prot.pairwise_nonces[2] += 1
    return ABY3PrivateTensor(prot, d, a.is_scaled, a.share_type)
def _B_xor_private_private(prot: ABY3, x: ABY3PrivateTensor, y: ABY3PrivateTensor):
    """Bitwise XOR of two boolean-shared tensors; purely local, no rounds."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.backing_dtype == y.backing_dtype
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("b_xor"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                for pos in range(2):
                    z[idx][pos] = x.shares[idx][pos] ^ y.shares[idx][pos]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _B_xor_private_public(prot: ABY3, x: ABY3PrivateTensor, y: ABY3PublicTensor):
    """Bitwise XOR of a boolean-shared tensor with a public tensor (local).

    NOTE: the public value is XORed into BOTH replicated shares on every
    server, matching the original's construction.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PublicTensor), type(y)
    assert x.backing_dtype == y.backing_dtype
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("b_xor"):
        y_copies = y.unwrapped
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = x.shares[idx][0] ^ y_copies[idx]
                z[idx][1] = x.shares[idx][1] ^ y_copies[idx]
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _B_and_private_private(prot: ABY3, x: ABY3PrivateTensor, y: ABY3PrivateTensor):
    """Bitwise AND of two boolean-shared tensors (one resharing round).

    The boolean analogue of private multiplication: AND replaces `*`,
    XOR replaces `+`, and the zero-sharing is boolean.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.backing_dtype == y.backing_dtype
    x_shares = x.unwrapped
    y_shares = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("b_and"):
        alphas = prot._gen_zero_sharing(x.shape,
                                        share_type=BOOLEAN,
                                        factory=x.backing_dtype)
        partials = [None] * 3
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                cross = ((x_shares[idx][0] & y_shares[idx][0])
                         ^ (x_shares[idx][0] & y_shares[idx][1])
                         ^ (x_shares[idx][1] & y_shares[idx][0]))
                partials[idx] = cross ^ alphas[idx]
        # Re-sharing: server i replicates (t_i, t_{i+1}).
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = partials[idx]
                z[idx][1] = partials[(idx + 1) % 3]
    return ABY3PrivateTensor(prot, z, x.is_scaled or y.is_scaled, x.share_type)
def _B_and_private_public(prot, x, y):
    """Bitwise AND of a boolean-shared tensor with a public tensor.

    Purely local: each server ANDs both of its replicated shares with its
    copy of the public value.

    Args:
        prot: The ABY3 protocol instance.
        x: Private tensor with boolean sharing.
        y: Public tensor with the same backing dtype.

    Returns:
        ABY3PrivateTensor holding the boolean sharing of x & y.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    # Fixed: the assert message previously reported type(x) instead of type(y),
    # which would mislead debugging when y has the wrong type.
    assert isinstance(y, ABY3PublicTensor), type(y)
    assert x.backing_dtype == y.backing_dtype
    x_shares = x.unwrapped
    y_on_0, y_on_1, y_on_2 = y.unwrapped
    z = [[None, None], [None, None], [None, None]]
    with tf.name_scope("B_and"):
        with tf.device(prot.servers[0].device_name):
            z[0][0] = x_shares[0][0] & y_on_0
            z[0][1] = x_shares[0][1] & y_on_0
        with tf.device(prot.servers[1].device_name):
            z[1][0] = x_shares[1][0] & y_on_1
            z[1][1] = x_shares[1][1] & y_on_1
        with tf.device(prot.servers[2].device_name):
            z[2][0] = x_shares[2][0] & y_on_2
            z[2][1] = x_shares[2][1] & y_on_2
    z = ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
    return z
def _B_and_public_private(prot, x, y):
    """Bitwise AND of a public tensor with a boolean-shared tensor (local)."""
    assert isinstance(x, ABY3PublicTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.backing_dtype == y.backing_dtype
    x_copies = x.unwrapped
    y_shares = y.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("B_and"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = x_copies[idx] & y_shares[idx][0]
                z[idx][1] = x_copies[idx] & y_shares[idx][1]
    return ABY3PrivateTensor(prot, z, y.is_scaled, y.share_type)
def _B_or_private_private(prot, x, y):
    """Bitwise OR via the identity x|y = (x^y) ^ (x&y); costs one AND round."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    with tf.name_scope("B_or"):
        xor_term = x ^ y
        and_term = x & y
        return xor_term ^ and_term
def _B_not_private(prot, x):
    """Bitwise NOT of a boolean-shared tensor.

    Inverting only one of the three underlying shares (share 0, which is
    replicated on servers 0 and 2) flips every bit of the XOR of all shares.
    We use `~` instead of XORing a constant so this works for both the
    int_factory and the bool_factory.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    x_shares = x.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("B_not"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                first, second = x_shares[idx]
                # Share 0 lives at position 0 on server 0 and position 1 on server 2.
                z[idx][0] = ~first if idx == 0 else first
                z[idx][1] = ~second if idx == 2 else second
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _lshift_private(prot, x, steps):
    """Left-shift every share by `steps` bits; local on each server."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    shares = x.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("lshift"):
        for idx, (first, second) in enumerate(shares):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = first << steps
                z[idx][1] = second << steps
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _rshift_private(prot, x, steps):
    """Arithmetic (sign-extending) right shift of every share; local."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    shares = x.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("rshift"):
        for idx, (first, second) in enumerate(shares):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = first >> steps
                z[idx][1] = second >> steps
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _logical_rshift_private(prot, x, steps):
    """Logical (zero-filling) right shift of every share; local."""
    assert isinstance(x, ABY3PrivateTensor), type(x)
    shares = x.unwrapped
    z = [[None, None] for _ in range(3)]
    with tf.name_scope("logical-rshift"):
        for idx, (first, second) in enumerate(shares):
            with tf.device(prot.servers[idx].device_name):
                z[idx][0] = first.logical_rshift(steps)
                z[idx][1] = second.logical_rshift(steps)
    return ABY3PrivateTensor(prot, z, x.is_scaled, x.share_type)
def _B_add_private_private(prot, x, y):
    # Addition over boolean sharings would need a full binary adder circuit;
    # convert to arithmetic sharing (B2A) and add there instead.
    raise NotImplementedError("Addition with boolean sharing is not implemented, and not recommended.")
def _B_sub_private_private(prot, x, y):
raise NotImplementedError("Sbustraction with boolean sharing is not implemented, and not recommended.")
def _B_ppa_private_private(prot, x, y, n_bits, topology="kogge_stone"):
"""
Parallel prefix adder (PPA). This adder can be used for addition of boolean sharings.
`n_bits` can be passed as an optimization to constrain the computation for least significant
`n_bits` bits.
AND Depth: log(k)
Total gates: klog(k)
"""
if topology == "kogge_stone":
return _B_ppa_kogge_stone_private_private(prot, x, y, n_bits)
elif topology == "sklansky":
return _B_ppa_sklansky_private_private(prot, x, y, n_bits)
else:
raise NotImplementedError("Unknown adder topology.")
def _B_ppa_sklansky_private_private(prot, x, y, n_bits):
    """
    Parallel prefix adder (PPA), using the Sklansky adder topology.

    Args:
        prot: The ABY3 protocol instance.
        x, y: Boolean-shared addends (backing dtype must be int64).
        n_bits: Optional bound; restricting to the least significant
            `n_bits` bits reduces the number of rounds.

    Returns:
        A boolean sharing of x + y.

    Raises:
        NotImplementedError: if the backing native type is not tf.int64
            (the bit masks below are 64-bit specific).
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    if x.backing_dtype.native_type != tf.int64:
        raise NotImplementedError("Native type {} not supported".format(
            x.backing_dtype.native_type))
    with tf.name_scope("B_ppa"):
        keep_masks = [
            0x5555555555555555, 0x3333333333333333,
            0x0f0f0f0f0f0f0f0f, 0x00ff00ff00ff00ff,
            0x0000ffff0000ffff, 0x00000000ffffffff
        ]  # yapf: disable
        copy_masks = [
            0x5555555555555555, 0x2222222222222222,
            0x0808080808080808, 0x0080008000800080,
            0x0000800000008000, 0x0000000080000000
        ]  # yapf: disable
        G = x & y
        P = x ^ y
        k = prot.nbits
        if n_bits is not None:
            k = n_bits
        for i in range(ceil(log2(k))):
            # Fixed: `np.object` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `object` is the equivalent supported spelling.
            c_mask = prot.define_constant(np.ones(x.shape, dtype=object) * copy_masks[i],
                                          apply_scaling=False,
                                          share_type=BOOLEAN)
            k_mask = prot.define_constant(np.ones(x.shape, dtype=object) * keep_masks[i],
                                          apply_scaling=False,
                                          share_type=BOOLEAN)
            # Copy the selected bit to 2^i positions:
            # For example, when i=2, the 4-th bit is copied to the (5, 6, 7, 8)-th bits
            G1 = (G & c_mask) << 1
            P1 = (P & c_mask) << 1
            for j in range(i):
                G1 = (G1 << (2**j)) ^ G1
                P1 = (P1 << (2**j)) ^ P1
            # One-round impl. by modifying the PPA operator 'o' as:
            #   (G, P) o (G1, P1) = (G ^ (P*G1), P*P1), where '^' is XOR, '*' is AND
            # This is a valid definition: when calculating the carry bit
            # c_i = g_i + p_i * c_{i-1}, the OR '+' can be replaced with XOR '^'
            # because g_i and p_i will NOT take '1' at the same time.
            # This operator 'o' is associative but NOT idempotent, so positions
            # that should keep (G, P) unchanged use (G, P) o (0, 1) = (G, P).
            # (A two-round variant using an OR gate is possible but costs a round.)
            #
            # Option 1: Using (G, P) o (0, P) = (G, P)
            # P1 = P1 ^ (P & k_mask)
            # Option 2: Using (G, P) o (0, 1) = (G, P)
            P1 = P1 ^ k_mask
            G = G ^ (P & G1)
            P = P & P1
        # G stores the carry-in to the next position
        C = G << 1
        P = x ^ y
        z = C ^ P
    return z
def _B_ppa_kogge_stone_private_private(prot, x, y, n_bits):
    """
    Parallel prefix adder (PPA), using the Kogge-Stone adder topology.

    Args:
        prot: The ABY3 protocol instance.
        x, y: Boolean-shared addends (backing dtype must be int64).
        n_bits: Optional bound; restricting to the least significant
            `n_bits` bits reduces the number of rounds.

    Returns:
        A boolean sharing of x + y.

    Raises:
        NotImplementedError: if the backing native type is not tf.int64.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    if x.backing_dtype.native_type != tf.int64:
        raise NotImplementedError("Native type {} not supported".format(
            x.backing_dtype.native_type))
    with tf.name_scope("B_ppa"):
        keep_masks = []
        for i in range(ceil(log2(prot.nbits))):
            keep_masks.append((1 << (2**i)) - 1)
        # For example, if prot.nbits = 64, then keep_masks is:
        # keep_masks = [0x0000000000000001, 0x0000000000000003, 0x000000000000000f,
        #               0x00000000000000ff, 0x000000000000ffff, 0x00000000ffffffff]
        G = x & y
        P = x ^ y
        k = prot.nbits if n_bits is None else n_bits
        for i in range(ceil(log2(k))):
            # Fixed: `np.object` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `object` is the equivalent supported spelling.
            k_mask = prot.define_constant(np.ones(x.shape, dtype=object) * keep_masks[i],
                                          apply_scaling=False,
                                          share_type=BOOLEAN)
            G1 = G << (2**i)
            P1 = P << (2**i)
            # One-round impl. by modifying the PPA operator 'o' as:
            #   (G, P) o (G1, P1) = (G ^ (P*G1), P*P1), where '^' is XOR, '*' is AND
            # Valid because g_i and p_i are never both 1; associative but not
            # idempotent, so kept positions use (G, P) o (0, 1) = (G, P).
            #
            # Option 1: Using (G, P) o (0, P) = (G, P)
            # P1 = P1 ^ (P & k_mask)
            # Option 2: Using (G, P) o (0, 1) = (G, P)
            P1 = P1 ^ k_mask
            G = G ^ (P & G1)
            P = P & P1
        # G stores the carry-in to the next position
        C = G << 1
        P = x ^ y
        z = C ^ P
    return z
def _A2B_private(prot, x, nbits):
    """
    Bit decomposition: Convert an arithmetic sharing to a boolean sharing.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert x.share_type == ARITHMETIC
    x_shares = x.unwrapped
    zero = prot.define_constant(np.zeros(x.shape, dtype=np.int64),
                                apply_scaling=False,
                                share_type=BOOLEAN)
    zero_on_0, zero_on_1, zero_on_2 = zero.unwrapped
    a0, a1, a2 = prot._gen_zero_sharing(x.shape, share_type=BOOLEAN)
    operand1 = [[None, None], [None, None], [None, None]]
    operand2 = [[None, None], [None, None], [None, None]]
    with tf.name_scope("A2B"):
        # Step 1: We know x = ((x0, x1), (x1, x2), (x2, x0))
        # We need to reshare it into two operands that will be fed into an addition circuit:
        # operand1 = (((x0+x1) XOR a0, a1), (a1, a2), (a2, (x0+x1) XOR a0)), meaning boolean sharing of x0+x1
        # operand2 = ((0, 0), (0, x2), (x2, 0)), meaning boolean sharing of x2
        with tf.device(prot.servers[0].device_name):
            # Server 0 can compute x0+x1 locally; masking with a0 (part of a
            # boolean zero-sharing) makes the boolean shares uniform.
            x0_plus_x1 = x_shares[0][0] + x_shares[0][1]
            operand1[0][0] = x0_plus_x1 ^ a0
            operand1[0][1] = a1
            operand2[0][0] = zero_on_0
            operand2[0][1] = zero_on_0
        with tf.device(prot.servers[1].device_name):
            operand1[1][0] = a1
            operand1[1][1] = a2
            operand2[1][0] = zero_on_1
            # x2 sits in the clear as one boolean share of operand2.
            operand2[1][1] = x_shares[1][1]
        with tf.device(prot.servers[2].device_name):
            operand1[2][0] = a2
            operand1[2][1] = operand1[0][0]
            operand2[2][0] = x_shares[2][0]
            operand2[2][1] = zero_on_2
        operand1 = ABY3PrivateTensor(prot, operand1, x.is_scaled, BOOLEAN)
        operand2 = ABY3PrivateTensor(prot, operand2, x.is_scaled, BOOLEAN)
        # Step 2: Parallel prefix adder that requires log(k) rounds of communication
        result = prot.B_ppa(operand1, operand2, nbits)
    return result
def _bit_extract_private(prot, x, i):
    """
    Bit extraction: Extracts the `i`-th bit of an arithmetic sharing or boolean sharing
    to a single-bit boolean sharing.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert x.backing_dtype == prot.int_factory
    with tf.name_scope("bit_extract"):
        if x.share_type == ARITHMETIC:
            # Partial A2B: same resharing as `_A2B_private`, but the adder only
            # needs the lowest i+1 bits to be correct.
            with tf.name_scope("A2B_partial"):
                x_shares = x.unwrapped
                zero = prot.define_constant(np.zeros(x.shape, dtype=np.int64),
                                            apply_scaling=False,
                                            share_type=BOOLEAN)
                zero_on_0, zero_on_1, zero_on_2 = zero.unwrapped
                a0, a1, a2 = prot._gen_zero_sharing(x.shape, share_type=BOOLEAN)
                operand1 = [[None, None], [None, None], [None, None]]
                operand2 = [[None, None], [None, None], [None, None]]
                # Step 1: We know x = ((x0, x1), (x1, x2), (x2, x0))
                # We need to reshare it into two operands that will be fed into an addition circuit:
                # operand1 = (((x0+x1) XOR a0, a1), (a1, a2), (a2, (x0+x1) XOR a0)), meaning boolean sharing of x0+x1
                # operand2 = ((0, 0), (0, x2), (x2, 0)), meaning boolean sharing of x2
                with tf.device(prot.servers[0].device_name):
                    x0_plus_x1 = x_shares[0][0] + x_shares[0][1]
                    operand1[0][0] = x0_plus_x1 ^ a0
                    operand1[0][1] = a1
                    operand2[0][0] = zero_on_0
                    operand2[0][1] = zero_on_0
                with tf.device(prot.servers[1].device_name):
                    operand1[1][0] = a1
                    operand1[1][1] = a2
                    operand2[1][0] = zero_on_1
                    operand2[1][1] = x_shares[1][1]
                with tf.device(prot.servers[2].device_name):
                    operand1[2][0] = a2
                    operand1[2][1] = operand1[0][0]
                    operand2[2][0] = x_shares[2][0]
                    operand2[2][1] = zero_on_2
                operand1 = ABY3PrivateTensor(prot, operand1, x.is_scaled, BOOLEAN)
                operand2 = ABY3PrivateTensor(prot, operand2, x.is_scaled, BOOLEAN)
                # Step 2: Parallel prefix adder that requires log(i+1) rounds of communication
                x = prot.B_ppa(operand1, operand2, i + 1)
        # Take out the i-th bit
        #
        # NOTE: Don't use x = x & 0x1. Even though we support automatic lifting of 0x1
        # to an ABY3Tensor, but it also includes automatic scaling to make the two operands have
        # the same scale, which is not what want here.
        #
        mask = prot.define_constant(np.array([0x1 << i]),
                                    apply_scaling=False,
                                    share_type=BOOLEAN)
        x = x & mask
        x_shares = x.unwrapped
        result = [[None, None], [None, None], [None, None]]
        # NOTE(review): this loop reuses the name `i` (the bit index); safe here
        # because the bit index is not needed past the `mask` computation above.
        for i in range(3):
            with tf.device(prot.servers[i].device_name):
                # Cast each single-bit share down to the boolean factory.
                result[i][0] = x_shares[i][0].cast(prot.bool_factory)
                result[i][1] = x_shares[i][1].cast(prot.bool_factory)
        result = ABY3PrivateTensor(prot, result, False, BOOLEAN)
    return result
def _B2A_private(prot, x, nbits):
    """
    Bit composition: Convert a boolean sharing to an arithmetic sharing.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert x.share_type == BOOLEAN
    # In semi-honest, the following two calls can be further optimized because we don't
    # need the boolean shares of x1 and x2. We only need their original values on intended servers.
    x1_on_0, x1_on_1, x1_on_2, x1_shares = prot._gen_b2a_sharing(x.shape, prot.b2a_keys_1)
    assert (x1_on_2 is None)
    x2_on_0, x2_on_1, x2_on_2, x2_shares = prot._gen_b2a_sharing(x.shape, prot.b2a_keys_2)
    assert (x2_on_0 is None)
    a0, a1, a2 = prot._gen_zero_sharing(x.shape, share_type=BOOLEAN)
    with tf.name_scope("B2A"):
        # Server 1 reshares (-x1-x2) as private input; the boolean zero-sharing
        # (a0, a1, a2) randomizes the shares of this input.
        neg_x1_neg_x2 = [[None, None], [None, None], [None, None]]
        with tf.device(prot.servers[1].device_name):
            value = -x1_on_1 - x2_on_1
            neg_x1_neg_x2[1][0] = value ^ a1
            neg_x1_neg_x2[1][1] = a2
        with tf.device(prot.servers[0].device_name):
            neg_x1_neg_x2[0][0] = a0
            neg_x1_neg_x2[0][1] = neg_x1_neg_x2[1][0]
        with tf.device(prot.servers[2].device_name):
            neg_x1_neg_x2[2][0] = a2
            neg_x1_neg_x2[2][1] = a0
        neg_x1_neg_x2 = ABY3PrivateTensor(prot, neg_x1_neg_x2, x.is_scaled, BOOLEAN)
        # Compute x0 = x + (-x1-x2) using the parallel prefix adder
        x0 = prot.B_ppa(x, neg_x1_neg_x2, nbits)
        # Reveal x0 to server 0 and 2
        with tf.device(prot.servers[0].device_name):
            x0_on_0 = prot._reconstruct(x0.unwrapped, prot.servers[0], BOOLEAN)
        with tf.device(prot.servers[2].device_name):
            x0_on_2 = prot._reconstruct(x0.unwrapped, prot.servers[2], BOOLEAN)
        # Construct the arithmetic sharing: server i holds (x_i, x_{i+1})
        # for the decomposition x = x0 + x1 + x2.
        result = [[None, None], [None, None], [None, None]]
        with tf.device(prot.servers[0].device_name):
            result[0][0] = x0_on_0
            result[0][1] = x1_on_0
        with tf.device(prot.servers[1].device_name):
            result[1][0] = x1_on_1
            result[1][1] = x2_on_1
        with tf.device(prot.servers[2].device_name):
            result[2][0] = x2_on_2
            result[2][1] = x0_on_2
        result = ABY3PrivateTensor(prot, result, x.is_scaled, ARITHMETIC)
    return result
def _mul_AB_public_private(prot, x, y):
    """Multiply a public arithmetic tensor by a private boolean-shared bit.

    Delegates to the OT-based `__mul_AB_routine` with server 2 acting as the
    sender of the public value; only server 2's copy of x is needed.

    Args:
        prot: The ABY3 protocol instance.
        x: Public tensor with ARITHMETIC share type.
        y: Private tensor with BOOLEAN share type.

    Returns:
        ABY3PrivateTensor holding the arithmetic sharing of x * y.
    """
    assert isinstance(x, ABY3PublicTensor), type(x)
    # Fixed: the assert message previously reported type(x) instead of type(y).
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.share_type == ARITHMETIC
    assert y.share_type == BOOLEAN
    # Only server 2's copy is used; discard the other two to avoid unused locals.
    _, _, x_on_2 = x.unwrapped
    with tf.name_scope("mul_AB"):
        z = __mul_AB_routine(prot, x_on_2, y, 2)
        z = ABY3PrivateTensor(prot, z, x.is_scaled, ARITHMETIC)
    return z
def _mul_AB_private_private(prot, x, y):
    """Multiply an arithmetic-shared tensor by a boolean-shared bit tensor.

    Splits x = x0 + (x1 + x2): server 0 supplies x0 and server 1 supplies
    x1 + x2; each term is multiplied by y via the OT-based sub-routine and
    the two private results are added.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert isinstance(y, ABY3PrivateTensor), type(y)
    assert x.share_type == ARITHMETIC
    assert y.share_type == BOOLEAN
    x_shares = x.unwrapped
    with tf.name_scope("mul_AB"):
        with tf.name_scope("term0"):
            # x0 * y, with server 0 as the sender.
            term0 = __mul_AB_routine(prot, x_shares[0][0], y, 0)
            term0 = ABY3PrivateTensor(prot, term0, x.is_scaled, ARITHMETIC)
        with tf.name_scope("term1"):
            # (x1 + x2) * y, with server 1 as the sender.
            with tf.device(prot.servers[1].device_name):
                x1_plus_x2 = x_shares[1][0] + x_shares[1][1]
            term1 = __mul_AB_routine(prot, x1_plus_x2, y, 1)
            term1 = ABY3PrivateTensor(prot, term1, x.is_scaled, ARITHMETIC)
        return term0 + term1
def __mul_AB_routine(prot, a, b, sender_idx):
    """
    A sub routine for multiplying a value 'a' (located at servers[sender_idx]) with a boolean sharing 'b'.
    """
    assert isinstance(a, AbstractTensor), type(a)
    assert isinstance(b, ABY3PrivateTensor), type(b)
    with tf.name_scope("__mul_AB_routine"):
        b_shares = b.unwrapped
        s = [None, None, None]
        # Arithmetic zero-sharing used to re-randomize the output shares.
        s[0], s[1], s[2] = prot._gen_zero_sharing(a.shape, ARITHMETIC)
        z = [[None, None], [None, None], [None, None]]
        # Index the three servers relative to the sender.
        idx0 = sender_idx
        idx1 = (sender_idx + 1) % 3
        idx2 = (sender_idx + 2) % 3
        with tf.device(prot.servers[idx0].device_name):
            z[idx0][0] = s[idx2]
            z[idx0][1] = s[idx1]
            # The sender's local XOR of its two bit-shares selects between the
            # two OT messages m0 (bit would be 0) and m1 (bit would be 1).
            tmp = (b_shares[idx0][0] ^ b_shares[idx0][1]).cast(a.factory) * a
            m0 = tmp + s[idx0]
            m1 = -tmp + a + s[idx0]
        with tf.device(prot.servers[idx1].device_name):
            z[idx1][0] = s[idx1]
            # Receiver idx1 obtains m_{b'} via OT, with idx2 as the helper.
            z[idx1][1] = prot._ot(
                prot.servers[idx0],
                prot.servers[idx1],
                prot.servers[idx2],
                m0,
                m1,
                b_shares[idx1][1],
                b_shares[idx2][0],
                prot.pairwise_keys[idx0][0],
                prot.pairwise_keys[idx2][1],
                prot.pairwise_nonces[idx2],
            )
        # Nonce must advance after every OT so PRF masks are never reused.
        prot.pairwise_nonces[idx2] = prot.pairwise_nonces[idx2] + 1
        with tf.device(prot.servers[idx2].device_name):
            # Symmetric OT with the roles of idx1 and idx2 swapped.
            z[idx2][0] = prot._ot(
                prot.servers[idx0],
                prot.servers[idx2],
                prot.servers[idx1],
                m0,
                m1,
                b_shares[idx2][0],
                b_shares[idx1][1],
                prot.pairwise_keys[idx0][1],
                prot.pairwise_keys[idx1][0],
                prot.pairwise_nonces[idx0],
            )
            z[idx2][1] = s[idx2]
        prot.pairwise_nonces[idx0] = prot.pairwise_nonces[idx0] + 1
    return z
def _pow_private(prot, x, p):
    """Private exponentiation x**p by binary square-and-multiply.

    Args:
        prot: The ABY3 protocol instance.
        x: Arithmetic-shared base tensor.
        p: Public integer exponent, must be >= 1.

    Returns:
        Private tensor holding x**p, using O(log p) private multiplications.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert x.share_type == ARITHMETIC
    # Fixed: the message used to say ">= 0", contradicting the `p >= 1` check.
    assert p >= 1, "Exponent should be >= 1"
    # NOTE: pow should be able to use the `memoir` memoization
    with tf.name_scope("pow"):
        result = 1
        tmp = x
        while p > 0:
            bit = (p & 0x1)
            if bit > 0:
                # Multiply in the current power of x for each set bit of p.
                result = result * tmp
            p >>= 1
            if p > 0:
                # Square only while bits remain, to save one multiplication.
                tmp = tmp * tmp
        return result
def _polynomial_private(prot, x, coeffs):
    """Evaluate the polynomial sum(coeffs[i] * x**i) on a private tensor.

    Integer coefficients are lifted to unscaled public constants so the
    multiplication stays local and avoids an interactive truncation.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert x.share_type == ARITHMETIC
    with tf.name_scope("polynomial"):
        acc = prot.define_constant(np.zeros(x.shape), apply_scaling=x.is_scaled)
        for degree, coeff in enumerate(coeffs):
            if degree == 0:
                acc = acc + coeff
                continue
            if coeff == 0:
                continue
            if (coeff - int(coeff)) == 0:
                # Optimization when coefficient is integer: multiplication can
                # be performed locally without interactive truncation.
                const = prot.define_constant(np.array([coeff]), apply_scaling=False)
                acc = acc + const * (x**degree)
            else:
                acc = acc + coeff * (x**degree)
        return acc
def _polynomial_piecewise_private(prot, x, c, coeffs):
    """Evaluate a piecewise polynomial on a private tensor.

    :param prot: the ABY3 protocol instance.
    :param x: ABY3PrivateTensor input.
    :param c: A list of splitting points between pieces.
    :param coeffs: Two-dimensional list: 1st dimension is the polynomial
        index, 2nd dimension is the coefficient index.
    :return: the selected piece's polynomial value at x.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    assert len(c) + 1 == len(coeffs), "# of pieces do not match # of polynomials"
    with tf.name_scope("polynomial_piecewise"):
        # One selection bit per piece: exactly one of them is 1 for any x.
        with tf.name_scope("polynomial-selection-bit"):
            msbs = [prot.msb(x - point) for point in c]
            selectors = [msbs[0]]
            for i in range(len(c) - 1):
                # x lies between c[i] and c[i+1].
                selectors.append(~msbs[i] & msbs[i + 1])
            selectors.append(~msbs[len(c) - 1])
        # Combine: sum of (selector AND polynomial value) over all pieces.
        result = 0
        for piece_bit, piece_coeffs in zip(selectors, coeffs):
            piece_value = prot.polynomial(x, piece_coeffs)
            result = result + prot.mul_AB(piece_value, piece_bit)
        return result
def _sigmoid_private(prot, x, approx_type):
    """Approximate sigmoid(x) on a private tensor.

    Only the 3-piece linear approximation is implemented:
    ~0 below -2.5, 0.5 + 0.17*x in between, ~1 above 2.5.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)
    with tf.name_scope("sigmoid"):
        if approx_type != "piecewise_linear":
            raise NotImplementedError("Only support piecewise linear approximation of sigmoid.")
        split_points = (-2.5, 2.5)
        piece_coeffs = ((1e-4,), (0.50, 0.17), (1 - 1e-4,))
        return prot.polynomial_piecewise(x, split_points, piece_coeffs)
#
# transpose helpers
#
def _transpose_private(prot, x, perm=None):
    """Transpose a private tensor: each server transposes its two shares locally."""
    assert isinstance(x, ABY3PrivateTensor)
    shares = x.unwrapped
    with tf.name_scope("transpose"):
        out = [[None, None] for _ in range(3)]
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                out[idx][0] = shares[idx][0].transpose(perm=perm)
                out[idx][1] = shares[idx][1].transpose(perm=perm)
    return ABY3PrivateTensor(prot, out, x.is_scaled, x.share_type)
def _transpose_public(prot, x, perm=None):
    """Transpose a public tensor: each server transposes its local copy."""
    assert isinstance(x, ABY3PublicTensor)
    values = x.unwrapped
    transposed = [None] * 3
    with tf.name_scope("transpose"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                transposed[idx] = values[idx].transpose(perm=perm)
    return ABY3PublicTensor(prot, transposed, x.is_scaled, x.share_type)
#
# reduce_sum helpers
#
def _reduce_sum_public(prot, x, axis=None, keepdims=False):
    """Reduce-sum a public tensor: each server sums its local copy."""
    values = x.unwrapped
    summed = [None] * 3
    with tf.name_scope("reduce_sum"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                summed[idx] = values[idx].reduce_sum(axis, keepdims)
    return ABY3PublicTensor(prot, summed, x.is_scaled, x.share_type)
def _reduce_sum_private(prot, x, axis=None, keepdims=False):
    """Reduce-sum a private tensor: summation is linear, so each share is summed locally."""
    shares = x.unwrapped
    with tf.name_scope("reduce_sum"):
        out = [[None, None] for _ in range(3)]
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                out[idx][0] = shares[idx][0].reduce_sum(axis, keepdims)
                out[idx][1] = shares[idx][1].reduce_sum(axis, keepdims)
    return ABY3PrivateTensor(prot, out, x.is_scaled, x.share_type)
#
# concat helpers
#
def _concat_public(prot, xs, axis):
    """Concatenate public tensors along `axis`: each server concatenates its local copies."""
    # All inputs must agree on scaling.
    assert all(x.is_scaled for x in xs) or all(not x.is_scaled for x in xs)
    factory = xs[0].backing_dtype
    is_scaled = xs[0].is_scaled
    # Regroup per server: per_server[i] holds server i's copy of every input.
    per_server = list(zip(*(x.unwrapped for x in xs)))
    concatenated = [None] * 3
    with tf.name_scope("concat"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                concatenated[idx] = factory.concat(per_server[idx], axis=axis)
    return ABY3PublicTensor(prot, concatenated, is_scaled, xs[0].share_type)
def _concat_private(prot, xs, axis):
    """Concatenate private tensors along `axis`: each share slot is concatenated locally."""
    # All inputs must agree on scaling.
    assert all(x.is_scaled for x in xs) or all(not x.is_scaled for x in xs)
    factory = xs[0].backing_dtype
    is_scaled = xs[0].is_scaled
    share_type = xs[0].share_type
    all_shares = [x.unwrapped for x in xs]
    out = [[None, None] for _ in range(3)]
    with tf.name_scope("concat"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                for j in range(2):
                    # Gather share slot j from every input and concatenate on this server.
                    out[idx][j] = factory.concat([s[idx][j] for s in all_shares], axis=axis)
    return ABY3PrivateTensor(prot, out, is_scaled, share_type)
def _write_private(prot, x, filename_prefix):
    """Write the six shares of a private tensor to TFRecord files.

    Server i writes its two shares to files "<filename_prefix>_share<i><j>"
    for j in {0, 1}, each row serialized as a comma-joined string.
    Returns a single tf op grouping all six writes.
    """
    assert isinstance(x, ABY3PrivateTensor), type(x)

    def encode(feature_row):
        # Converting a row to a string seems to be the only way of writing out
        # the dataset in a distributed way
        feature = tf.strings.reduce_join(tf.dtypes.as_string(tf.reshape(feature_row, [-1])),
                                         separator=",")
        return feature

    x_shares = x.unwrapped
    ops = []
    for i in range(3):
        # Each server writes its own shares on its own device.
        with tf.device(prot.servers[i].device_name):
            for j in range(2):
                data = tf.data.Dataset.from_tensor_slices(x_shares[i][j].value) \
                    .map(encode)
                writer = tf.data.experimental.TFRecordWriter("{}_share{}{}".format(
                    filename_prefix, i, j))
                ops.append(writer.write(data))
    return tf.group(*ops)
def _read_(prot, filename_prefix, batch_size, n_columns):
    """Read shares written by `_write_private` back into a private tensor.

    Each server opens its two "<filename_prefix>_share<i><j>" TFRecord files
    and yields batches of `batch_size` rows with `n_columns` columns.
    """
    row_shape = [n_columns]

    def decode(line):
        # Inverse of `encode` in _write_private: comma-separated int64 values.
        fields = tf.string_split([line], ",").values
        fields = tf.strings.to_number(fields, tf.int64)
        fields = tf.reshape(fields, row_shape)
        return fields

    batch = [[None] * 2 for _ in range(3)]
    for i in range(3):
        with tf.device(prot.servers[i].device_name):
            for j in range(2):
                data = tf.data.TFRecordDataset(["{}_share{}{}".format(filename_prefix, i, j)]) \
                    .map(decode) \
                    .repeat() \
                    .batch(batch_size=batch_size)
                it = data.make_one_shot_iterator()
                batch[i][j] = it.get_next()
                batch[i][j] = tf.reshape(batch[i][j], [batch_size] + row_shape)
                batch[i][j] = prot.int_factory.tensor(batch[i][j])
    # NOTE(review): is_scaled=True is hard-coded here — assumes only scaled
    # arithmetic shares are ever persisted; confirm against _write_private callers.
    return ABY3PrivateTensor(prot, batch, True, ARITHMETIC)
def _iterate_private(
    prot,
    tensor: "ABY3PrivateTensor",
    batch_size: int,
    repeat: bool = True,
    shuffle: bool = True,
    seed: int = None,
):
    """Iterate over a private tensor in batches via tf.data iterators.

    Builds one dataset per share (six in total). All datasets shuffle with the
    SAME public seed so the shares of any given row stay aligned across the
    three servers.
    """
    assert isinstance(tensor, ABY3PrivateTensor)

    shares = tensor.unwrapped
    iterators = [[None] * 2 for _ in range(3)]
    results = [[None] * 2 for _ in range(3)]

    if seed is None:
        seed = np.random.randint(1, 1 << 32)  # this seed is publicly known.
    batch_size = max(1, batch_size)

    def helper(idx):
        # Build both share iterators for server `idx` on that server's device.
        with tf.device(prot.servers[idx].device_name):
            out_shape = shares[idx][0].value.shape.as_list()
            out_shape[0] = batch_size
            for i in range(2):
                dataset = tf.data.Dataset.from_tensor_slices(shares[idx][i].value)
                if repeat:
                    dataset = dataset.repeat()
                if shuffle:
                    dataset = dataset.shuffle(buffer_size=512, seed=seed)
                dataset = dataset.batch(batch_size)
                # NOTE: initializable_iterator needs to run initializer.
                iterators[idx][i] = tf.compat.v1.data.make_initializable_iterator(dataset)
                batch = iterators[idx][i].get_next()
                # Wrap the tf.tensor as a dense tensor (no extra encoding is needed)
                results[idx][i] = prot.int_factory.tensor(tf.reshape(batch, out_shape))
            prot.add_initializers(*[iterators[idx][i].initializer for i in range(2)])

    for idx in range(3):
        helper(idx)

    # Synchronize the reading of all 6 dataset iterators
    with tf.control_dependencies([share.value for result in results for share in result]):
        for i in range(3):
            results[i][0] = results[i][0].identity()
            results[i][1] = results[i][1].identity()

    return ABY3PrivateTensor(prot, results, tensor.is_scaled, tensor.share_type)
def _indexer_private(prot: ABY3, tensor: ABY3PrivateTensor, slc) -> "ABY3PrivateTensor":
    """Apply slice `slc` to a private tensor by slicing each share locally."""
    shares = tensor.unwrapped
    out = [[None, None] for _ in range(3)]
    with tf.name_scope("index"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                out[idx][0] = shares[idx][0][slc]
                out[idx][1] = shares[idx][1][slc]
    return ABY3PrivateTensor(prot, out, tensor.is_scaled, tensor.share_type)
def _reshape_private(prot: ABY3, tensor: ABY3PrivateTensor, axe):
    """Reshape a private tensor to shape `axe` by reshaping each share locally."""
    shares = tensor.unwrapped
    out = [[None, None] for _ in range(3)]
    with tf.name_scope("reshape"):
        for idx in range(3):
            with tf.device(prot.servers[idx].device_name):
                out[idx][0] = shares[idx][0].reshape(axe)
                out[idx][1] = shares[idx][1].reshape(axe)
    return ABY3PrivateTensor(prot, out, tensor.is_scaled, tensor.share_type)
|
/home/runner/.cache/pip/pool/cf/0a/b5/ca35a000c1454aeb9b61c66b32b1a72ea7d996e4e9c6e8af62a32513d1 |
class Solution:
    """Basic-calculator solutions; each later `calculate` shadows the previous one."""

    # 1st solution: iterative stack of (saved total, sign) pairs; supports + - ( ).
    # O(n) time | O(n) space
    def calculate(self, s: str) -> int:
        total, current, sign, saved = 0, 0, 1, []
        for ch in s:
            if ch.isdigit():
                current = current * 10 + int(ch)
            elif ch in "+-":
                total += sign * current
                current = 0
                sign = 1 if ch == "+" else -1
            elif ch == "(":
                # Save outer state, start a fresh sub-expression.
                saved.append(total)
                saved.append(sign)
                sign, total = 1, 0
            elif ch == ")":
                total += sign * current
                total *= saved.pop()   # sign before the parenthesis
                total += saved.pop()   # total before the parenthesis
                current = 0
        return total + current * sign

    # 2nd solution: recursion on string slices; supports + - * / ( ).
    # O(n) time | O(n) space
    def calculate(self, s: str) -> int:
        def apply(op, value):
            if op == "+": stack.append(value)
            if op == "-": stack.append(-value)
            if op == "*": stack.append(stack.pop() * value)
            if op == "/": stack.append(int(stack.pop() / value))
        pos, number, stack, pending = 0, 0, [], "+"
        while pos < len(s):
            ch = s[pos]
            if ch.isdigit():
                number = number * 10 + int(ch)
            elif ch in "+-*/":
                apply(pending, number)
                number, pending = 0, ch
            elif ch == "(":
                # Recurse on the remainder; the inner call reports how far it consumed.
                number, consumed = self.calculate(s[pos + 1:])
                pos += consumed
            elif ch == ")":
                apply(pending, number)
                return sum(stack), pos + 1
            pos += 1
        apply(pending, number)
        return sum(stack)

    # 3rd solution: recursion on an index into the original string (no slicing).
    # O(n) time | O(n) space
    def calculate(self, s):
        def evaluate(start):
            def apply(op, value):
                if op == "+": operands.append(value)
                if op == "-": operands.append(-value)
                if op == "*": operands.append(operands.pop() * value)
                if op == "/": operands.append(int(operands.pop() / value))
            number, operands, pending = 0, [], "+"
            i = start
            while i < len(s):
                ch = s[i]
                if ch.isdigit():
                    number = number * 10 + int(ch)
                elif ch in "+-*/":
                    apply(pending, number)
                    number, pending = 0, ch
                elif ch == "(":
                    # Inner call returns (value, index just past the matching ')').
                    number, nxt = evaluate(i + 1)
                    i = nxt - 1
                elif ch == ")":
                    apply(pending, number)
                    return sum(operands), i + 1
                i += 1
            apply(pending, number)
            return sum(operands)
        return evaluate(0)
# ### Problem 2
# Prompt the user with the message, ‘Is it better to be rude or kind to People?’
# Keep prompting the user to enter an answer until they enter the word kind.
# Each time they enter something other than kind, print the message, ‘That’s not the answer I had hoped to hear. Try again.’ and prompt the user again.
# Once the user enters kind, print, ’Now that’s what I wanted to hear!’ and exit the program.
# create a variable that holds userinput
# (prompt text fixed to match the spec above: "Is it better to be rude or kind to People?")
userinput = input("Is it better to be rude or kind to People? ")
# continue to ask user for input until kind is entered
while userinput != "kind":
    # The loop condition already guarantees the answer is not "kind",
    # so no inner check is needed; fixed the re-prompt typo "rude of kind".
    print("That's not the answer I had hoped to hear. Try again.")
    userinput = input("Is it better to be rude or kind to People? ")
# The loop only exits once the user typed "kind" (even on the first try).
print("Now that's what I wanted to hear!")
class Gen_dummy:
    """Dummy generator: "fitting" stores a copy of the data, "sampling" returns a copy of it."""

    def __init__(self):
        # Copy of the fitted data; None until fit() is called.
        self.X_ = None

    def fit(self, X, y=None, metamodel=None):
        """Store a copy of X and return self; `y` and `metamodel` are accepted but unused."""
        self.X_ = X.copy()
        return self

    def sample(self, n_samples=1):
        """Return a copy of the fitted data; `n_samples` is ignored."""
        return self.X_.copy()

    def my_name(self):
        """Return this generator's identifier string."""
        return "dummy"
# =============================================================================
# # This generator always returns the same dataset
#
# import numpy as np
#
# mean = [0, 0]
# cov = [[1, 0], [0, 1]]
# x = np.random.multivariate_normal(mean, cov, 500)
# mean = [5, 5]
# x = np.vstack((x,np.random.multivariate_normal(mean, cov, 500)))
#
# dg = Gen_dummy()
# dg.fit(x)
# dg.sample(n_samples = 201) - x
# ============================================================================= |
# Copyright (c) 2019 NETSCOUT Systems, Inc.
"""Manage a connection and interface with Equinix Smart Key to manage keys."""
import base64
import json
import logging
import os
import pprint
from builtins import object
import requests
TOKEN_DIR = os.getenv("HOME", default="/tmp")
APP_TOKEN_FILE = os.path.join(TOKEN_DIR, ".skey_app_token")
USER_TOKEN_FILE = os.path.join(TOKEN_DIR, ".skey_user_token")
class SmartKeyException(Exception):
    """General error from SmartKey; base class for all SmartKey exceptions."""
class SmartKeyNeedsAuthException(SmartKeyException):
    """Authorization failed due to missing credentials."""
class SmartKeyNeedsAcctSelectException(SmartKeyException):
    """Account selection must be performed before this operation."""
class SmartKeyAuthAppException(SmartKeyException):
    """Raised when application authentication fails."""
class SmartKeyAuthUserException(SmartKeyException):
    """Raised when user authentication fails."""
class SmartKey(object):
    """Manage a connection and interface with SmartKey using the REST API."""

    def __init__(self, apikey=None):
        """
        Initialize an instance.

        This will fetch the token from disk automatically if it exists. Pass an
        API key to authenticate as an application.

        apikey - API key for SmartKey
        """
        self.baseurl = "https://www.smartkey.io"
        # Bearer token for requests; None until authenticated or loaded from disk.
        self.token = None
        # Application and user tokens are cached in separate files.
        self.token_file = USER_TOKEN_FILE if apikey is None else APP_TOKEN_FILE
        self.apikey = apikey
        self._fetch_token()

    def generate_rsa_key(self, name, size, description, group_id=None):
        """Generate an RSA key with the given parameters.

        Returns the new key id ('kid'); raises SmartKeyException on failure.
        """
        body = {
            "obj_type": "RSA",
            "name": name,
            "description": description,
            # [ AES, DES, DES3, RSA, EC, OPAQUE, HMAC, SECRET, CERTIFICATE ]
            "rsa": {"key_size": size},
            # Impose no constraints on encryption or key wrapping
            "encryption_policy": [{}],
            # Permit EXPORT of the key to sync with the Arbor HSM and
            # the web server. Permit APPMANAGEABLE so we can delete,
            # and SIGN if we want to use it to sign other keys.
            "key_ops": ["EXPORT", "APPMANAGEABLE", "SIGN"],
        }
        if group_id is not None:
            body["group_id"] = group_id
        res = self._request("POST", "/crypto/v1/keys", data=json.dumps(body))
        if res.status_code != requests.codes.created:
            # How can we determine why the generation failed? name already
            # exists for example.
            msg = "Cannot generate key: %d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        kid = res.json()["kid"]
        return kid

    def generate_ec_key(self, name, curve, group_id, description):
        """Generate an Elliptic Curve key with the given parameters.

        Returns the new key id ('kid'); raises SmartKeyException on failure.
        """
        body = {
            "obj_type": "EC",
            "name": name,
            "elliptic_curve": curve,
            "description": description,
            "encryption_policy": [{}],
            "key_ops": ["EXPORT", "APPMANAGEABLE"],
        }
        if group_id is not None:
            body["group_id"] = group_id
        res = self._request("POST", "/crypto/v1/keys", data=json.dumps(body))
        if res.status_code != requests.codes.created:
            # How can we determine why the generation failed? name already
            # exists for example.
            msg = "Cannot generate key: %d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        kid = res.json()["kid"]
        return kid

    def delete_key(self, kid):
        """Delete a key by key id; raises SmartKeyException on failure."""
        res = self._request("DELETE", "/crypto/v1/keys/" + kid)
        if res.status_code != requests.codes.no_content:
            msg = "Cannot delete key: %d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)

    def list_keys(self, name=None, group_id=None):
        """
        List keys, optionally filtered by name.

        TODO: use group_id
        """
        data = None
        if name is not None:
            data = {"name": name}
        res = self._request("GET", "/crypto/v1/keys", data=data)
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        # NOTE(review): this reassignment is dead code — `keys` below is what
        # is returned.
        data = []
        keys = res.json()
        return keys

    def list_accounts(self):
        """List the accounts; raises SmartKeyException on failure."""
        res = self._request("GET", "/sys/v1/accounts")
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        return res.json()

    def list_groups(self):
        """List the groups; raises SmartKeyException on failure."""
        res = self._request("GET", "/sys/v1/groups")
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        return res.json()

    def get_key(self, kid):
        """Get a specific key (security object) by key id."""
        res = self._request("GET", "/crypto/v1/keys/%s" % kid)
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        return res.json()

    def export_key(self, kid):
        """Export the key data for key id `kid` as a dict."""
        body = {"kid": kid}
        res = self._request("POST", "/crypto/v1/keys/export", json.dumps(body))
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        data = res.json()
        logging.debug(pprint.pformat(data))
        return data

    def auth_app(self, save=True):
        """
        Authenticate as application and acquire bearer token.

        Acquire the token to save use for subsequent requests.

        NOTE(review): the `save` parameter is currently unused — the token is
        always saved (see comment below).
        """
        logging.info("Authenticating with SmartKey as an application")
        headers = {"Authorization": "Basic " + self.apikey}
        res = requests.request(
            method="POST",
            url="%s/sys/v1/session/auth" % (self.baseurl,),
            headers=headers,
        )
        if res.status_code != requests.codes.ok:
            fmt = "Application authentication failed %d: %s"
            raise SmartKeyAuthAppException(fmt % (res.status_code, res.text))
        else:
            logging.info("Successfully logged in to SmartKey")
            decoded = json.loads(res.text)
            logging.debug(pprint.pformat(decoded))
            self.token = decoded["access_token"]
        # Application will always save the token
        self._save_token()

    def auth_user(self, username, password, save=False):
        """
        Authenticate as user and acquire bearer token.

        Acquire the token to save use for subsequent requests.
        Returns the token lifetime in seconds ("expires_in").
        """
        logging.info("Authenticating with SmartKey as a user")
        creds = "%s:%s" % (username, password)
        encoded = base64.b64encode(creds.encode("ascii"))
        headers = {"Authorization": "Basic " + encoded.decode("ascii")}
        res = requests.request(
            method="POST",
            url="%s/sys/v1/session/auth" % (self.baseurl,),
            headers=headers,
        )
        if res.status_code != requests.codes.ok:
            fmt = "Authentication failed %d: %s"
            raise SmartKeyAuthUserException(fmt % (res.status_code, res.text))
        else:
            logging.info("Successfully logged in to SmartKey")
            decoded = json.loads(res.text)
            logging.debug(pprint.pformat(decoded))
            self.token = decoded["access_token"]
            expires = decoded["expires_in"]
            if save:
                self._save_token()
            return expires

    def select_account(self, acct_id):
        """Select an account by id; returns the response body as a dict."""
        body = {"acct_id": acct_id}
        res = self._request(
            "POST", "/sys/v1/session/select_account", json.dumps(body)
        )
        if res.status_code != requests.codes.ok:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)
        data = res.json()
        logging.debug(pprint.pformat(data))
        return data

    def terminate_session(self):
        """Terminate a session and purge the locally saved token."""
        if self.token is None:
            raise SmartKeyException("No saved session token to invalidate.")
        res = self._request("POST", "/sys/v1/session/terminate")
        if res.status_code == 204:
            self.purge_token()
        else:
            msg = "%d %s" % (res.status_code, res.text)
            raise SmartKeyException(msg)

    def _save_token(self):
        """Save the auth token to disk to use it in the future."""
        # TODO: use NamedTemporaryFile() and move in to place
        # 0o600: token is a credential, keep it owner-readable only.
        fd = os.open(self.token_file, os.O_CREAT | os.O_WRONLY, 0o600)
        os.write(fd, self.token.encode())
        os.close(fd)

    def _fetch_token(self):
        """Return saved auth token or fetch a new one."""
        try:
            with open(self.token_file) as tfile:
                self.token = tfile.readline().rstrip()
            fmt = "Loaded bearer token %s from %s"
            logging.info(fmt % (self.token, self.token_file))
        except IOError:
            # No cached token; only application auth can proceed without
            # user credentials.
            if self.apikey is not None:
                logging.info("No token file on disk. Acquiring.")
                self.auth_app()

    def purge_token(self):
        """Remove the saved auth token (in memory and on disk)."""
        self.token = None
        os.unlink(self.token_file)

    def _request_aux(self, method, url_suffix, data):
        """Request helper.

        This issues a request and does not handle authentication.

        NOTE(review): an unsupported `method` would leave `result` unbound and
        raise NameError at the logging line — callers only pass POST/GET/DELETE.
        """
        logging.debug("%s %s\n%s" % (method, self.baseurl + url_suffix, data))
        headers = {"Authorization": "Bearer " + self.token}
        if method == "POST":
            result = requests.post(
                self.baseurl + url_suffix, headers=headers, data=data
            )
        elif method == "GET":
            result = requests.get(
                url=self.baseurl + url_suffix, headers=headers, params=data
            )
        elif method == "DELETE":
            result = requests.delete(
                url=self.baseurl + url_suffix, headers=headers
            )
        logging.debug("%d\n%s" % (result.status_code, result.text))
        return result

    def _request(self, method, url_suffix, data=None):
        """Make a request with possible authorization.

        Attempt to make a request and if it fails due to authorization,
        automatically authenticate and try again.
        """
        # No token means we need to authenticate and get one
        if self.token is None:
            logging.debug("No token; need authentication")
            if self.apikey is None:
                msg = "SmartKey authentication required"
                raise SmartKeyNeedsAuthException(msg)
            else:
                self.auth_app()
        result = self._request_aux(method, url_suffix, data)
        if result.status_code not in [200, 201, 204]:
            # If we're doing application authentication retry automatically.
            # 403 can mean 'Requested operation is not allowed with this key'
            # in which case (re)authenticating won't solve the problem so we're
            # checking the text to see if it's this specific error.
            if (
                result.status_code == 403
                and "Requested operation is not allowed" not in result.text
            ):
                if self.apikey is not None:
                    logging.info(
                        "Response has 403: %s (Retrying...)" % (result.text,)
                    )
                    self.auth_app()
                    result = self._request_aux(method, url_suffix, data)
                else:
                    raise SmartKeyNeedsAuthException(result.text)
            elif result.status_code == 401 and (
                "operation requires an account to be selected" in result.text
            ):
                raise SmartKeyNeedsAcctSelectException(result.text)
            else:
                # Note that indicating there was an error here is purely a
                # debug message. It's normal for a client to issue requests
                # that don't work out (i.e. key doesn't exist) and we don't
                # want to report that as an error in the log here.
                logging.debug(
                    "Response has error: %s %s"
                    % (result.status_code, result.text)
                )
        return result
|
"""knopy module - some space objects
In this script, some of solar planets' characteristics are defined.
"""
class Planet:
    """Defines a planet.

    Attributes
    ----------
    Aphelion: float
        Aphelion distance of the planet
    Perihelion: float
        Perihelion distance of the planet
    SemiMajorAxis: float
        Semi-major axis of the planet's orbit
    Eccentricity: float
        Eccentricity of the orbit
    OrbitalPeriod: float
        Orbital period of the planet
    AverageOrbitalSpeed: float
        Average orbital speed of the planet
    MeanRadius: float
        Mean radius of the planet
    EquatorialRadius: float
        Equatorial radius of the planet
    PolarRadius: float
        Polar radius of the planet
    Circumference: float
        Circumference of the planet
    SurfaceArea: float
        Surface area of the planet
    Volume: float
        Volume of the planet
    Mass: float
        Mass of the planet
    SurfaceGravity: float
        Surface gravity of the planet
    EscapeVelocity: float
        Escape velocity of the planet
    """
    def __init__(self,Aphelion,Perihelion,SemiMajorAxis,Eccentricity,OrbitalPeriod,AverageOrbitalSpeed,MeanRadius,EquatorialRadius,PolarRadius,Circumference,SurfaceArea,Volume,Mass,SurfaceGravity,EscapeVelocity):
        self.Aphelion = Aphelion
        self.Perihelion = Perihelion
        self.SemiMajorAxis = SemiMajorAxis
        self.Eccentricity = Eccentricity
        self.OrbitalPeriod = OrbitalPeriod
        self.AverageOrbitalSpeed = AverageOrbitalSpeed
        self.MeanRadius = MeanRadius
        self.EquatorialRadius = EquatorialRadius
        self.PolarRadius = PolarRadius
        self.Circumference = Circumference
        self.SurfaceArea = SurfaceArea
        self.Volume = Volume
        self.Mass = Mass
        self.SurfaceGravity = SurfaceGravity
        self.EscapeVelocity = EscapeVelocity
class Star:
    """Defines a star.

    Attributes
    ----------
    EquatorialRadius: float
        Equatorial radius of the star
    Circumference: float
        Circumference of the star
    SurfaceArea: float
        Surface area of the star
    Volume: float
        Volume of the star
    Mass: float
        Mass of the star
    SurfaceGravity: float
        Surface gravity of the star
    EscapeVelocity: float
        Escape velocity of the star
    """
    def __init__(self,EquatorialRadius,Circumference,SurfaceArea,Volume,Mass,SurfaceGravity,EscapeVelocity):
        self.EquatorialRadius = EquatorialRadius
        self.Circumference = Circumference
        self.SurfaceArea = SurfaceArea
        self.Volume = Volume
        self.Mass = Mass
        self.SurfaceGravity = SurfaceGravity
        self.EscapeVelocity = EscapeVelocity
# Module-level instances; values are presumably in SI units (meters, kilograms,
# seconds, m/s) — TODO confirm against the data source.
Earth = Planet(1.528E11,1.47095E11,1.49598023e11,0.0167086,31558149.7635,29780,6371e3,6378.1e3,6356.8e3,40075.017e3,510072000e6,1.08321E+21,5.97237e24,9.80665,11.186e3)
Sun = Star(695700e3,4.379e9,6.09e15,1.41e27,1.9884e30,274,617.7e3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 2020
@author: Sergio Llana (@SergioMinuto90)
"""
from pandas import json_normalize
from abc import ABC, abstractmethod
import pandas as pd
import warnings
from statsbombpy import sb
import sbpUtils
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
from processing import PassingNetworkBuilder
from utils import read_json
class StatsBombPassingNetwork(PassingNetworkBuilder, ABC):
    """Base class for building passing-network plots from StatsBomb eventing data.

    Subclasses implement `prepare_data` to fill the pass-count/value frames.
    """

    def __init__(self, args):
        # Command-line selection of match and team to plot.
        self.team_name = args.team_name
        self.match_id = args.match_id
        # Outputs produced by the steps below; None until computed.
        self.plot_name = None
        self.df_events = None
        self.plot_title = None
        self.names_dict = None
        self.plot_legend = None
        self.num_minutes = None
        self.player_position = None
        self.pair_pass_value = None
        self.pair_pass_count = None
        self.player_pass_value = None
        self.player_pass_count = None

    def read_data(self):
        """
        Read StatsBomb eventing data of the selected 'match_id', generating a pandas DataFrame
        with the events and a dictionary of player names and nicknames.

        Switching to using API over local files has added benefit of parsing player names
        to remove non UTF-8 characters
        """
        # Player name translation dict
        name_dict = sbpUtils.get_lineups(match_id=self.match_id)
        self.names_dict = name_dict
        # Pandas dataframe containing the events of the match
        apiEvents = sb.events(self.match_id,fmt="dict")
        # Flatten the id->event mapping into a list before normalizing.
        newApiEvents =[]
        for event in apiEvents.keys():
            newApiEvents.append(apiEvents[event])
        event_df = json_normalize(newApiEvents, sep="_").assign(match_id=self.match_id)
        self.df_events = event_df

    def compute_total_minutes(self):
        """
        Compute the maximum number of minutes that are used for the passing network.
        The idea is not to have more/less than 11 players in the team because of substitutions or red cards.
        """
        # NOTE(review): if no red card / substitution occurred, these .min()
        # calls yield NaN; the min() below then depends on NaN comparison
        # behavior — confirm it falls back to max_minute as intended.
        first_red_card_minute = self.df_events[self.df_events.foul_committed_card_name.isin(["Second Yellow", "Red Card"])].minute.min()
        first_substitution_minute = self.df_events[self.df_events.type_name == "Substitution"].minute.min()
        max_minute = self.df_events.minute.max()
        self.num_minutes = min(first_substitution_minute, first_red_card_minute, max_minute)

    def set_text_info(self):
        """
        Set the plot's name, title and legend information based on the customization chosen with the command line arguments.
        """
        # Name of the .PNG in the plots/ folder
        self.plot_name = "statsbomb_match{0}_{1}".format(self.match_id, self.team_name)
        # Title of the plot
        opponent_team = [x for x in self.df_events.team_name.unique() if x != self.team_name][0]
        self.plot_title ="{0}'s passing network against {1} (StatsBomb eventing data)".format(self.team_name, opponent_team)
        # Information in the legend
        color_meaning = "number of passes"
        self.plot_legend = "Location: pass origin\nSize: number of passes\nColor: {0}".format(color_meaning)

    @abstractmethod
    def prepare_data(self):
        # Subclasses build the five DataFrames consumed by draw_pass_map.
        pass

    @staticmethod
    def _statsbomb_to_point(location, max_width=120, max_height=80):
        '''
        Convert a point's coordinates from a StatsBomb's range to 0-1 range.

        StatsBomb pitches are 120x80; the y axis is flipped so that 0 is the
        bottom of the plot.
        '''
        return location[0] / max_width, 1-(location[1] / max_height)
class StatsBombBasicPassingNetwork(StatsBombPassingNetwork):
    """Basic passing network: node size and color both encode the number of passes."""

    def __init__(self, args):
        super(StatsBombBasicPassingNetwork, self).__init__(args)

    def prepare_data(self):
        """
        Prepares the five pandas DataFrames that 'draw_pass_map' needs.
        """
        # We select all successful passes done by the selected team before the minute
        # of the first substitution or red card.
        df_passes = self.df_events[(self.df_events.type_name == "Pass") &
                                   (self.df_events.pass_outcome_name.isna()) &
                                   (self.df_events.team_name == self.team_name) &
                                   (self.df_events.minute < self.num_minutes)].copy()

        # If available, use player's nickname instead of full name to optimize space in plot.
        # Fixed: use .get() so a player missing from the lineup dict falls back to the
        # full name instead of raising KeyError.
        df_passes["pass_recipient_name"] = df_passes.pass_recipient_name.apply(
            lambda x: self.names_dict.get(x) or x)
        df_passes["player_name"] = df_passes.player_name.apply(
            lambda x: self.names_dict.get(x) or x)

        # In this type of plot, both the size and color (i.e. value) mean the same: number of passes
        self.player_pass_count = df_passes.groupby("player_name").size().to_frame("num_passes")
        self.player_pass_value = df_passes.groupby("player_name").size().to_frame("pass_value")

        # 'pair_key' combines the names of the passer and receiver of each pass (sorted alphabetically)
        df_passes["pair_key"] = df_passes.apply(
            lambda x: "_".join(sorted([x["player_name"], x["pass_recipient_name"]])), axis=1)
        self.pair_pass_count = df_passes.groupby("pair_key").size().to_frame("num_passes")
        self.pair_pass_value = df_passes.groupby("pair_key").size().to_frame("pass_value")

        # Average pass origin's coordinates for each player (median is robust to outliers)
        df_passes["origin_pos_x"] = df_passes.location.apply(lambda x: self._statsbomb_to_point(x)[0])
        df_passes["origin_pos_y"] = df_passes.location.apply(lambda x: self._statsbomb_to_point(x)[1])
        self.player_position = df_passes.groupby("player_name").agg(
            {"origin_pos_x": "median", "origin_pos_y": "median"})
|
from django.shortcuts import render, redirect
from django.urls import reverse
from . import models
# Create your views here.
# /cars/
def show(request):
    """Render the list of all cars."""
    context = {'all_cars': models.Car.objects.all()}
    return render(request, 'cars/html/show.html', context=context)
def add(request):
    """Create a Car from submitted form data (POST), or show the add form (GET).

    Fixed: check request.method instead of the truthiness of request.POST —
    a POST request with an empty body has a falsy request.POST and was
    previously treated like a GET.
    """
    if request.method == "POST":
        brand = request.POST["brand"]
        year = int(request.POST["year"])
        models.Car.objects.create(brand=brand, year=year)
        return redirect(reverse('cars:show'))
    return render(request, 'cars/html/add.html')
def delete(request):
    """Delete the Car with the submitted pk (POST), or show the delete form (GET).

    Fixes: check request.method rather than request.POST truthiness, and
    narrow the bare `except:` to the lookup failure actually expected —
    a bare except also swallowed KeyboardInterrupt/SystemExit.
    """
    if request.method == "POST":
        pk = request.POST["pk"]
        try:
            models.Car.objects.get(pk=pk).delete()
        except models.Car.DoesNotExist:
            # Best-effort: an unknown pk is not an error for the user flow.
            print("pk not found")
        return redirect(reverse('cars:show'))
    return render(request, 'cars/html/delete.html')
|
#!/usr/bin/env python
#########################################################################################
#
# Perform various types of processing from the spinal cord segmentation (e.g. extract centerline, compute CSA, etc.).
# (extract_centerline) extract the spinal cord centerline from the segmentation. Output file is an image in the same
# space as the segmentation.
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Benjamin De Leener, Julien Touati, Gabriel Mangeat
# Created: 2014-05-24
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# DEFAULT PARAMETERS
class param:
    """Container for the script's default parameters."""

    def __init__(self):
        self.debug = 0              # 1: use hard-coded debug inputs
        self.verbose = 1            # verbosity level
        self.step = 1               # step of discretized plane in mm
        self.remove_temp_files = 1  # 1: delete the temporary folder when done
import re
import math
import sys
import getopt
import os
import commands
import numpy as np
import time
import sct_utils as sct
from sct_nurbs import NURBS
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.misc import imsave
try:
import nibabel
except ImportError:
print '--- nibabel not installed! Exit program. ---'
sys.exit(2)
# MAIN
# ==========================================================================================
def main():
    """Parse command-line options and dispatch to the requested process.

    NOTE(review): this is Python 2 code (print statements, `commands` module).
    Reading `param.verbose` below only works if a module-level instance
    shadows the `param` class (e.g. `param = param()` near the entry point,
    outside this view) — confirm.
    """
    # Initialization
    path_script = os.path.dirname(__file__)
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
    # THIS DOES NOT WORK IN MY LAPTOP: path_sct = os.environ['SCT_DIR'] # path to spinal cord toolbox
    #path_sct = path_script[:-8] # TODO: make it cleaner!
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    fname_segmentation = ''
    name_process = ''
    # The two processes this script knows how to run.
    processes = ['extract_centerline','compute_CSA']
    verbose = param.verbose
    start_time = time.time()
    remove_temp_files = param.remove_temp_files

    # Parameters for debug mode
    if param.debug:
        fname_segmentation = path_sct+'/testing/data/errsm_23/t2/t2_manual_segmentation.nii.gz'
        verbose = 1
        remove_temp_files = 0

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:p:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            fname_segmentation = arg
        elif opt in ("-p"):
            name_process = arg
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_segmentation == '' or name_process == '':
        usage()

    # display usage if the requested process is not available
    if name_process not in processes:
        usage()

    # check existence of input files
    sct.check_file_exist(fname_segmentation)

    # print arguments
    print '\nCheck parameters:'
    print '.. segmentation file: '+fname_segmentation

    # Dispatch to the requested process.
    if name_process == 'extract_centerline':
        extract_centerline(fname_segmentation)
    if name_process == 'compute_CSA':
        compute_CSA(fname_segmentation)

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s'
# EXTRACT_CENTERLINE
# ==========================================================================================
def extract_centerline(fname_segmentation):
    """Extract the cord centerline from a binary segmentation, save as NIFTI.

    Per-slice averaging of the segmentation voxels gives rough centerline
    points, which are fitted with a B-spline; the fitted line is written to
    <file>_centerline<ext> re-oriented back to the input image orientation.
    All work happens in a timestamped temporary folder, removed when
    param.remove_temp_files == 1.

    Args:
        fname_segmentation: path to the binary segmentation NIFTI file.
    """
    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)
    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)
    # copy files into tmp folder
    sct.run('cp '+fname_segmentation+' '+path_tmp)
    # go to tmp folder
    os.chdir(path_tmp)
    remove_temp_files = param.remove_temp_files
    # Change orientation of the input segmentation into RPI
    print '\nOrient segmentation image to RPI orientation...'
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    sct.run('sct_orientation -i ' + file_data+ext_data + ' -o ' + fname_segmentation_orient + ' -orientation RPI')
    # Extract orientation of the input segmentation
    status,sct_orientation_output = sct.run('sct_orientation -i ' + file_data+ext_data + ' -get')
    # the orientation code is the last 3 characters of the command output
    orientation = sct_orientation_output[-3:]
    print '\nOrientation of segmentation image: ' + orientation
    # Get size of data
    print '\nGet dimensions data...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
    print '.. '+str(nx)+' x '+str(ny)+' y '+str(nz)+' z '+str(nt)
    print '\nOpen segmentation volume...'
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()
    # Extract min and max index in Z direction
    X, Y, Z = (data>0).nonzero()
    # NOTE(review): prints the entire volume array -- looks like leftover
    # debug output; confirm whether it should be removed
    print data
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
    y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index+1):
        x_seg, y_seg = (data[:,:,iz]>0).nonzero()
        x_centerline[iz-min_z_index] = np.mean(x_seg)
        y_centerline[iz-min_z_index] = np.mean(y_seg)
    # erase the segmentation voxels so 'data' ends up holding only the centerline
    for k in range(len(X)):
        data[X[k],Y[k],Z[k]] = 0
    # Fit the centerline points with splines and return the new fitted coordinates
    x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index+1):
        # NOTE(review): round() yields a float; float indexing only worked on
        # old numpy versions -- confirm, or cast to int()
        data[round(x_centerline_fit[iz-min_z_index]),round(y_centerline_fit[iz-min_z_index]),iz] = 1
    # Write the centerline image in RPI orientation
    hdr.set_data_dtype('uint8') # set imagetype to uint8
    print '\nWrite NIFTI volumes...'
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, 'tmp.centerline.nii')
    sct.generate_output_file('tmp.centerline.nii','./',file_data+'_centerline',ext_data)
    del data
    # come back to parent folder
    os.chdir('..')
    # Change orientation of the output centerline into input orientation
    print '\nOrient centerline image to input orientation: ' + orientation
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    sct.run('sct_orientation -i ' + path_tmp+'/'+file_data+'_centerline'+ext_data + ' -o ' + file_data+'_centerline'+ext_data + ' -orientation ' + orientation)
    # Remove temporary files
    if remove_temp_files == 1 :
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
    # to view results
    print '\nTo view results, type:'
    print 'fslview '+file_data+'_centerline &\n'
    # End of extract_centerline
# COMPUTE_CSA
# ==========================================================================================
def compute_CSA(fname_segmentation):
# Extract path, file and extension
path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp)
# copy files into tmp folder
sct.run('cp '+fname_segmentation+' '+path_tmp)
# go to tmp folder
os.chdir(path_tmp)
remove_temp_files = param.remove_temp_files
step = param.step
# # Change orientation of the input segmentation into RPI
print '\nOrient segmentation image to RPI orientation...'
fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
sct.run('sct_orientation -i ' + file_data+ext_data + ' -o ' + fname_segmentation_orient + ' -orientation RPI')
# Get size of data
print '\nGet dimensions data...'
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
print '.. '+str(nx)+' x '+str(ny)+' y '+str(nz)+' z '+str(nt)
print '\nOpen segmentation volume...'
file = nibabel.load(fname_segmentation_orient)
data = file.get_data()
hdr = file.get_header()
x_scale=hdr['pixdim'][1]
y_scale=hdr['pixdim'][2]
z_scale=hdr['pixdim'][3]
#
# Extract min and max index in Z direction
X, Y, Z = (data>0).nonzero()
coords = np.array([str([X[i],Y[i],Z[i]]) for i in range(0,len(Z))]) #don't know why but finding strings in array of array of strings is WAY fater than doing the same with integers
#coords = [[X[i],Y[i],Z[i]] for i in range(0,len(Z))]
min_z_index, max_z_index = min(Z), max(Z)
x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
# Extract segmentation points and average per slice
for iz in range(min_z_index, max_z_index+1):
x_seg, y_seg = (data[:,:,iz]>0).nonzero()
x_centerline[iz-min_z_index] = np.mean(x_seg)
y_centerline[iz-min_z_index] = np.mean(y_seg)
# Fit the centerline points with splines and return the new fitted coordinates
x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
# fig=plt.figure()
# ax=Axes3D(fig)
# ax.plot(x_centerline,y_centerline,z_centerline,zdir='z')
# ax.plot(x_centerline_fit,y_centerline_fit,z_centerline,zdir='z')
# plt.show()
# step = min([x_scale,y_scale])
# print step
x=np.array([1,0,0])
y=np.array([0,1,0])
z=np.array([0,0,1])
print('\nComputing CSA...')
sections=[0 for i in range(0,max_z_index-min_z_index+1)]
for iz in range(0,len(z_centerline)):
a = x_centerline_deriv[iz]
b = y_centerline_deriv[iz]
c = z_centerline_deriv[iz]
x_center = x_centerline_fit[iz]
y_center = y_centerline_fit[iz]
z_center = z_centerline[iz]
d = -(a*x_center+b*y_center+c*z_center)
normal=normalize(np.array([a,b,c]))
basis_1 = normalize(np.cross(normal,x)) # use of x in order to get orientation of each plane, basis_1 is in the plane ax+by+cz+d=0
basis_2 = normalize(np.cross(normal,basis_1)) # third vector of base
angle = np.arccos(np.dot(normal,z))
max_diameter = (max([(max(X)-min(X))*x_scale,(max(Y)-min(Y))*y_scale])*np.sqrt(2))/(np.cos(angle)) # maximum dimension of the tilted plane
plane = np.zeros((int(max_diameter/step),int(max_diameter/step))) ## discretized plane which will be filled with 0/1
plane_grid = np.linspace(-int(max_diameter/2),int(max_diameter/2),(max_diameter/step)) # how the plane will be skimmed through
cpt=0
for i_b1 in plane_grid :
for i_b2 in plane_grid : # we go through the plane
point = np.array([x_center*x_scale,y_center*y_scale,z_center*z_scale]) + i_b1*basis_1 +i_b2*basis_2
coord_voxel = str([ int(round(point[0]/x_scale)), int(round(point[1]/y_scale)), int(round(point[2]/z_scale))]) ## to which voxel belongs each point of the plane
#coord_voxel = [ int(round(point[0]/x_scale)), int(round(point[1]/y_scale)), int(round(point[2]/z_scale))] ## to which voxel belongs each point of the plane
if (coord_voxel in coords) is True : ## if this voxel is 1
plane[i_b1+int(max_diameter/2)][i_b2+int(max_diameter/2)]=1
cpt = cpt+1
sections[iz]=cpt*step*step # number of voxels that are in the intersection of each plane and the nonzeros values of segmentation, times the area of one cell of the discretized plane
print sections[iz]
#os.chdir('..')
#sct.run('mkdir JPG_Results')
#os.chdir('JPG_Results')
#imsave('plane_' + str(iz) + '.jpg', plane) # if you want ot save the images with the sections
#os.chdir('..')
#os.chdir('path_tmp')
#print sections
## plotting results
fig=plt.figure()
plt.plot(z_centerline*z_scale, sections)
plt.show()
# come back to parent folder
os.chdir('..')
# creating output text file
print('\nGenerating output text file...')
file = open('Cross_Area_Sections.txt','w')
file.write('List of Cross Section Areas for each z slice\n')
for i in range(min_z_index, max_z_index+1):
file.write('\nz = ' + str(i*z_scale) + ' mm -> CSA = ' + str(sections[i]) + ' mm^2')
file.close()
# Remove temporary files
if remove_temp_files == 1 :
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp)
# End of compute_CSA
#=======================================================================================================================
# B-Spline fitting
#=======================================================================================================================
def b_spline_centerline(x_centerline,y_centerline,z_centerline):
print '\nFitting centerline using B-spline approximation...'
points = [[x_centerline[n],y_centerline[n],z_centerline[n]] for n in range(len(x_centerline))]
nurbs = NURBS(3,3000,points) # BE very careful with the spline order that you choose : if order is too high ( > 4 or 5) you need to set a higher number of Control Points (cf sct_nurbs ). For the third argument (number of points), give at least len(z_centerline)+500 or higher
P = nurbs.getCourbe3D()
x_centerline_fit=P[0]
y_centerline_fit=P[1]
Q = nurbs.getCourbe3D_deriv()
x_centerline_deriv=Q[0]
y_centerline_deriv=Q[1]
z_centerline_deriv=Q[2]
return x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv
def normalize(vect):
    """Return *vect* scaled to unit Euclidean length."""
    return vect / np.linalg.norm(vect)
# def find_in_list(small_list,list_lists):
# """find a list in a list of lists"""
# test=0
# cpt2=0
# while (test==0) & (cpt2<len(coords)): ## find in list
# if (int(coord_voxel[0])==int(coords[cpt2][0]))&(int(coord_voxel[1])==int(coords[cpt2][1]))&(int(coord_voxel[2])==int(coords[cpt2][2])):
# cpt=cpt+1
# test=1
# cpt2=cpt2+1
# Print usage
# ==========================================================================================
def usage():
    """Print the command-line help text for this script and exit(2)."""
    print '\n' \
        ''+os.path.basename(__file__)+'\n' \
        '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' \
        'Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>\n' \
        '\n'\
        'DESCRIPTION\n' \
        ' Perform various types of processing from the spinal cord segmentation (e.g. extract centerline, compute CSA,' \
        ' etc.).\n' \
        '\n' \
        'USAGE\n' \
        ' '+os.path.basename(__file__)+' -i <segmentation> -p <process>\n' \
        '\n' \
        'MANDATORY ARGUMENTS\n' \
        ' -i <segmentation> segmentation data\n' \
        ' -p <process> process to perform {extract_centerline},{compute_CSA}\n' \
        '\n' \
        'OPTIONAL ARGUMENTS\n' \
        ' -v <0,1> verbose. Default='+str(param.verbose)+'.\n'
    # exit program
    sys.exit(2)
# START PROGRAM
# =========================================================================================
if __name__ == "__main__":
    # initialize parameters
    # NOTE(review): this rebinds the module-level name 'param' (a class) to an
    # instance of itself; main() and the process functions read this global
    param = param()
    # call main function
    main()
|
"""
Summary:
Contains the Conduit unit type classes.
This holds all of the data read in from the conduit units in the dat file.
Can be called to load in the data and read and update the contents
held in the object.
Author:
Duncan Runnacles
Copyright:
Duncan Runnacles 2020
TODO:
Updates:
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
from ship.fmp.datunits.isisunit import AUnit
from ship.fmp.headdata import HeadDataItem
from ship.datastructures import DATA_TYPES as dt
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures import dataobject as do
from ship.datastructures.rowdatacollection import RowDataCollection
class ConduitUnit(AUnit):
    """Base class for the conduit type units in the .dat file.

    Concrete conduit shapes (rectangular, circular, arch, etc.) subclass
    this and override FILE_KEY2 plus the read/write behaviour.
    """

    # Class constants
    UNIT_TYPE = 'conduit'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = None

    def __init__(self):
        """Set the generic conduit type and category on the unit."""
        super(ConduitUnit, self).__init__()
        self._unit_type = ConduitUnit.UNIT_TYPE
        self._unit_category = ConduitUnit.UNIT_CATEGORY

    def icLabels(self):
        """Overrides superclass method.

        Returns:
            list - the upstream and downstream node labels.
        """
        return [self._name, self._name_ds]

    def linkLabels(self):
        """Overrides superclass method.

        Returns:
            dict - upstream/downstream node names keyed by link label.
        """
        return {'name': self.name, 'name_ds': self.name_ds}
class RectangularConduitUnit(ConduitUnit):
    """Conduit unit with a rectangular cross-section (CONDUIT / RECTANGULAR)."""

    # Class constants
    UNIT_TYPE = 'conduit_rectangular'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'RECTANGULAR'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        # NOTE(review): ConduitUnit.__init__ accepts no keyword arguments, so
        # any non-empty **kwargs forwarded here would raise TypeError -- confirm
        super(RectangularConduitUnit, self).__init__(**kwargs)
        self._unit_type = RectangularConduitUnit.UNIT_TYPE
        self._unit_category = RectangularConduitUnit.UNIT_CATEGORY
        # Header values in dat-file order: row/col indices map to the unit's
        # line layout; all numeric fields are 10-character right-justified.
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
            'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
            'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
            'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 3, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', ''), allow_blank=True),
            'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 6, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', ''), allow_blank=True),
            'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'roughness_invert': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
            'roughness_walls': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
            'roughness_soffit': HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=5),
        }

    def readUnitData(self, unit_data, file_line):
        '''Reads the given data into the object.

        Fixed-column parsing: line +2 holds the us/ds node names (12 chars
        each), +3 distance, +4 roughness type, +5 the geometry and slot
        fields in 10-char columns, +6 the three roughness values.

        See Also:
            isisunit.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - index of the last line consumed (file_line + 6).
        '''
        self.head_data['comment'].value = unit_data[file_line][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
        self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
        self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
        self.head_data['height'].value = unit_data[file_line + 5][20:30].strip()
        self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][30:40].strip()
        self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][40:50].strip()
        self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][50:60].strip()
        self.head_data['top_slot_status'].value = unit_data[file_line + 5][60:70].strip()
        self.head_data['top_slot_distance'].value = unit_data[file_line + 5][70:80].strip()
        self.head_data['top_slot_depth'].value = unit_data[file_line + 5][80:].strip()
        self.head_data['roughness_invert'].value = unit_data[file_line + 6][:10].strip()
        self.head_data['roughness_walls'].value = unit_data[file_line + 6][10:20].strip()
        self.head_data['roughness_soffit'].value = unit_data[file_line + 6][20:].strip()
        return file_line + 6

    def getData(self):
        '''Returns the formatted data for this unit.

        See Also:
            isisunit.

        Returns:
            List of strings formatted for writing to the new dat file.
        '''
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('\nRECTANGULAR')
        out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        key_order = ['distance', 'roughness_type', 'invert', 'width', 'height',
                     'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
                     'top_slot_status', 'top_slot_distance', 'top_slot_depth',
                     'roughness_invert', 'roughness_walls', 'roughness_soffit']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        # the items embed '\n' where a new dat-file line starts; join then
        # split to turn the stream into one string per output line
        out_data = ''.join(out).split('\n')
        return out_data
class CircularConduitUnit(ConduitUnit):
    """Conduit unit with a circular cross-section (CONDUIT / CIRCULAR)."""

    # Class constants
    UNIT_TYPE = 'conduit_circular'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'CIRCULAR'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(CircularConduitUnit, self).__init__(**kwargs)
        self._unit_type = CircularConduitUnit.UNIT_TYPE
        self._unit_category = CircularConduitUnit.UNIT_CATEGORY
        # Header values in dat-file order; numeric fields are 10-char columns.
        # NOTE(review): unlike RectangularConduitUnit, the slot_status items
        # here lack allow_blank=True although '' is in choices -- confirm
        # whether blank values should be accepted
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
            'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
            'diameter': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 2, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 5, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 6, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'roughness_below_axis': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
            'roughness_above_axis': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
        }

    def readUnitData(self, unit_data, file_line):
        '''Reads the given data into the object.

        Fixed-column parsing: line +2 holds the us/ds node names, +3
        distance, +4 roughness type, +5 diameter and slot fields in 10-char
        columns, +6 the two roughness values.

        See Also:
            isisunit.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - index of the last line consumed (file_line + 6).
        '''
        self.head_data['comment'].value = unit_data[file_line][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
        self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
        self.head_data['diameter'].value = unit_data[file_line + 5][10:20].strip()
        self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][20:30].strip()
        self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][30:40].strip()
        self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][40:50].strip()
        self.head_data['top_slot_status'].value = unit_data[file_line + 5][50:60].strip()
        self.head_data['top_slot_distance'].value = unit_data[file_line + 5][60:70].strip()
        self.head_data['top_slot_depth'].value = unit_data[file_line + 5][70:].strip()
        self.head_data['roughness_below_axis'].value = unit_data[file_line + 6][:10].strip()
        self.head_data['roughness_above_axis'].value = unit_data[file_line + 6][10:20].strip()
        return file_line + 6

    def getData(self):
        '''Returns the formatted data for this unit.

        See Also:
            isisunit.

        Returns:
            List of strings formatted for writing to the new dat file.
        '''
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('\nCIRCULAR')
        out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        key_order = ['distance', 'roughness_type', 'invert', 'diameter',
                     'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
                     'top_slot_status', 'top_slot_distance', 'top_slot_depth',
                     'roughness_below_axis', 'roughness_above_axis']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        # items embed '\n' at dat-file line breaks; join then split to get
        # one string per output line
        out_data = ''.join(out).split('\n')
        return out_data
class FullarchConduitUnit(ConduitUnit):
    """Conduit unit with a full-arch cross-section (CONDUIT / FULLARCH)."""

    # Class constants
    UNIT_TYPE = 'conduit_fullarch'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'FULLARCH'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(FullarchConduitUnit, self).__init__(**kwargs)
        self._unit_type = FullarchConduitUnit.UNIT_TYPE
        self._unit_category = FullarchConduitUnit.UNIT_CATEGORY
        # Header values in dat-file order; numeric fields are 10-char columns.
        # NOTE(review): slot_status items lack allow_blank=True although ''
        # is in choices (RectangularConduitUnit sets it) -- confirm
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
            'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
            'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
            'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 3, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 6, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'roughness_below_axis': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
            'roughness_above_axis': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
        }

    def readUnitData(self, unit_data, file_line):
        '''Reads the given data into the object.

        Fixed-column parsing: line +2 holds the us/ds node names, +3
        distance, +4 roughness type, +5 geometry and slot fields in 10-char
        columns, +6 the two roughness values.

        See Also:
            isisunit.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - index of the last line consumed (file_line + 6).
        '''
        self.head_data['comment'].value = unit_data[file_line][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
        self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
        self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
        self.head_data['height'].value = unit_data[file_line + 5][20:30].strip()
        self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][30:40].strip()
        self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][40:50].strip()
        self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][50:60].strip()
        self.head_data['top_slot_status'].value = unit_data[file_line + 5][60:70].strip()
        self.head_data['top_slot_distance'].value = unit_data[file_line + 5][70:80].strip()
        self.head_data['top_slot_depth'].value = unit_data[file_line + 5][80:].strip()
        self.head_data['roughness_below_axis'].value = unit_data[file_line + 6][:10].strip()
        self.head_data['roughness_above_axis'].value = unit_data[file_line + 6][10:20].strip()
        return file_line + 6

    def getData(self):
        '''Returns the formatted data for this unit.

        See Also:
            isisunit.

        Returns:
            List of strings formatted for writing to the new dat file.
        '''
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('\nFULLARCH')
        out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        key_order = ['distance', 'roughness_type', 'invert', 'width', 'height',
                     'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
                     'top_slot_status', 'top_slot_distance', 'top_slot_depth',
                     'roughness_below_axis', 'roughness_above_axis']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        # items embed '\n' at dat-file line breaks; join then split to get
        # one string per output line
        out_data = ''.join(out).split('\n')
        return out_data
class SprungarchConduitUnit(ConduitUnit):
    """Conduit unit with a sprung-arch cross-section (CONDUIT / SPRUNGARCH).

    The geometry is defined by width plus the springing and crown heights.
    """

    # Class constants
    UNIT_TYPE = 'conduit_sprungarch'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'SPRUNGARCH'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(SprungarchConduitUnit, self).__init__(**kwargs)
        self._unit_type = SprungarchConduitUnit.UNIT_TYPE
        self._unit_category = SprungarchConduitUnit.UNIT_CATEGORY
        # Header values in dat-file order; numeric fields are 10-char columns.
        # NOTE(review): slot_status items lack allow_blank=True although ''
        # is in choices (RectangularConduitUnit sets it) -- confirm
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
            'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
            'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'springing_height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
            'crown_height': HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3),
            'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 4, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 6, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 7, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
            'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 9, dtype=dt.FLOAT, dps=3, allow_blank=True),
            'roughness_invert': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
            'roughness_walls': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
            'roughness_soffit': HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=5),
        }

    def readUnitData(self, unit_data, file_line):
        '''Reads the given data into the object.

        Fixed-column parsing: line +2 holds the us/ds node names, +3
        distance, +4 roughness type, +5 geometry and slot fields in 10-char
        columns, +6 the three roughness values.

        See Also:
            isisunit.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - index of the last line consumed (file_line + 6).
        '''
        self.head_data['comment'].value = unit_data[file_line][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
        self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
        self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
        self.head_data['springing_height'].value = unit_data[file_line + 5][20:30].strip()
        self.head_data['crown_height'].value = unit_data[file_line + 5][30:40].strip()
        self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][40:50].strip()
        self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][50:60].strip()
        self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][60:70].strip()
        self.head_data['top_slot_status'].value = unit_data[file_line + 5][70:80].strip()
        self.head_data['top_slot_distance'].value = unit_data[file_line + 5][80:90].strip()
        self.head_data['top_slot_depth'].value = unit_data[file_line + 5][90:].strip()
        self.head_data['roughness_invert'].value = unit_data[file_line + 6][:10].strip()
        self.head_data['roughness_walls'].value = unit_data[file_line + 6][10:20].strip()
        self.head_data['roughness_soffit'].value = unit_data[file_line + 6][20:30].strip()
        return file_line + 6

    def getData(self):
        '''Returns the formatted data for this unit.

        See Also:
            isisunit.

        Returns:
            List of strings formatted for writing to the new dat file.
        '''
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('\nSPRUNGARCH')
        out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        key_order = ['distance', 'roughness_type', 'invert', 'width', 'springing_height',
                     'crown_height', 'bottom_slot_status', 'bottom_slot_distance',
                     'bottom_slot_depth', 'top_slot_status', 'top_slot_distance',
                     'top_slot_depth', 'roughness_invert', 'roughness_walls',
                     'roughness_soffit']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        # items embed '\n' at dat-file line breaks; join then split to get
        # one string per output line
        out_data = ''.join(out).split('\n')
        return out_data
class RowDataConduitType(ConduitUnit):
    """Base class for conduit units that store per-row geometry data.

    Provides the shared row collection (chainage, elevation, roughness)
    and the read/write plumbing; subclasses supply the head-data handling
    through _setup_headdata / _readHeadData / _getHeadData.
    """

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(RowDataConduitType, self).__init__(**kwargs)
        self._setup_headdata()
        dobjs = [
            # update_callback is called every time a value is added or updated
            do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            # Note roughness must be Colebrook-White for Symmetrical conduits
            do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.039, no_of_dps=5),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow({rdt.CHAINAGE: 0, rdt.ELEVATION: 0, rdt.ROUGHNESS: 0})

    def _setup_headdata(self):
        """Hook for subclasses to populate self.head_data."""
        pass

    def readUnitData(self, unit_data, file_line):
        '''Reads the given data into the object.

        See Also:
            isisunit.

        Args:
            unit_data (list): The raw file data to be processed.

        Returns:
            int - index of the last line consumed.
        '''
        file_line = self._readHeadData(unit_data, file_line)
        file_line = self._readRowData(unit_data, file_line)
        return file_line - 1

    def _readRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows
        variable in the River Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.

        Returns:
            int - index of the first line after the row data.
        """
        # first line holds the row count; rows follow in 10-char columns
        end_line = int(unit_data[file_line].strip())
        file_line += 1
        try:
            # Load the geometry data
            for i in range(file_line, end_line + file_line):
                chain = unit_data[i][0:10].strip()
                elev = unit_data[i][10:20].strip()
                rough = unit_data[i][20:30].strip()
                self.row_data['main'].addRow(
                    {rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough},
                    # We don't need to make backup copies here. If it fails the
                    # load fails anyway and this will just really slow us down
                    no_copy=True
                )
        except NotImplementedError:
            # BUGFIX: Logger has no ERROR attribute; the original
            # 'logger.ERROR(...)' raised AttributeError instead of logging
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
            raise
        return end_line + file_line

    def getData(self):
        """Retrieve the data in this unit.

        The String[] returned is formatted for printing in the fashion
        of the .dat file.

        Return:
            List of strings formated for writing to .dat file.
        """
        row_count = self.row_data['main'].numberOfRows()
        out_data = self._getHeadData()
        out_data.append('{:>10}'.format(row_count))
        out_data.extend(self._getRowData(row_count))
        return out_data

    def _getRowData(self, row_count):
        """Returns the row data in this class.

        For all the rows in the river geometry section get the data from
        the rowdatacollection class.

        Returns:
            list = containing the formatted unit rows.
        """
        out_data = []
        for i in range(0, row_count):
            out_data.append(self.row_data['main'].getPrintableRow(i))
        return out_data
class SymmetricalConduitUnit(RowDataConduitType):
    """Symmetrical conduit unit (CONDUIT / SECTION) with row geometry data."""

    # Class constants
    UNIT_TYPE = 'conduit_symmetrical'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'SECTION'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(SymmetricalConduitUnit, self).__init__(**kwargs)

    def _setup_headdata(self):
        """Set the unit type/category and the head data for this unit."""
        self._unit_type = SymmetricalConduitUnit.UNIT_TYPE
        self._unit_category = SymmetricalConduitUnit.UNIT_CATEGORY
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
        }

    def _readHeadData(self, unit_data, file_line):
        """Format the header data for writing to file.

        Args:
            unit_data (list): containing the data to read.

        Returns:
            int - index of the first line after the head data (file_line + 4).
        """
        self.head_data['comment'].value = unit_data[file_line + 0][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:24].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        return file_line + 4

    def _getHeadData(self):
        """Get the header data formatted for printing out to file.

        Returns:
            List of strings - The formatted header list.
        """
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('SECTION')
        out.append('{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        out.append('{:<10}'.format(self.head_data['distance'].format()))
        return out
class AsymmetricalConduitUnit(RowDataConduitType):
    """Asymmetrical conduit unit (CONDUIT / ASYMMETRIC) with row geometry data."""

    # Class constants
    UNIT_TYPE = 'conduit_asymmetrical'
    UNIT_CATEGORY = 'conduit'
    FILE_KEY = 'CONDUIT'
    FILE_KEY2 = 'ASYMMETRIC'

    def __init__(self, **kwargs):
        '''Constructor.
        '''
        super(AsymmetricalConduitUnit, self).__init__(**kwargs)

    def _setup_headdata(self):
        """Set the unit type/category and the head data for this unit."""
        self._unit_type = AsymmetricalConduitUnit.UNIT_TYPE
        self._unit_category = AsymmetricalConduitUnit.UNIT_CATEGORY
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'roughness_type': HeadDataItem('DARCY', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'DARCY')),
        }

    def _readHeadData(self, unit_data, file_line):
        """Format the header data for writing to file.

        Args:
            unit_data (list): containing the data to read.

        Returns:
            int - index of the first line after the head data (file_line + 4).
        """
        self.head_data['comment'].value = unit_data[file_line + 0][8:].strip()
        self._name = unit_data[file_line + 2][:12].strip()
        self._name_ds = unit_data[file_line + 2][12:24].strip()
        self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
        self.head_data['roughness_type'].value = unit_data[file_line + 3][10:20].strip()
        return file_line + 4

    def _getHeadData(self):
        """Get the header data formatted for printing out to file.

        Returns:
            List of strings - The formatted header list.
        """
        out = []
        out.append('CONDUIT ' + self.head_data['comment'].value)
        out.append('ASYMMETRIC')
        out.append('{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
        out.append(
            '{:<10}'.format(self.head_data['distance'].format()) +
            '{:>10}'.format(self.head_data['roughness_type'].format())
        )
        return out
|
from tkinter import *
class Calculator(Frame):
    """A minimal 4x4 push-button calculator built on Tkinter.

    Digits and operators append to the display; '=' evaluates the
    expression; 'clear' empties the display.
    """

    def __init__(self):
        Frame.__init__(self)
        self.pack(expand=YES, fill=BOTH)
        self.master.title('calculator')
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.grid(sticky=W + E + N + S)
        display = StringVar()
        # The Entry must share this StringVar instance; a fresh StringVar()
        # here would leave button presses invisible in the display.
        entry = Entry(self, relief=SUNKEN, textvariable=display)
        # W+E+N+S expands the widget in both directions (default is centered).
        entry.grid(row=0, column=0, columnspan=4, sticky=W + E + N + S)

        def evaluate():
            # SECURITY NOTE: eval() executes arbitrary Python. Acceptable for
            # a local toy calculator; never use on untrusted input.
            try:
                display.set(str(eval(display.get())))
            except Exception:
                # Previously a malformed expression (e.g. "2++") raised inside
                # the Tk callback; show an error instead of failing silently.
                display.set("Error")

        # Lay out the 16 keys in a 4x4 grid.
        grid = '789+456-123*0./='
        for index, textChar in enumerate(grid):
            if textChar == '=':
                command = evaluate
            else:
                # Bind textChar as a default argument so each button keeps its
                # own character (avoids the late-binding closure pitfall).
                command = lambda text=textChar: display.set(display.get() + text)
            a = Button(self, text=textChar, width=5, command=command)
            a.grid(row=1 + index // 4, column=index % 4)
        # Clear button wipes the display.
        b = Button(self, text="clear", width=20, command=lambda: display.set(""))
        b.grid(row=7, column=0, columnspan=4, sticky=W + E + N + S)
if __name__ == '__main__':
    # Build the calculator UI and enter the Tk event loop (blocks until the
    # window is closed).
    Calculator().mainloop()
|
#!/usr/bin/env python
# Python script that grabs all the binaries produced when building the
# Sharpmake solution and copies them in a directory. (By default, in a
# directory named /deploy.) Without that script, you need to manually copy
# and paste the binaries of every platform implementation assembly you have
# built.
#
# You typically use it like this:
# py deploy-binaries.py --config <Release or Debug> --target-dir=<Where to copy the binaries>
#
# Please use the -h or --help options for more info about the command line
# arguments that this script accepts.
#
# This script supports Python 3.
import os.path
import shutil
import sys
from optparse import OptionParser
# Parses the command line options.
parser = OptionParser()
parser.add_option("-r", "--root-dir",
                  dest="root_dir", default=os.getcwd(),
                  help="The root path of the Sharpmake source code.",
                  metavar="DIR")
parser.add_option("-t", "--target-dir",
                  dest="target_dir", default="Binaries",
                  help="The directory where to deploy the files.",
                  metavar="DIR")
parser.add_option("-c", "--configuration",
                  dest="config", default="release",
                  help="Select the configuration to deploy to. (Debug or Release) The default is Release.",
                  metavar="CONFIG")
parser.add_option("-d", "--deploy-pdb",
                  dest="deploy_pdb", default=False, action="store_true",
                  help="Deploy program debug database files (.PDB) along with the binaries.")
parser.add_option("-x", "--deploy-xmldoc",
                  dest="deploy_xmldoc", default=False, action="store_true",
                  help="Deploy XML API documentation along with the binaries.")
parser.add_option("-a", "--deploy-all",
                  dest="deploy_all", default=False, action="store_true",
                  help="Deploy all files that come with the binaries.")
(options, args) = parser.parse_args()

root_dir = options.root_dir
target_dir = os.path.join(root_dir, options.target_dir)
deploy_pdb = options.deploy_pdb or options.deploy_all
deploy_xmldoc = options.deploy_xmldoc or options.deploy_all
config = options.config

# Normalize the configuration name, accepting any capitalization.
if config.upper() == "RELEASE":
    config = "Release"
elif config.upper() == "DEBUG":
    config = "Debug"
else:
    print("Unknown configuration: {}".format(config))
    sys.exit(-1)

# Check if there are actual DLLs to copy, otherwise it must be compiled in VS.
if not os.path.isfile(os.path.join(root_dir, "Sharpmake/bin/{}/Sharpmake.dll".format(config))):
    print("Please build Sharpmake in it's {} configuration.".format(config))
    sys.exit(1)

# Create the target directory if needed. makedirs (rather than mkdir) also
# creates any missing intermediate directories; existing contents are kept.
os.makedirs(target_dir, exist_ok=True)
# Simple wrapper class that represents an output folder,
# ie: Sharpmake/bin/Release
class BinarySite:
    """A project output folder (e.g. Sharpmake/bin/Release).

    `copy()` deploys the built artifacts (.dll/.exe, and optionally
    .pdb/.xml) of that project into the module-level `target_dir`.
    """

    def __init__(self, name, path):
        self.name = name
        self.path = os.path.join(root_dir, path)

    def copy_file(self, src):
        """Copy `src` into target_dir; silently skip missing files so
        optional artifacts (.pdb/.xml) don't fail the deploy."""
        if os.path.isfile(src):
            # `src` is already absolute here, so print it as-is (joining it
            # with root_dir again would be a no-op at best).
            print("Copying {} to {}".format(src, target_dir))
            shutil.copy2(src, target_dir)

    def copy(self):
        bin_dir = os.path.join(self.path, "bin", config)
        # copy_file() already checks for existence, so no need to test each
        # path twice here.
        self.copy_file(os.path.join(bin_dir, self.name + ".dll"))
        self.copy_file(os.path.join(bin_dir, self.name + ".exe"))
        # Copy the program debug database if requested and present.
        if deploy_pdb:
            self.copy_file(os.path.join(bin_dir, self.name + ".pdb"))
        # Copy the XML API doc if requested and present.
        if deploy_xmldoc:
            self.copy_file(os.path.join(bin_dir, self.name + ".xml"))

    def __str__(self):
        return "{} ({})".format(self.name, self.path)
# The list of files to copy. We omit the extension because we want to try to
# copy more files than just the DLL.
copy_list = [
    BinarySite("Sharpmake", "Sharpmake"),
    BinarySite("Sharpmake.Application", "Sharpmake.Application"),
    BinarySite("Sharpmake.Generators", "Sharpmake.Generators"),
    BinarySite("SimpleNuGet", "SimpleNuGet")
]

# Add the platforms to the list of files to copy. Look under root_dir (not
# the current working directory) so the script works when launched from
# anywhere, matching how BinarySite resolves its paths.
platforms_root = os.path.join(root_dir, "Sharpmake.Platforms")
if os.path.isdir(platforms_root):
    for platform_dir in os.listdir(platforms_root):
        # Keep the path relative: BinarySite joins it with root_dir itself.
        site = BinarySite(platform_dir,
                          os.path.join("Sharpmake.Platforms", platform_dir))
        copy_list.append(site)

# Finally, do the copying.
for site in copy_list:
    site.copy()
|
import reapy_boost
if reapy_boost.is_inside_reaper():
    # reaper_python only exists inside REAPER's embedded interpreter,
    # so import it conditionally.
    from reaper_python import *
# Generated for ReaImGui v0.5.9
|
# Probability of a segmentation =
# Probability(first word) * Probability(rest)
# Best segmentation =
# one with highest probability
# Probability(word)
# estimated by counting
# Eg. Best segmentation("nowisthetime...")
# Pf("n") * Pr("owisthetime...") = .003% * 10^-30% = 10^-34%
# Pf("no") * Pr("wisthetime...") = .26% * 10^-26% = 10^-29%
# Pf("now") * Pr("isthetime...") = .23% * 10^-21% = 10^-24%
# Pf("nowi") * Pr("sthetime...") = 10^-7% * 10^-21% = 10^-30%
# ...
from utils import Pw, product, memo
def splits(characters, longest=12):
    "All ways to split chars into a first word and remainder"
    limit = min(longest, len(characters))
    return [(characters[:cut], characters[cut:]) for cut in range(1, limit + 1)]
def Pwords(words):
    "Probability of a word sequence: the product of Pw over each word."
    return product(words, key=Pw)
@memo
def segment(text):
    "Best segmentation of text into words, by probability."
    if text == "":
        return []
    # Try every (first word, remainder) split and keep the most probable.
    candidates = ([first] + segment(rest) for first, rest in splits(text))
    return max(candidates, key=Pwords)
import pyblish.api
class IntegrateAutoRigPublishing(pyblish.api.InstancePlugin):
    """Auto publishing rig when model being published

    After a model version is integrated, find every rig subset whose latest
    version depends on a *previous* version of that model and re-publish
    those rigs in a mayapy subprocess (this same file doubles as the
    subprocess script - see LauncherAutoPublish below).
    """

    label = "Integrate Auto Rig Publishing"
    # This plugin must run after disk and database integration
    order = pyblish.api.IntegratorOrder + 0.499
    hosts = ["maya"]
    families = ["reveries.model"]

    def process(self, instance):
        import os
        import re
        import sys
        import json
        import subprocess
        from avalon import io

        asset_doc = instance.data["assetDoc"]
        asset_name = asset_doc["name"]

        # Check asset's rigging task option
        value_path = "taskOptions.rigging.autoModelUpdate.value"
        value = asset_doc["data"]
        for entry in value_path.split("."):
            value = value.get(entry, {})
        if not value:
            # Auto model update not enabled
            return

        # Get subset, version documents from instance which just been
        # integrated.
        model_subset, model_version, _ = instance.data["toDatabase"]
        if model_version["name"] == 1:
            # First version of model, must not have dependent rig.
            return

        # Find all previous versions of model, only document id is needed.
        previous = io.find({"type": "version",
                            "parent": model_subset["_id"]},
                           sort=[("name", -1)],
                           projection={"_id": True},
                           skip=1)  # Skip the latest
        previous = set([str(p["_id"]) for p in previous])
        if not previous:
            self.log.warning("Model is now on version %d but has no previous, "
                             "skip updating rig." % model_version["name"])
            return

        # Any latest version of rig may not be using the latest model, so
        # we iterate through all rig subsets' latest version and compare
        # the dependency data with all previous model versions to find the
        # dependent.
        dependent_rigs = dict()
        for rig_subset in io.find({"type": "subset",
                                   "parent": asset_doc["_id"],
                                   "name": re.compile("rig*")},
                                  projection={"_id": True, "name": True}):
            latest_rig = io.find_one({"type": "version",
                                      "parent": rig_subset["_id"]},
                                     sort=[("name", -1)],
                                     projection={"data.dependencies": True})
            if latest_rig is None:
                # Not likely to happen, but just in case
                continue
            # Consider dependent if any dependency matched in model versions
            dependencies = set(latest_rig["data"]["dependencies"].keys())
            if dependencies.intersection(previous):
                dependent_rigs[str(latest_rig["_id"])] = rig_subset["name"]

        if not dependent_rigs:
            self.log.info("No rig to update, skip auto process.")
            return

        # Submit subprocess running this very file under mayapy.
        mayapy_exe = os.path.join(os.path.dirname(sys.executable),
                                  "mayapy.exe")
        cmd = [
            mayapy_exe,
            __file__,
            "asset_name={}".format(str(asset_name)),
            "model_subset={}".format(str(model_subset["name"])),
            "rig_versions={}".format(json.dumps(dependent_rigs)),
        ]
        print("auto rig cmd: {}".format(cmd))
        try:
            # BUGFIX: `shell=True` combined with a list argument only runs the
            # first element on POSIX and is unnecessary on Windows, so execute
            # the command list directly.
            out_bytes = subprocess.check_output(cmd)
        except subprocess.CalledProcessError:
            # Mark failed for future debug.
            io.update_many({"_id": model_version["_id"]},
                           {"$set": {"data.rigAutoUpdateFailed": True}})
            raise Exception("Model publish success but Rig auto update "
                            "failed. Please inform rigger or TD.")
        else:
            print(out_bytes)
class LauncherAutoPublish(object):
    """mayapy-side worker that re-publishes rig subsets against the newly
    published model.

    Instantiated when this file is run as a script (see the __main__ guard);
    arguments arrive on sys.argv as "key=value" pairs from
    IntegrateAutoRigPublishing.process.
    """

    def __init__(self):
        import sys
        import json
        # Parse "key=value" style arguments. Split only on the first '='
        # so values that themselves contain '=' (e.g. JSON payloads) are
        # preserved intact.
        kwargs = {}
        for _arg in sys.argv[1:]:
            key, _, value = _arg.partition("=")
            kwargs[key] = value
        self.asset_name = kwargs.get("asset_name", "")
        self.model_subset = kwargs.get("model_subset", "")
        self.rig_versions = json.loads(kwargs.get("rig_versions", "{}"))
        # Collected pyblish contexts; integrated together at the end of run()
        self.contexts = list()

    def run(self):
        """Publish every dependent rig, then integrate all of them only if
        every extraction succeeded."""
        from avalon import api, io
        import maya.standalone as standalone
        import pyblish.util
        standalone.initialize(name="python")
        # Get project root path and rig source files.
        jobs = dict()
        root = api.registered_root()
        for rig_version, rig_subset in self.rig_versions.items():
            version_id = io.ObjectId(rig_version)
            latest_ver = io.find_one({"type": "version", "_id": version_id})
            rig_source = latest_ver["data"]["source"].format(root=root)
            rig_source = rig_source.replace("\\", "/")
            if rig_source not in jobs:
                jobs[rig_source] = list()
            # One source scene may contain multiple rig subsets.
            jobs[rig_source].append(rig_subset)
        # Run publish process, till extraction
        for source, rig_subsets in jobs.items():
            self._publish(source, rig_subsets)
        # Run final integration only if all extraction succeed
        for context in self.contexts:
            context.data["_autoPublishingSkipUnlock"] = True
            pyblish.util.integrate(context=context)
        standalone.uninitialize()
        # Bye

    def _publish(self, rig_source, rig_subsets):
        """Open `rig_source`, update the matched model container, and run
        the pyblish pipeline up to (but not including) integration."""
        import os
        import re
        import pyblish.util
        import maya.cmds as cmds
        from avalon import api
        from reveries.maya import lib
        # Switch task
        api.update_current_task(task="rigging", asset=self.asset_name)
        # Open rig source file
        cmds.file(rig_source, open=True, force=True)
        # Update all loaded model which subset name has matched
        _updated = False
        host = api.registered_host()
        for _container in host.ls():
            if _container["name"] == self.model_subset:
                api.update(_container)
                _updated = True
        if not _updated:
            # Not likely to happen, but just in case
            raise Exception("No matched model subset, this is a bug.")
        # Config rig instances' activities
        # Activate rig instances that need to be published, and deactivate
        # the rest.
        for instance_set in lib.lsAttr("id", "pyblish.avalon.instance"):
            active = cmds.getAttr(instance_set + ".subset") in rig_subsets
            cmds.setAttr(instance_set + ".active", active)
        # Save as file
        _tmp_dir = os.path.join(os.path.dirname(rig_source), "_auto_update")
        if not os.path.exists(_tmp_dir):
            os.mkdir(_tmp_dir)
        # BUGFIX: the mode must be octal. The previous decimal 777 is 0o1411,
        # which left the directory without owner read/write permission.
        os.chmod(_tmp_dir, 0o777)
        # Compose a good file name
        basename, ext = os.path.splitext(os.path.basename(rig_source))
        if "auto_model_update" not in basename:
            _new_fname = "{}.auto_model_update.001{}".format(basename, ext)
        else:
            # NOTE(review): the dots in this pattern are unescaped and match
            # any character; kept as-is since existing file names rely on it.
            current_v = re.findall(".auto_model_update.(\\d+).", rig_source)[0]
            new_v = "{:03d}".format(int(current_v) + 1)
            _new_fname = "{}{}".format(basename, ext)
            _new_fname = _new_fname.replace(".{}.published.".format(current_v),
                                            ".{}.".format(new_v))
        _save_to = os.path.join(_tmp_dir, _new_fname)
        cmds.file(rename=_save_to)
        cmds.file(force=True, save=True)
        print("Saved to : {}".format(_save_to))
        # Publish
        pyblish.api.register_target("localhost")
        # Fix AvalonUUID before validate
        ValidateAvalonUUID = next(p for p in pyblish.api.discover()
                                  if p.__name__ == "ValidateAvalonUUID")
        for instance in pyblish.util.collect():
            try:
                ValidateAvalonUUID.fix_invalid_missing(instance)
            except Exception as e:
                print("Fix uuid failed: {}.".format(e))
        context = pyblish.util.collect()
        context.data["comment"] = "Auto update model to latest version."
        context = pyblish.util.validate(context=context)
        context = pyblish.util.extract(context=context)
        if not all(result["success"] for result in context.data["results"]):
            raise RuntimeError("Atomicity not held, aborting.")
        # Will run integration later..
        self.contexts.append(context)
if __name__ == "__main__":
    # Entry point when this file is executed under mayapy as a subprocess
    # (launched by IntegrateAutoRigPublishing.process).
    auto_publish = LauncherAutoPublish()
    auto_publish.run()
|
import requests
from requests.exceptions import RequestException
from service_vncorenlp.config_vncorenlp import HOST, PORT, ANNOTATOR
import os
class VnCoreNLP():
    """Thin HTTP client for a running VnCoreNLP annotation server.

    The server address comes from service_vncorenlp.config_vncorenlp;
    raises ConnectionError at construction time if it is unreachable.
    """

    def __init__(self):
        self.url = f"http://{HOST}:{PORT}"
        self.timeout = 30
        # Maximum attempts per annotate() request. Previously the retry
        # loop could spin forever if the server kept answering with a
        # falsy status.
        self.max_retries = 5
        self.annotators = set(ANNOTATOR.split(","))
        if not self.is_alive():
            raise ConnectionError("Run start_vncorenlp.sh to enable service VnCoreNLP")

    def is_alive(self):
        # Check if the server is alive
        try:
            response = requests.get(self.url, timeout=self.timeout)
            return response.ok
        except RequestException:
            # Any transport error (refused connection, timeout, ...) just
            # means the service is not reachable.
            pass
        return False

    def __get_annotators(self):
        # Get list of annotators from the server
        response = requests.get(self.url + '/annotators', timeout=self.timeout)
        response.raise_for_status()
        return response.json()

    def annotate(self, text, annotators=None):
        """POST `text` to /handle and return the decoded JSON response
        (without its 'status' key). `annotators` is a comma-separated
        subset of the annotators the server was started with."""
        if isinstance(annotators, str):
            assert self.annotators.issuperset(annotators.split(
                ',')), 'Please ensure that the annotators "%s" are being used on the server.' % annotators
        data = {
            'text': text.encode('UTF-8'),
            'props': annotators
        }
        response = None
        # Bounded retry: the server occasionally reports status == False
        # transiently; give up after max_retries attempts instead of
        # looping forever.
        for _attempt in range(self.max_retries):
            response = requests.post(self.url + '/handle', data=data, timeout=self.timeout)
            response.raise_for_status()
            response = response.json()
            if response['status']:
                break
        # .get() avoids a KeyError if the failure response lacks 'error'.
        assert response['status'], response.get('error')
        del response['status']
        return response

    def tokenize(self, text):
        """Word-segment `text`; returns a list of sentences of word forms."""
        sentences = self.annotate(text, annotators='wseg')['sentences']
        return [[w['form'] for w in s] for s in sentences]

    def pos_tag(self, text):
        """Return (form, POS tag) pairs per sentence."""
        sentences = self.annotate(text, annotators='wseg,pos')['sentences']
        return [[(w['form'], w['posTag']) for w in s] for s in sentences]

    def ner(self, text):
        """Return (form, NER label) pairs per sentence."""
        sentences = self.annotate(text, annotators='wseg,pos,ner')['sentences']
        return [[(w['form'], w['nerLabel']) for w in s] for s in sentences]

    def dep_parse(self, text):
        """Return (dependency label, governor index, dependent index)
        triples per sentence."""
        sentences = self.annotate(text, annotators='wseg,pos,ner,parse')['sentences']
        # dep, governor, dependent
        return [[(w['depLabel'], w['head'], w['index']) for w in s] for s in sentences]

    def detect_language(self, text):
        """Return the language code detected by the server."""
        return self.annotate(text, annotators='lang')['language']
|
"""
Code for preprocessing streamflow data.
"""
import collections
import os
from pathlib import Path
from shapely.geometry import Point
import numpy as np
import pandas as pd
from gisutils import shp2df, df2shp, project, get_values_at_points
from mfsetup.obs import make_obsname
from mfsetup.units import convert_volume_units, convert_time_units
from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area
def format_site_ids(iterable, add_leading_zeros=False):
    """Cast site ids to strings"""
    if add_leading_zeros:
        return [format_usgs_sw_site_id(site_id) for site_id in iterable]
    return [str(site_id) for site_id in iterable]
def format_usgs_sw_site_id(stationID):
    """Add leading zeros to NWIS surface water sites, if they are missing.
    See https://help.waterdata.usgs.gov/faq/sites/do-station-numbers-have-any-particular-meaning.
    Zeros are only added to numeric site numbers less than 15 characters in length.
    """
    site_id = str(stationID)
    needs_leading_zero = (not site_id.startswith('0')
                          and site_id.isdigit()
                          and 0 < int(site_id[0]) < 10
                          and len(site_id) < 15)
    if needs_leading_zero:
        return '0{}'.format(site_id)
    return site_id
def preprocess_flows(data, metadata=None, flow_data_columns=None,
                     start_date=None, active_area=None,
                     active_area_id_column=None,
                     active_area_feature_id=None,
                     source_crs=4269, dest_crs=5070,
                     datetime_col='datetime',
                     site_no_col='site_no',
                     line_id_col='line_id',
                     x_coord_col='x',
                     y_coord_col='y',
                     name_col='name',
                     flow_qualifier_column=None,
                     default_qualifier='measured',
                     include_sites=None,
                     include_line_ids=None,
                     source_volume_units='ft3',
                     source_time_units='s',
                     dest_volume_units='m3',
                     dest_time_units='d',
                     geographic_groups=None,
                     geographic_groups_col=None,
                     max_obsname_len=None,
                     add_leading_zeros_to_sw_site_nos=False,
                     column_renames=None,
                     outfile=None,
                     ):
    """Preprocess stream flow observation data, for example, from NWIS or another data source that
    outputs time series in CSV format with site locations and identifiers.

    * Data are reprojected from a `source_crs` (Coordinate reference system; assumed to be in geographic coordinates)
      to the CRS of the model (`dest_crs`)
    * Data are culled to a `start_date` and optionally, a polygon or set of polygons defining the model area
    * length and time units are converted to those of the groundwater model.
    * Prefixes for observation names (with an optional length limit) that identify the location are generated
    * Preliminary observation groups can also be assigned, based on geographic areas defined by polygons
      (`geographic_groups` parameter)

    Parameters
    ----------
    data : csv file or DataFrame
        Time series of stream flow observations.
        Columns:

        ===================== ======================================
        site_no               site identifier
        datetime              measurement dates/times
        x                     x-coordinate of site
        y                     y-coordinate of site
        flow_data_columns     Columns of observed streamflow values
        flow_qualifier_column Optional column with qualifiers for flow values
        ===================== ======================================

        Notes:

        * x and y columns can alternatively be in the metadata table
        * flow_data_columns are denoted in `flow_data_columns`; multiple
          columns can be included to process base flow and total flow, or
          other statistics in tandem
        * For example, `flow_qualifier_column` may have "estimated" or "measured"
          flags denoting whether streamflows were derived from measured values
          or statistical estimates.
    metadata : csv file or DataFrame
        Stream flow observation site information.
        May include columns:

        ================= ================================================================================
        site_no           site identifier
        x                 x-coordinate of site
        y                 y-coordinate of site
        name              name of site
        line_id_col       Identifier for a line in a hydrography dataset that the site is associated with.
        ================= ================================================================================

        Notes:

        * other columns in metadata will be passed through to the metadata output
    flow_data_columns : list of strings
        Columns in data with flow values or their statistics.
        By default, ['flow']
    start_date : str (YYYY-mm-dd)
        Simulation start date (cull observations before this date)
    active_area : str
        Shapefile with polygon to cull observations to. Automatically reprojected
        to dest_crs if the shapefile includes a .prj file.
        by default, None.
    active_area_id_column : str, optional
        Column in active_area with feature ids.
        By default, None, in which case all features are used.
    active_area_feature_id : str, optional
        ID of feature to use for active area
        By default, None, in which case all features are used.
    source_crs : obj
        Coordinate reference system of the head observation locations.
        A Python int, dict, str, or :class:`pyproj.crs.CRS` instance
        passed to :meth:`pyproj.crs.CRS.from_user_input`

        Can be any of:
          - PROJ string
          - Dictionary of PROJ parameters
          - PROJ keyword arguments for parameters
          - JSON string with PROJ parameters
          - CRS WKT string
          - An authority string [i.e. 'epsg:4326']
          - An EPSG integer code [i.e. 4326]
          - A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
          - An object with a `to_wkt` method.
          - A :class:`pyproj.crs.CRS` class

        By default, epsg:4269
    dest_crs : obj
        Coordinate reference system of the model. Same input types
        as ``source_crs``.
        By default, epsg:5070
    datetime_col : str, optional
        Column name in data with observation date/times,
        by default 'datetime'
    site_no_col : str, optional
        Column name in data and metadata with site identifiers,
        by default 'site_no'
    line_id_col : str, optional
        Column name in data or metadata with identifiers for
        hydrography lines associated with observation sites.
        by default 'line_id'
    x_coord_col : str, optional
        Column name in data or metadata with x-coordinates,
        by default 'x'
    y_coord_col : str, optional
        Column name in data or metadata with y-coordinates,
        by default 'y'
    name_col : str, optional
        Column name in data or metadata with observation site names,
        by default 'name'
    flow_qualifier_column : str, optional
        Column name in data with flow observation qualifiers, such
        as "measured" or "estimated"
        by default 'category'
    default_qualifier : str, optional
        Default qualifier to populate flow_qualifier_column if it
        is None. By default, "measured"
    include_sites : list-like, optional
        Exclude output to these sites.
        by default, None (include all sites)
    include_line_ids : list-like, optional
        Exclude output to these sites, represented by line identifiers.
        by default, None (include all sites)
    source_volume_units : str, 'm3', 'cubic meters', 'ft3', etc.
        Volume units of the source data. By default, 'ft3'
    source_time_units : str, 's', 'seconds', 'days', etc.
        Time units of the source data. By default, 's'
    dest_volume_units : str, 'm3', 'cubic meters', 'ft3', etc.
        Volume units of the output (model). By default, 'm3'
    dest_time_units : str, 's', 'seconds', 'days', etc.
        Time units of the output (model). By default, 'd'
    geographic_groups : file, dict or list-like
        Option to group observations by area(s) of interest. Can
        be a shapefile, list of shapefiles, or dictionary of shapely polygons.
        A 'group' column will be created in the metadata, and observation
        sites within each polygon will be assigned the group name
        associated with that polygon.

        For example::

            geographic_groups='../source_data/extents/CompositeHydrographArea.shp'
            geographic_groups=['../source_data/extents/CompositeHydrographArea.shp']
            geographic_groups={'cha': <shapely Polygon>}

        Where 'cha' is an observation group name for observations located within the
        the area defined by CompositeHydrographArea.shp. For shapefiles,
        group names are provided in a `geographic_groups_col`.
    geographic_groups_col : str
        Field name in the `geographic_groups` shapefile(s) containing the
        observation group names associated with each polygon.
    max_obsname_len : int or None
        Maximum length for observation name prefix. Default of 13
        allows for a PEST obsnme of 20 characters or less with
        <prefix>_yyyydd or <prefix>_<per>d<per>
        (e.g. <prefix>_2d1 for a difference between stress periods 2 and 1)
        If None, observation names will not be truncated. PEST++ does not have
        a limit on observation name length.
    add_leading_zeros_to_sw_site_nos : bool
        Whether or not to pad site numbers using the
        :func:~`mapgwm.swflows.format_usgs_sw_site_id` function.
        By default, False.
    column_renames : dict, optional
        Option to rename columns in the data or metadata that are different than those listed above.
        For example, if the data file has a 'SITE_NO' column instead of 'SITE_BADGE'::

            column_renames={'SITE_NO': 'site_no'}

        by default None, in which case the renames listed above will be used.
        Note that the renames must be the same as those listed above for
        :func:`mapgwm.swflows.preprocess_flows` to work.
    outfile : str
        Where output file will be written. Metadata are written to a file
        with the same name, with an additional "_info" suffix prior to
        the file extension.

    Returns
    -------
    data : DataFrame
        Preprocessed time series
    metadata : DataFrame
        Preprocessed metadata

    References
    ----------
    `The PEST++ Manual <https://github.com/usgs/pestpp/tree/master/documentation>`

    Notes
    -----
    """
    # resolve the default here instead of using a mutable default argument;
    # work on a copy so the caller's list is never rebound/changed
    if flow_data_columns is None:
        flow_data_columns = ['flow']
    else:
        flow_data_columns = list(flow_data_columns)

    # outputs
    if outfile is not None:
        outpath, _ = os.path.split(outfile)
        makedirs(outpath)
        outname, _ext = os.path.splitext(outfile)
        out_info_csvfile = outname + '_info.csv'
        out_data_csvfile = outfile
        out_shapefile = outname + '_info.shp'

    # read the source data
    if not isinstance(data, pd.DataFrame):
        df = pd.read_csv(data, dtype={site_no_col: object})
    else:
        df = data.copy()

    # check the columns
    for col in [datetime_col] + flow_data_columns:
        assert col in df.columns, "Column {} not found in {}".format(col,
                                                                     data)
    assert any({site_no_col, line_id_col}.intersection(df.columns)), \
        "Neither {} or {} found in {}. Need to specify a site_no_col or line_id_col".format(site_no_col,
                                                                                           line_id_col, data)
    # rename input columns to these names,
    # for consistent output
    dest_columns = {datetime_col: 'datetime',
                    site_no_col: 'site_no',
                    line_id_col: 'line_id',
                    x_coord_col: 'x',
                    y_coord_col: 'y',
                    name_col: 'name',
                    flow_qualifier_column: 'category'
                    }
    # update the default column renames
    # with any supplied via column_renames parameter
    # (collections.Mapping was removed in Python 3.10; use collections.abc)
    if isinstance(column_renames, collections.abc.Mapping):
        dest_columns.update(column_renames)
    df.rename(columns=dest_columns, inplace=True)
    flow_data_columns = [c if c not in dest_columns else dest_columns[c]
                         for c in flow_data_columns]

    # convert site numbers to strings;
    # add leading 0s to any USGS sites that should have them
    if 'site_no' in df.columns:
        df['site_no'] = format_site_ids(df['site_no'], add_leading_zeros_to_sw_site_nos)
    else:
        # the rename above mapped line_id_col to 'line_id', so refer to the
        # renamed column (the original name may no longer exist)
        df['site_no'] = df['line_id']

    # read the source metadata
    if metadata is not None:
        if not isinstance(metadata, pd.DataFrame):
            md = pd.read_csv(metadata, dtype={site_no_col: object})
        else:
            md = metadata.copy()
        if site_no_col not in md.columns or 'site_no' not in df.columns:
            raise IndexError('If metadata are supplied, both data and metadata must '
                             'have a site_no column.')
        md.rename(columns=dest_columns, inplace=True)
        md['site_no'] = format_site_ids(md['site_no'], add_leading_zeros_to_sw_site_nos)
        md.index = md['site_no']
        by_site = df.groupby('site_no')
        md['start_dt'] = pd.DataFrame(by_site['datetime'].first())
    else:
        by_site = df.groupby('site_no')
        md = pd.DataFrame(by_site['datetime'].first())
        md.columns = ['start_dt']
        md['site_no'] = md.index

    md['end_dt'] = pd.DataFrame(by_site['datetime'].last())
    md['n'] = pd.DataFrame(by_site['datetime'].count())
    md.reset_index(inplace=True, drop=True)

    # assign metadata if supplied
    for col in 'x', 'y', 'line_id', 'name':
        if col in df.columns and col not in md.columns:
            by_site_no = dict(zip(df['site_no'], df[col]))
            md[col] = [by_site_no[sn] for sn in md['site_no']]
            if col != 'line_id':
                df.drop(col, axis=1, inplace=True)

    # index the dataframe to times;
    # truncate data before start date
    df.index = pd.to_datetime(df['datetime'])
    df.index.name = 'datetime'
    df = df.loc[start_date:].copy()

    # project x, y to model crs
    x_pr, y_pr = project((md.x.values, md.y.values), source_crs, dest_crs)
    md['x'], md['y'] = x_pr, y_pr
    md['geometry'] = [Point(x, y) for x, y in zip(x_pr, y_pr)]

    # cull data to that within the model area
    if active_area is not None:
        df, md = cull_data_to_active_area(df, active_area,
                                          active_area_id_column,
                                          active_area_feature_id,
                                          data_crs=dest_crs, metadata=md)

    if include_sites is not None:
        md = md.loc[md.site_no.isin(include_sites)]
        df = df.loc[df.site_no.isin(include_sites)]
    if include_line_ids is not None:
        md = md.loc[md.line_id.isin(include_line_ids)]
        df = df.loc[df.line_id.isin(include_line_ids)]

    # convert units
    # ensure that flow values are numeric (may be objects if taken directly from NWIS)
    unit_conversion = (convert_volume_units(source_volume_units, dest_volume_units) /
                       convert_time_units(source_time_units, dest_time_units))
    for flow_col in flow_data_columns:
        df[flow_col] = pd.to_numeric(df[flow_col], errors='coerce') * unit_conversion
    df.dropna(subset=flow_data_columns, axis=0, inplace=True)

    # reformat qualifiers for consistent output
    # (lump to dest category columns of either estimated or measured)
    # with measured including values derived from baseflow separation or actual measurements)
    # output column name for flow qualifier column:
    dest_flow_qualifier_column = 'category'
    if flow_qualifier_column is not None:
        flow_qualifiers = {'calculated': 'measured',  # 'measured',
                           'base flow separated from measured values': 'measured',  # 'measured',
                           'measured total flow': 'measured',
                           'estimated gaged': 'estimated',
                           'estimated ungaged': 'estimated'}
        # flow_qualifier_column was renamed to 'category' above, so replace
        # in place on the renamed column (the original name may be gone)
        df[dest_flow_qualifier_column] = \
            df[dest_flow_qualifier_column].replace(flow_qualifiers)
    else:
        df['category'] = default_qualifier

    # make unique n-character prefixes (site identifiers) for each observation location
    # 13 character length allows for prefix_yyyymmm in 20 character observation names
    # (BeoPEST limit)
    unique_obsnames = set()
    obsnames = []
    for sn in md['site_no'].tolist():
        if max_obsname_len is not None:
            name = make_obsname(sn, unique_names=unique_obsnames,
                                maxlen=max_obsname_len)
            assert name not in unique_obsnames
        else:
            name = sn
        unique_obsnames.add(name)
        obsnames.append(name)
    md['obsprefix'] = obsnames

    # add area of interest information
    md['group'] = 'fluxes'
    md = assign_geographic_obsgroups(md, geographic_groups,
                                     geographic_groups_col,
                                     metadata_crs=dest_crs)

    # data columns
    data_cols = ['site_no', 'line_id', 'datetime'] + flow_data_columns + ['category']
    data_cols = [c for c in data_cols if c in df.columns]
    df = df[data_cols]
    md.index = md['site_no']

    # save out the results
    if outfile is not None:
        df2shp(md.drop(['x', 'y'], axis=1),
               out_shapefile, crs=dest_crs)
        print('writing {}'.format(out_info_csvfile))
        md.drop('geometry', axis=1).to_csv(out_info_csvfile, index=False, float_format='%g')
        print('writing {}'.format(out_data_csvfile))
        df.to_csv(out_data_csvfile, index=False, float_format='%g')
    return df, md
def combine_measured_estimated_values(measured_values, estimated_values,
                                      measured_values_data_col, estimated_values_data_col,
                                      dest_values_col='obsval',
                                      resample_freq='MS', how='mean'):
    """Combine time series of measured and estimated values for multiple sites,
    giving preference to measured values.

    Parameters
    ----------
    measured_values : csv file or DataFrame
        Time series of measured values at multiple sites, similar to that
        output by :func:`~mapgwm.swflows.preprocess_flows`.
        Columns:

        ===================== ===========================================
        site_no               site identifiers; read-in as strings
        datetime              measurement dates/times
        data columns          columns with floating-point data to combine
        ===================== ===========================================

    estimated_values : csv file or DataFrame
        Time series of estimated values at multiple sites, same layout
        as `measured_values`.
    measured_values_data_col : str
        Column in `measured_values` with data to combine.
    estimated_values_data_col : str
        Column in `estimated_values` with data to combine.
    dest_values_col : str
        Output column with combined data from `measured_values_data_col`
        and `estimated_values_data_col`, by default 'obsval'
    resample_freq : str or DateOffset
        Any `pandas frequency alias <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases>`_
        The data columns in `measured_values` and `estimated_values` are resampled
        to this frequency using the method specified by ``how``.
        By default, 'MS' (month-start)
    how : str
        Resample method. Can be any of the method calls on the pandas
        `Resampler <https://pandas.pydata.org/pandas-docs/stable/reference/resampling.html>`_
        object. By default, 'mean'

    Returns
    -------
    combined : DataFrame
        DataFrame containing all columns from `estimated_values`, the data columns
        from `measured_values`, and a `dest_values_col` consisting of measured values where
        present, and estimated values otherwise. An ``"est_"`` prefix is added to the
        estimated data columns, and a ``"meas_"`` prefix is added to the measured data
        columns. ``category`` denotes whether each value is measured or estimated.

    Notes
    -----
    All columns with a floating-point dtype are identified as "Data columns," and
    are resampled as specified by the `resample_freq` and `how` arguments. For all other
    columns, the first value for each time at each site is used. The resampled measured
    data are joined to the resampled estimated data on the basis of site numbers
    and times (as a pandas `MultiIndex`).
    """
    # read in the data; site numbers are forced to strings so leading zeros survive
    if not isinstance(measured_values, pd.DataFrame):
        measured = pd.read_csv(measured_values, dtype={'site_no': object})
        measured['datetime'] = pd.to_datetime(measured['datetime'])
        measured.index = measured['datetime']
    else:
        measured = measured_values.copy()
    if not isinstance(estimated_values, pd.DataFrame):
        df = pd.read_csv(estimated_values, dtype={'site_no': object})
        df['datetime'] = pd.to_datetime(df['datetime'])
        df.index = df['datetime']
    else:
        df = estimated_values.copy()
    # resample both timeseries to the resample_freq
    df_rs = resample_group_timeseries(df, resample_freq=resample_freq, how=how,
                                      add_data_prefix='est')
    df_rs['category'] = 'estimated'
    measured_rs = resample_group_timeseries(measured, resample_freq=resample_freq, how=how,
                                            add_data_prefix='meas')
    # fill any nan values created in resampling with 'measured' classifier
    measured_rs['category'] = 'measured'
    # add the measured values to the estimated
    # (np.float was removed in numpy 1.24; the builtin float selects the
    # same float64 columns)
    measured_data_columns = measured_rs.select_dtypes(include=[float]).columns
    # join the data columns
    combined = df_rs.join(measured_rs[measured_data_columns], how='outer')
    # update the site_no, datetime and category columns from measured
    # (that were not filled in outer join)
    combined.update(measured_rs)
    # make the output values column, starting with the estimated values
    combined[dest_values_col] = combined['est_' + estimated_values_data_col]
    # prefix was added to measured data column by resample_group_timeseries
    measured_values_data_col = 'meas_' + measured_values_data_col
    # populate the output column with all measured values
    # (over-writing any pre-existing estimated values)
    has_measured = ~combined[measured_values_data_col].isna()
    combined.loc[has_measured, dest_values_col] = combined.loc[has_measured, measured_values_data_col]
    # verify that only nan values in the output column are for measured sites
    # (that don't have estimated values)
    # use dest_values_col here (not a hard-coded 'obsval') so that
    # non-default output column names work
    assert np.all(combined.loc[combined[dest_values_col].isna(), 'category'] == 'measured')
    # drop observations with no measured or estimated values
    combined.dropna(subset=[dest_values_col], axis=0, inplace=True)
    return combined.reset_index(drop=True)
def resample_group_timeseries(df, resample_freq='MS', how='mean',
                              add_data_prefix=None):
    """Resample a DataFrame with both groups (e.g. measurement sites)
    and time series (measurements at each site).

    Parameters
    ----------
    df : DataFrame
        Time series of values at multiple sites, similar to that
        output by :func:`~mapgwm.swflows.preprocess_flows`. Must have a
        :class:`pandas.DatetimeIndex` and columns:

        ===================== ===========================================
        site_no               site identifiers; read-in as strings
        datetime              measurement dates/times
        data columns          columns with floating-point data to resample
        ===================== ===========================================

    resample_freq : str or DateOffset
        Any `pandas frequency alias <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases>`_
        Data columns are resampled to this frequency using the method
        specified by ``how``. By default, 'MS' (month-start)
    how : str
        Resample method. Can be any of the method calls on the pandas
        `Resampler <https://pandas.pydata.org/pandas-docs/stable/reference/resampling.html>`_
        object. By default, 'mean'
    add_data_prefix : str
        Option to add a prefix (e.g. ``'est'``) to the data column names.
        By default, None

    Returns
    -------
    resampled : DataFrame
        Resampled data at each site, with ``site_no`` and ``datetime``
        columns rebuilt from the (site, time) MultiIndex.

    Notes
    -----
    All columns with a floating-point dtype are identified as "Data columns," and
    are resampled as specified by the `resample_freq` and `how` arguments. For all other
    columns, the first value for each time at each site is used.
    """
    if not isinstance(df.index, pd.DatetimeIndex):
        raise ValueError("Input must have a DatetimeIndex.")
    df_resampler = df.groupby('site_no').resample(resample_freq)
    # first() carries the first value in each bin for the non-data columns
    df_rs = df_resampler.first().copy()
    # np.float was removed in numpy 1.24; the builtin float selects the
    # same float64 columns
    data_columns = df_rs.select_dtypes(include=[float]).columns
    # restrict to columns actually present in the resampled frame:
    # newer pandas excludes the grouping column ('site_no') from the
    # groupby/resample result, which previously caused a KeyError below
    non_data_columns = [c for c in df.columns
                        if c not in data_columns and c in df_rs.columns]
    resampled_values = getattr(df_resampler[list(data_columns)], how)()
    # put the non-data and data columns back together
    df_rs = df_rs[non_data_columns].join(resampled_values)
    # optionally prefix the data column names (e.g. 'q' -> 'est_q');
    # rename preserves column order, unlike the previous drop/append loop
    if add_data_prefix is not None:
        df_rs.rename(columns={c: add_data_prefix + '_' + c for c in data_columns},
                     inplace=True)
    # index dates may vary depending on freq (e.g. 'MS' vs 'M'), so rebuild
    # site_no/datetime columns from the (site, time) MultiIndex
    df_rs['site_no'] = df_rs.index.get_level_values(0)
    df_rs['datetime'] = df_rs.index.get_level_values(1)
    return df_rs
def aggregrate_values_to_stress_periods(data, perioddata,
                                        datetime_col='datetime',
                                        values_col='values',
                                        id_col='id',
                                        category_col='qualifier',
                                        keep_columns=None):
    """Pandas sausage-making to take flow values at arbitrary times,
    and average them to model stress periods defined in a perioddata dataframe.
    Optionally, a category column identifying flow values as 'measured' or
    'estimated' can also be read. Measured values are used for the averages
    where available; the number of measured and estimated values contributing
    to each average are tallied in the output.
    Parameters
    ----------
    data : DataFrame
        Input data
    perioddata : DataFrame
        Stress Period start/end times. Must have columns:
        ============== ================= ========================
        per            int               MODFLOW Stress Period
        start_datetime str or datetime64 Period start time
        end_datetime   str or datetime64 Period end time
        ============== ================= ========================
    datetime_col : str
        Column in data for Measurement dates (str or datetime64)
    id_col: int or str
        Column in data identifying the hydrography line or measurement site
        associated with flow value. Valid identifiers corresponding to the source
        hydrography (e.g. NHDPlus COMIDs) may be needed for locating the flows
        within the stream network, unless x and y coordinates are available.
    values_col : float
        Column in data with observed or estimated values.
    category_col : str; categorical
        Column in data with 'measured' or 'estimated' flags indicating how each flow
        value was derived. If None, 'measured' is used for all flows. By default, 'qualifier'.
    keep_columns : list of str, optional
        Additional columns in data (e.g. site numbers or names that differ from
        the hydrography identifiers in id_col) to carry through to the output,
        mapped to each output row via id_col. By default, None.
    Returns
    -------
    df_per : DataFrame
        Stress period averages, and metadata describing the source of the
        averages (number of estimated vs. measured values). For each site,
        also includes averages and standard deviations for measurements
        from outside the stress periods defined in perioddata. For sites
        with no measurements within a stress period, an average of all
        other measurements is used.
    Notes
    -----
    This method is similar to mfsetup.tdis.aggregate_dataframe_to_stress_period in
    what it does (resample time series to model stress periods), and for modflow-setup,
    has is superceded by that method (called via TransientTabularData object). Keeping this
    method here though, in case we need to use it in the future.
    Key differences between two methods:
    * this method operates on the whole timeseres of model stress periods instead of a single stress period
    * this method allows for specification of both measured and estimated values; the number of estimated and measured values
      contributing to each average are included in the output
    * duplicate sites in mfsetup.tdis.aggregate_dataframe_to_stress_period (for example,
      to handle wells located in the same model cell) can be aggregated with sum, mean, first, etc., or
      by raising an error. In this method, the duplicate site with the most measurements (vs. estimates) is retained.
    * this method fills stress periods without measurements using the mean values for all time.
    """
    # make dictionary of data keyed by site
    values = data.copy()
    # optionally keep track of auxillary data
    # (site number, name, etc.)
    if keep_columns is not None:
        auxillary_info = {}
        for col in keep_columns:
            auxillary_info[col] = dict(zip(values[id_col], values[col]))
    # create category column if there is none, to conform to logic below
    if category_col not in values.columns:
        values[category_col] = 'measured'
    values[datetime_col] = pd.to_datetime(values[datetime_col])
    # 3-level index: (site id, time, measured/estimated)
    values.index = pd.MultiIndex.from_tuples(list(zip(values[id_col], values[datetime_col], values[category_col])))
    # split into one sub-DataFrame per site
    values = {s: values.loc[s] for s in values.index.levels[0]}
    # Compute average base flow for each site, stress period
    # fill missing values using means
    # add columns describing source of base flow values
    # vs = [] # list of processed monthly baseflow dataframes
    vpers = [] # list of processed period baseflow dataframes
    for k, v in values.items():
        # create single set of values
        # Use measured data where available; estimates where unvailable
        # Note: unstack will fail if there are two measurement sites on the same line_id
        # (with the same dates, which would be duplicates wrt the line_id)
        # look at the fraction of values that are estimated;
        # keep the site with the most measurements
        if np.any(np.any(v.index.duplicated())):
            duplicate_sites = np.array(list(set(v.site_no)))
            pct_estimated = []
            for site_no in duplicate_sites:
                site_values = v.loc[v.site_no == site_no]
                pct_estimated.append(np.sum(site_values[category_col] == 'estimated') / len(site_values))
            # keep the duplicate with the lowest fraction of estimates
            inds = np.argsort(pct_estimated)
            duplicate_sites = duplicate_sites[inds]
            v = v.loc[v.site_no == duplicate_sites[0]].copy()
        # unstack so 'measured'/'estimated' become sub-columns of values_col
        v = v.drop([datetime_col, category_col], axis=1).unstack(level=1)
        if 'measured' in v[values_col].columns:
            if len(v[values_col].columns) > 1:
                # both measured and estimated present:
                # prefer measured, fall back to estimated
                v['picked'] = v[values_col]['measured']
                notmeasured = np.isnan(v[values_col]['measured'])
                v.loc[notmeasured, 'picked'] = v[values_col]['estimated'].loc[notmeasured]
                v['method'] = 'measured'
                v.loc[notmeasured, 'method'] = 'estimated'
                if np.any(notmeasured):
                    assert v.loc[notmeasured].method.unique()[0] == 'estimated'
                if np.any(~notmeasured):
                    assert v.loc[~notmeasured].method.unique()[0] == 'measured'
            else:
                v['picked'] = v[values_col]['measured']
                v['method'] = 'measured'
        elif 'estimated' in v[values_col].columns:
            v['picked'] = v[values_col]['estimated']
            v['method'] = 'estimated'
        elif 'measured total flow' in v[values_col].columns:
            v['picked'] = v[values_col]['measured total flow'] # so that tallies go in "n_measured"
            v['method'] = 'measured total flow'
        else:
            # no recognized category; leave v unchanged
            # NOTE(review): the v[['picked', 'method']] selection below will
            # raise a KeyError in this case -- confirm it cannot occur
            pass
        v = v[['picked', 'method']].copy()
        v.columns = v.columns.droplevel(level=1)
        # Figure out which stress period each discharge value is in
        # -9999 indicates values outside of the simulation timeframe
        v['per'] = -9999
        dfs = []
        for i, r in perioddata.iterrows():
            v.loc[r.start_datetime:r.end_datetime, 'per'] = r.per
            # shave end date off dates in period
            # (otherwise first month of next period will be included)
            per_end_datetime = pd.Timestamp(r.end_datetime) - pd.Timedelta(1)
            df = v.loc[r.start_datetime:per_end_datetime].copy()
            df['per'] = r.per
            dfs.append(df)
        dfs.append(v.loc[v.per == -9999]) # keep values outside of the simulation timeframe
        v = pd.concat(dfs)
        # Tally number of measured vs. estimated values for each SP
        vper = v.groupby(['per', 'method']).count().picked.unstack()
        vper.rename(columns={'estimated': 'n_estimated',
                             'measured': 'n_measured'}, inplace=True)
        for c in ['n_estimated', 'n_measured']:
            if c not in vper.columns:
                vper[c] = 0
        # compute mean value and stdev for each stress period
        # NOTE(review): relies on groupby().mean()/std() reducing to the single
        # numeric column 'picked'; newer pandas may require numeric_only -- verify
        vper['Q_avg'] = v.groupby('per').mean()
        vper['Q_std'] = v.groupby('per').std()
        vper.columns.name = None
        # reindex to model times dataframe
        # (pandas will fill empty periods with nans)
        # use an outer join to retain values outside of the model time period
        vper = perioddata.join(vper, how='outer')
        # fill in the start/end times for data outside of simulation period
        vper.loc[-9999, 'start_datetime'] = v.loc[v.per == -9999].index.min()
        vper.loc[-9999, 'end_datetime'] = v.loc[v.per == -9999].index.max()
        vper.loc[-9999, 'per'] = -9999
        vper.per = vper.per.astype(int)
        # nans in n_measured and n_estimated columns mean there were no values
        # for that period. Fill with zero, convert columns to ints.
        vper.fillna({'n_measured': 0, 'n_estimated': 0}, inplace=True)
        vper['n_measured'] = vper.n_measured.astype(int)
        vper['n_estimated'] = vper.n_estimated.astype(int)
        # fill in the missing data with mean value
        vper['filled'] = np.isnan(vper.Q_avg)
        vper['Q_avg'] = vper.Q_avg.fillna(vper.Q_avg.mean())
        v[id_col] = k
        vper[id_col] = k
        # map any auxillary info back to output using site numbers
        if keep_columns is not None:
            for col in keep_columns:
                vper[col] = auxillary_info[col][k]
        # vs.append(v)
        vpers.append(vper)
    df_per = pd.concat(vpers, sort=True)
    return df_per
import uuid
import pytest
from fastapi import UploadFile
from jinad.api.endpoints import flow
# fixed flow id shared by the mock store functions and the assertions below
_temp_id = uuid.uuid1()
def mock_create_success(**kwargs):
    """Stand-in for flow_store._create: returns (flow_id, host, port) on success."""
    return _temp_id, '0.0.0.0', 12345
def mock_flow_creation_exception(**kwargs):
    """Stand-in for flow_store._create that fails with FlowCreationException."""
    raise flow.FlowCreationException
def mock_flow_parse_exception(**kwargs):
    """Stand-in for flow_store._create that fails with FlowYamlParseException."""
    raise flow.FlowYamlParseException
def mock_flow_start_exception(**kwargs):
    """Stand-in for flow_store._create that fails with FlowStartException."""
    raise flow.FlowStartException
def mock_fetch_success(**kwargs):
    """Stand-in for flow_store._get: returns (host, port, yaml spec) on success."""
    return '0.0.0.0', 12345, '!Flow\npods:\n  pod1:\n    uses:_pass'
def mock_fetch_exception(**kwargs):
    """Stand-in for flow_store._get/_delete that fails with KeyError (unknown id)."""
    raise KeyError
@pytest.mark.asyncio
async def test_create_from_pods_success(monkeypatch):
    """_create_from_pods reports id/host/port and 'started' when the store succeeds."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
    response = await flow._create_from_pods()
    assert response['status_code'] == 200
    assert response['flow_id'] == _temp_id
    assert response['host'] == '0.0.0.0'
    assert response['port'] == 12345
    assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_pods_flow_create_exception(monkeypatch):
    """A FlowCreationException from the store maps to HTTP 404 'Bad pods args'."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_flow_creation_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._create_from_pods()
    assert response.value.status_code == 404
    assert response.value.detail == 'Bad pods args'
@pytest.mark.asyncio
async def test_create_from_pods_flow_start_exception(monkeypatch):
    """A FlowStartException from the store maps to HTTP 404 with a start-failure detail."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._create_from_pods()
    assert response.value.status_code == 404
    assert response.value.detail == 'Flow couldn\'t get started'
@pytest.mark.asyncio
async def test_create_from_yaml_success(monkeypatch):
    """_create_from_yaml reports id/host/port and 'started' when the store succeeds."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
    response = await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
                                            uses_files=[UploadFile(filename='abcd.yaml')],
                                            pymodules_files=[UploadFile(filename='abc.py')])
    assert response['status_code'] == 200
    assert response['flow_id'] == _temp_id
    assert response['host'] == '0.0.0.0'
    assert response['port'] == 12345
    assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_yaml_parse_exception(monkeypatch):
    """A FlowYamlParseException from the store maps to HTTP 404 'Invalid yaml file.'."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_flow_parse_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
                                     uses_files=[UploadFile(filename='abcd.yaml')],
                                     pymodules_files=[UploadFile(filename='abc.py')])
    assert response.value.status_code == 404
    assert response.value.detail == 'Invalid yaml file.'
@pytest.mark.asyncio
async def test_create_from_yaml_flow_start_exception(monkeypatch):
    """A FlowStartException during yaml creation maps to HTTP 404 with a start-failure detail."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
                                     uses_files=[UploadFile(filename='abcd.yaml')],
                                     pymodules_files=[UploadFile(filename='abc.py')])
    assert response.value.status_code == 404
    assert 'Flow couldn\'t get started' in response.value.detail
@pytest.mark.asyncio
async def test_fetch_flow_success(monkeypatch):
    """_fetch returns host/port and the yaml spec for a known flow id."""
    monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
    response = await flow._fetch(_temp_id)
    assert response['status_code'] == 200
    assert response['host'] == '0.0.0.0'
    assert response['port'] == 12345
    assert response['yaml'] == '!Flow\npods:\n  pod1:\n    uses:_pass'
@pytest.mark.asyncio
async def test_fetch_flow_success_yaml_only(monkeypatch):
    """With yaml_only=True, _fetch returns a raw yaml body response instead of a dict."""
    monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
    response = await flow._fetch(_temp_id, yaml_only=True)
    assert response.status_code == 200
    assert response.body == b'!Flow\npods:\n  pod1:\n    uses:_pass'
    assert response.media_type == 'application/yaml'
@pytest.mark.asyncio
async def test_fetch_flow_keyerror(monkeypatch):
    """A KeyError from the store maps to HTTP 404 with a not-found detail."""
    monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._fetch(_temp_id)
    assert response.value.status_code == 404
    assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
def mock_ping_exception(**kwargs):
    """Stand-in for the grpc client that fails with GRPCServerError."""
    raise flow.GRPCServerError
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_success(monkeypatch, mocker):
    """_ping reports 'connected' on success.
    NOTE(review): monkeypatch/mocker fixtures are currently unused (test is skipped)."""
    response = await flow._ping(host='0.0.0.0', port=12345)
    assert response['status_code'] == 200
    assert response['detail'] == 'connected'
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_exception(monkeypatch):
    """A GRPCServerError during ping maps to HTTP 404 with a connect-failure detail."""
    monkeypatch.setattr(flow, 'py_client', mock_ping_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._ping(host='0.0.0.0', port=12345)
    assert response.value.status_code == 404
    assert response.value.detail == 'Cannot connect to GRPC Server on 0.0.0.0:12345'
@pytest.mark.asyncio
async def test_delete_success(monkeypatch):
    """_delete reports 200 when the store delete succeeds."""
    monkeypatch.setattr(flow.flow_store, '_delete', lambda **kwargs: None)
    response = await flow._delete(_temp_id)
    assert response['status_code'] == 200
@pytest.mark.asyncio
async def test_delete_exception(monkeypatch):
    """A KeyError during delete maps to HTTP 404 with a not-found detail."""
    monkeypatch.setattr(flow.flow_store, '_delete', mock_fetch_exception)
    with pytest.raises(flow.HTTPException) as response:
        await flow._delete(_temp_id)
    assert response.value.status_code == 404
    assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
|
#!/usr/bin/env python
# encoding: utf-8
# Scott Newton, 2005 (scottn)
# Thomas Nagy, 2006 (ita)
"Custom command-line options"
import os, sys, imp, types, tempfile, optparse
import Logs, Utils
from Constants import *
cmds = 'distclean configure build install clean uninstall check dist distcheck'.split()
# TODO remove in waf 1.6 the following two
commands = {}
is_install = False
options = {}
arg_line = []
launch_dir = ''
tooldir = ''
# lock file marking a configured project (name overridable through WAFLOCK)
lockfile = os.environ.get('WAFLOCK', '.lock-wscript')
try: cache_global = os.path.abspath(os.environ['WAFCACHE'])
except KeyError: cache_global = ''
platform = Utils.unversioned_sys_platform()
conf_file = 'conf-runs-%s-%d.pickle' % (platform, ABI)
remote_repo = ['http://waf.googlecode.com/svn/']
"""remote directory for the plugins"""
# Such a command-line should work: JOBS=4 PREFIX=/opt/ DESTDIR=/tmp/ahoj/ waf configure
default_prefix = os.environ.get('PREFIX')
if not default_prefix:
	if platform == 'win32': default_prefix = tempfile.gettempdir()
	else: default_prefix = '/usr/local/'
# bug fix: os.environ.get returns a *string* when JOBS is set, and in
# Python 2 a string always compares greater than an int, so JOBS=4 used
# to leak the string '4' into options.jobs; convert to int explicitly
# (fall back to autodetection below when JOBS is absent or not numeric)
try:
	default_jobs = int(os.environ.get('JOBS', -1))
except ValueError:
	default_jobs = -1
if default_jobs < 1:
	try:
		if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
			default_jobs = os.sysconf('SC_NPROCESSORS_ONLN')
		else:
			default_jobs = int(Utils.cmd_output(['sysctl', '-n', 'hw.ncpu']))
	except:
		if os.name == 'java': # platform.system() == 'Java'
			from java.lang import Runtime
			default_jobs = Runtime.getRuntime().availableProcessors()
		else:
			# environment var defined on win32
			default_jobs = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
default_destdir = os.environ.get('DESTDIR', '')
def get_usage(self):
	"""Build waf's usage message: one line per documented command
	found in the user's wscript module (bound onto OptionParser below)."""
	cmds_str = []
	module = Utils.g_module
	if module:
		# create the help messages for commands
		tbl = module.__dict__
		keys = list(tbl.keys())
		keys.sort()
		if 'build' in tbl:
			if not module.build.__doc__:
				module.build.__doc__ = 'builds the project'
		if 'configure' in tbl:
			if not module.configure.__doc__:
				module.configure.__doc__ = 'configures the project'
		ban = ['set_options', 'init', 'shutdown']
		optlst = [x for x in keys if not x in ban
			and type(tbl[x]) is type(parse_args_impl)
			and tbl[x].__doc__
			and not x.startswith('_')]
		# bug fix: max() on an empty sequence raises ValueError when the
		# wscript defines no documented commands; default the width to 0
		just = max([len(x) for x in optlst] or [0])
		for x in optlst:
			cmds_str.append('  %s: %s' % (x.ljust(just), tbl[x].__doc__))
		ret = '\n'.join(cmds_str)
	else:
		ret = ' '.join(cmds)
	return '''waf [command] [options]
Main commands (example: ./waf build -j4)
%s
''' % ret
# replace optparse's stock usage text with waf's command summary
setattr(optparse.OptionParser, 'get_usage', get_usage)
def create_parser(module=None):
	"""Build the optparse parser with waf's standard, configuration
	and installation options; returns the parser."""
	Logs.debug('options: create_parser is called')
	parser = optparse.OptionParser(conflict_handler="resolve", version = 'waf %s (%s)' % (WAFVERSION, WAFREVISION))
	parser.formatter.width = Utils.get_term_cols()
	p = parser.add_option
	# general options
	p('-j', '--jobs',
		type    = 'int',
		default = default_jobs,
		help    = 'amount of parallel jobs (%r)' % default_jobs,
		dest    = 'jobs')
	p('-k', '--keep',
		action  = 'store_true',
		default = False,
		help    = 'keep running happily on independent task groups',
		dest    = 'keep')
	p('-v', '--verbose',
		action  = 'count',
		default = 0,
		help    = 'verbosity level -v -vv or -vvv [default: 0]',
		dest    = 'verbose')
	p('--nocache',
		action  = 'store_true',
		default = False,
		help    = 'ignore the WAFCACHE (if set)',
		dest    = 'nocache')
	p('--zones',
		action  = 'store',
		default = '',
		help    = 'debugging zones (task_gen, deps, tasks, etc)',
		dest    = 'zones')
	p('-p', '--progress',
		action  = 'count',
		default = 0,
		help    = '-p: progress bar; -pp: ide output',
		dest    = 'progress_bar')
	p('--targets',
		action  = 'store',
		default = '',
		help    = 'build given task generators, e.g. "target1,target2"',
		dest    = 'compile_targets')
	# options only meaningful at configure time
	gr = optparse.OptionGroup(parser, 'configuration options')
	parser.add_option_group(gr)
	gr.add_option('-b', '--blddir',
		action  = 'store',
		default = '',
		help    = 'build dir for the project (configuration)',
		dest    = 'blddir')
	gr.add_option('-s', '--srcdir',
		action  = 'store',
		default = '',
		help    = 'src dir for the project (configuration)',
		dest    = 'srcdir')
	gr.add_option('--prefix',
		help    = 'installation prefix (configuration) [default: %r]' % default_prefix,
		default = default_prefix,
		dest    = 'prefix')
	# options only meaningful at install/uninstall time
	gr = optparse.OptionGroup(parser, 'installation options')
	parser.add_option_group(gr)
	gr.add_option('--destdir',
		help    = 'installation root [default: %r]' % default_destdir,
		default = default_destdir,
		dest    = 'destdir')
	gr.add_option('-f', '--force',
		action  = 'store_true',
		default = False,
		help    = 'force file installation',
		dest    = 'force')
	return parser
def parse_args_impl(parser, _args=None):
	"""Parse the command line and populate the module-level globals
	``options``, ``commands`` and ``arg_line``; also configures Logs."""
	global options, commands, arg_line
	(options, args) = parser.parse_args(args=_args)
	arg_line = args
	#arg_line = args[:] # copy
	# By default, 'waf' is equivalent to 'waf build'
	commands = {}
	for var in cmds: commands[var] = 0
	if not args:
		commands['build'] = 1
		args.append('build')
	# Parse the command arguments
	for arg in args:
		commands[arg] = True
	# the check thing depends on the build
	if 'check' in args:
		idx = args.index('check')
		try:
			bidx = args.index('build')
			if bidx > idx:
				raise ValueError('build before check')
		except ValueError, e:
			# 'build' missing or after 'check': insert it before 'check'
			args.insert(idx, 'build')
	if args[0] != 'init':
		args.insert(0, 'init')
	# TODO -k => -j0
	if options.keep: options.jobs = 1
	if options.jobs < 1: options.jobs = 1
	if 'install' in sys.argv or 'uninstall' in sys.argv:
		# absolute path only if set
		options.destdir = options.destdir and os.path.abspath(os.path.expanduser(options.destdir))
	# wire the verbosity/zones settings into the logging module
	Logs.verbose = options.verbose
	Logs.init_log()
	if options.zones:
		Logs.zones = options.zones.split(',')
		if not Logs.verbose: Logs.verbose = 1
	elif Logs.verbose > 0:
		Logs.zones = ['runner']
	if Logs.verbose > 2:
		Logs.zones = ['*']
# TODO waf 1.6
# 1. rename the class to OptionsContext
# 2. instead of a class attribute, use a module (static 'parser')
# 3. parse_args_impl was made in times when we did not know about binding new methods to classes
class Handler(Utils.Context):
	"""loads wscript modules in folders for adding options
	This class should be named 'OptionsContext'
	A method named 'recurse' is bound when used by the module Scripting"""
	parser = None
	# make it possible to access the reference, like Build.bld
	def __init__(self, module=None):
		self.parser = create_parser(module)
		self.cwd = os.getcwd()
		# NOTE: stores the Handler *instance* (not the optparse parser)
		# on the class attribute, so Handler.parser is the last context
		Handler.parser = self
	def add_option(self, *k, **kw):
		"""Proxy to the optparse parser's add_option."""
		self.parser.add_option(*k, **kw)
	def add_option_group(self, *k, **kw):
		"""Proxy to the optparse parser's add_option_group."""
		return self.parser.add_option_group(*k, **kw)
	def get_option_group(self, opt_str):
		"""Proxy to the optparse parser's get_option_group."""
		return self.parser.get_option_group(opt_str)
	def sub_options(self, *k, **kw):
		"""Recurse into the given folder's wscript and run its set_options."""
		if not k: raise Utils.WscriptError('folder expected')
		self.recurse(k[0], name='set_options')
	def tool_options(self, *k, **kw):
		"""Load the given waf tools and run their set_options functions, if any."""
		Utils.python_24_guard()
		if not k[0]:
			raise Utils.WscriptError('invalid tool_options call %r %r' % (k, kw))
		tools = Utils.to_list(k[0])
		# TODO waf 1.6 remove the global variable tooldir
		path = Utils.to_list(kw.get('tdir', kw.get('tooldir', tooldir)))
		for tool in tools:
			# map tool aliases to the actual module names
			tool = tool.replace('++', 'xx')
			if tool == 'java': tool = 'javaw'
			if tool.lower() == 'unittest': tool = 'unittestw'
			module = Utils.load_tool(tool, path)
			try:
				fun = module.set_options
			except AttributeError:
				# tool defines no options; nothing to do
				pass
			else:
				fun(kw.get('option_group', self))
	def parse_args(self, args=None):
		"""Parse command-line arguments with this handler's parser."""
		parse_args_impl(self.parser, args)
|
import torch
import torch.nn as nn
from abc import ABC, abstractmethod
class BaseModel(nn.Module, ABC):
    """Base class for models: subclasses must implement ``forward``."""

    @abstractmethod
    def forward(self, *inputs):
        """Forward pass; must be overridden by subclasses."""
        return NotImplemented

    def __str__(self):
        """For printing the model and the number of trainable parameters.

        Returns:
            (str) -- the model and the number of trainable parameters
        """
        # p.numel() yields a plain int, so the count renders as e.g. '42'
        # instead of 'tensor(42)' (the old sum of torch.prod tensors)
        n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        separate_line_str = ('----------------------------------------'
                             '------------------------------\n')
        return '{0}{1}\n{0}Trainable parameters: {2}\n{0}'.format(
            separate_line_str, super().__str__(), n_params)
|
from typing import Optional, Tuple
from torch_geometric.typing import Adj, OptTensor, PairTensor
import torch
from torch import Tensor
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_sparse import SparseTensor, matmul, fill_diag, sum as sparsesum, mul
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_geometric.nn.inits import glorot, zeros
class GCNConv(MessagePassing):
r"""The graph convolutional operator from the `"Semi-supervised
Classification with Graph Convolutional Networks"
<https://arxiv.org/abs/1609.02907>`_ paper
.. math::
\mathbf{X}^{\prime} = \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2} \mathbf{X} \mathbf{\Theta},
where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
adjacency matrix with inserted self-loops and
:math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
The adjacency matrix can include other values than :obj:`1` representing
edge weights via the optional :obj:`edge_weight` tensor.
Its node-wise formulation is given by:
.. math::
\mathbf{x}^{\prime}_i = \mathbf{\Theta} \sum_{j \in \mathcal{N}(v) \cup
\{ i \}} \frac{e_{j,i}}{\sqrt{\hat{d}_j \hat{d}_i}} \mathbf{x}_j
with :math:`\hat{d}_i = 1 + \sum_{j \in \mathcal{N}(i)} e_{j,i}`, where
:math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to target
node :obj:`i` (default: :obj:`1.0`)
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
improved (bool, optional): If set to :obj:`True`, the layer computes
:math:`\mathbf{\hat{A}}` as :math:`\mathbf{A} + 2\mathbf{I}`.
(default: :obj:`False`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2}` on first execution, and will use the
cached version for further executions.
This parameter should only be set to :obj:`True` in transductive
learning scenarios. (default: :obj:`False`)
add_self_loops (bool, optional): If set to :obj:`False`, will not add
self-loops to the input graph. (default: :obj:`True`)
normalize (bool, optional): Whether to add self-loops and compute
symmetric normalization coefficients on the fly.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
_cached_edge_index: Optional[Tuple[Tensor, Tensor]]
_cached_adj_t: Optional[SparseTensor]
def __init__(self, in_channels: int, out_channels: int,
bias: bool = True, **kwargs):
kwargs.setdefault('aggr', 'add')
super(GCNConv, self).__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize weight (Glorot) and bias (zeros) and clear caches."""
        glorot(self.weight)
        zeros(self.bias)
        # Drop any cached normalized adjacency so it is recomputed after re-init.
        self._cached_edge_index = None
        self._cached_adj_t = None
def forward(self, x: Tensor, edge_index: Tensor,
edge_weight: Tensor) -> Tensor:
""""""
x = x @ self.weight
# propagate_type: (x: Tensor, edge_weight: OptTensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=None)
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        # Fused message+aggregate path for a sparse adjacency: one sparse matmul
        # using the configured aggregation (self.aggr, 'add' by default).
        return matmul(adj_t, x, reduce=self.aggr)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
|
import re
import sys
import traceback
class HandledException(Exception):
    """Exception carrying a pre-built, JSON-serializable error payload."""

    def __init__(self, value):
        # Keep the payload; the base Exception gets no args on purpose.
        self.value = value
        Exception.__init__(self)

    def __str__(self):
        return repr(self.value)

    def json(self):
        """Return the payload stored under the 'json' key."""
        return self.value['json']
class ExceptionHandler(object):
    """ Handle the expected errors.

    Decorator-style wrapper (Python 2 syntax): calls the wrapped function and
    converts known exception types into a HandledException whose payload is a
    JSON-serializable description of the problem (type, line number, details).
    """
    def __init__(self, function):
        self.function = function
        # Per-call error state, populated by __call__ on failure.
        self.error = self.typ = self.tback = self.exc_type = self.exc_value = self.exc_traceback = None
        # Maps either a full "module.ClassName" or a bare module prefix to a
        # specialized handler method; anything else goes to self.unhandled.
        self.error_map = {
            "requests.exceptions": self.requests_error,
            "ruamel.yaml.parser.ParserError": self.parser_error,
            "ruamel.yaml.constructor.ConstructorError": self.constructor_error,
            "ruamel.yaml.constructor.DuplicateKeyError": self.duplicate_key_error,
            "ruamel.yaml.scanner.ScannerError": self.scanner_error,
            "jinja2.exceptions": self.jinja_error,
            "TypeError": self.type_error
        }
    def __call__(self, *args, **kwargs):
        try:
            return self.function(*args, **kwargs)
        except Exception, error:  # Python 2 syntax; this module targets py2
            self.error = error
            self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()
            self.tback = traceback.extract_tb(self.exc_traceback)
            error_module = getattr(error, '__module__', None)
            if error_module:
                full_error = "%s.%s" % (error.__module__, self.exc_type.__name__)
            else:
                full_error = self.exc_type.__name__
            # Try the most specific key first, then the module prefix,
            # then fall back to the generic handler.
            handler = self.error_map.get(full_error,
                                         self.error_map.get(error_module,
                                                            self.unhandled))
            self.typ = kwargs.get('typ')
            message = handler()
            raise HandledException({"json": message})
    def error_response(self, message, line_number):
        """Build the payload dict shared by all specialized handlers."""
        error_payload = {"handled_error": {
            "in": self.typ,
            "title": "Message: Issue found loading %s." % self.typ,
            "line_number": line_number,
            "details": "Details: %s" % message,
            "raw_error": "%s\n%s" % (self.exc_type, self.exc_value)
        }
        }
        return error_payload
    def constructor_error(self):
        # ruamel exposes the error location via problem_mark (0-based line).
        line_number = self.error.problem_mark.line+1
        message = next(x for x in str(self.error).splitlines()
                       if x.startswith('found'))
        return self.error_response(message=message,
                                   line_number=line_number)
    def duplicate_key_error(self):
        line_number = self.error.problem_mark.line+1
        # Trim the verbose "with ..." suffix from the duplicate-key message.
        message = next(x for x in str(self.error).splitlines()
                       if x.startswith('found')).split('with')[0]
        return self.error_response(message=message,
                                   line_number=line_number)
    def jinja_error(self):
        message = str(self.error).replace("'ruamel.yaml.comments.CommentedMap object'", 'Object')
        # Jinja template frames show up with synthetic '<...>' filenames.
        line_numbers = [x for x in self.tback if re.search('^<.*>$', x[0])]
        if line_numbers:
            line_number = line_numbers[0][1]
        else:
            line_number = 'unknown'
        return self.error_response(message=message,
                                   line_number=line_number)
    def parser_error(self):
        line_number = self.error.problem_mark.line + 1
        messages = [x for x in str(self.error).splitlines() if x.startswith('expected')]
        if messages:
            message = messages[0]
        else:
            message = str(self.error)
        return self.error_response(message=message,
                                   line_number=line_number)
    def scanner_error(self):
        line_number = self.error.problem_mark.line + 1
        message = str(self.error).splitlines()[0]
        return self.error_response(message=message,
                                   line_number=line_number)
    def requests_error(self):
        message = "DB connection problems, see the browser developer tools for the full error."
        return self.error_response(message=message,
                                   line_number=None)
    def type_error(self):
        message = str(self.error)
        line_numbers = [x for x in self.tback if re.search('^<.*>$', x[0])]
        if line_numbers:
            line_number = line_numbers[0][1]
        else:
            line_number = 'unknown'
        return self.error_response(message=message,
                                   line_number=line_number)
    def unhandled(self):
        # Last resort: dump everything to the console (py2 print statement).
        print self.exc_type, self.exc_value, self.exc_traceback, self.tback, self.error
        line_numbers = [x for x in self.tback if re.search('^<.*>$', x[0])]
        if line_numbers:
            line_number = line_numbers[0][1]
        else:
            line_number = None
        message = "Please see the console for details. %s" % str(self.error)
        return self.error_response(message=message,
                                   line_number=line_number)
|
# TASK 2
# Gradient computation for a single-layer network (one gradient-descent step).
import numpy as np
def sigmoid(x):
    return 1/(1 + np.exp(-x))
def sigmoid_prime(x):
    # derivative of the sigmoid function
    return sigmoid(x) * (1-sigmoid(x))
# learning rate
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array([0.5])  # target output
bies = 0.5  # bias term (variable name is a typo for "bias")
# initial weights
w= np.array([0.5, -0.5, 0.3, 0.1])
h = np.dot(x, w) + bies
nn_output = sigmoid(h)
print(nn_output)
# compute the network error
error = y - nn_output
# compute the error term (delta)
erro_term = error * sigmoid_prime(h)
# compute the weight update
del_w = learnrate * erro_term * x
print(del_w)
# apply the new weights
w = w + del_w
print(w)
h = np.dot(x, w) + bies
nn_output = sigmoid(h)
print(nn_output)
import csv
import os
from itemadapter import ItemAdapter
class RecipelinkextractorPipeline:
def process_item(self, item, spider):
path_to_file = item['dir_path'] + "/" + item['title']+".html"
# check if csv file exists
if not os.path.isfile(item['csv_path']):
with open(item['csv_path'], 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(["title", "url", "path_to_file"])
f.close()
# # add new row into dataset
with open(item['csv_path'], 'a', encoding='UTF8', newline='') as f:
writer = csv.writer(f, delimiter='\t')
# write the data
writer.writerow([item['title'],item['url'],path_to_file])
f.close()
# save html file
file = open(path_to_file,"w", encoding='UTF8')
file.write(item['content'])
file.close()
return item
|
# Package version string (semantic versioning).
__version__ = '1.2.17'
"""
Driver for CO2 sensor via USB
Intelligent Infrared CO2 Module
(Model: MH-Z19)
"""
import struct
from osgar.node import Node
from osgar.bus import BusShutdownException
class WinsenCO2(Node):
    """OSGAR node driving a Winsen MH-Z19 CO2 sensor over a raw byte channel."""
    def __init__(self, config, bus):
        super().__init__(config, bus)
        bus.register('raw', 'co2')
        self.sleep_time = config.get('sleep')  # NOTE(review): read but never used here - confirm intent
        self._buf = b''    # accumulates raw serial bytes until a full frame arrives
        self.errors = 0    # frames dropped because of a checksum mismatch
    def create_packet(self):
        # request CO2 readings
        # Fixed 9-byte command frame: 0xFF start byte, 0x01 sensor, 0x86 "read
        # CO2", padding, checksum (presumably per the MH-Z19 datasheet - TODO confirm).
        return b"\xff\x01\x86\x00\x00\x00\x00\x00\x79"
    def get_packet(self):
        """extract packet from internal buffer (if available otherwise return None"""
        data = self._buf
        if 0xFF not in data:
            # No start byte anywhere - discard the noise.
            self._buf = b''
            return None
        i = data.index(0xFF)
        data = data[i:]  # cut to the beginning of the packet
        size = 9  # fixed frame length
        if len(data) < size:
            # Partial frame - keep it buffered for the next 'raw' chunk.
            self._buf = data
            return None
        ret, self._buf = data[:size], data[size:]
        # A valid frame has bytes 1..8 summing to 0 mod 256 (the last byte is
        # the checksum of the preceding ones).
        checksum = sum(ret[1:]) & 0xFF
        if checksum != 0:
            self.errors += 1
            return None
        return ret
    def parse_CO2_packet(self, data):
        """
        Parse CO2 value
        """
        # Concentration is a big-endian uint16 stored at bytes 2-3 of the frame.
        return struct.unpack_from('>H', data, 2)[0]
    def run(self):
        try:
            while True:
                dt, channel, data = self.listen()
                self.time = dt
                if channel == 'trigger':
                    # Periodic trigger: ask the sensor for a fresh reading.
                    self.publish('raw', self.create_packet())
                elif channel == 'raw':
                    self._buf += data
                    packet = self.get_packet()
                    if packet is not None:
                        value_CO2 = self.parse_CO2_packet(packet)
                        # NOTE(review): parse_CO2_packet never returns None,
                        # so this branch looks unreachable - confirm intent.
                        if value_CO2 is None:
                            print(packet)
                        else:
                            self.publish('co2', value_CO2)
        except BusShutdownException:
            pass
        assert self.errors == 0, self.errors  # checksum
# vim: expandtab sw=4 ts=4
|
DUMMY_API_KEY = "dummykey"
DUMMY_LOCATION = "DUMMY LOCATION"


class MockSnips(object):
    """Minimal stand-in for the Snips runtime, for use in unit tests."""

    class MockDialogue(object):
        """Dialogue stub that only records whether speak() was invoked."""

        def __init__(self):
            self._called = False
            self._speak_called = False

        def speak(self, sentence, session_id):
            # Record the call; tests inspect the flag afterwards.
            self._speak_called = True

    class MockTypes(object):
        """Empty placeholder for the Snips type registry."""

        def __init__(self):
            pass

    def __init__(self):
        self._called = False
        self.dialogue = self.MockDialogue()
        self.session_id = "DummySessionId"
        self.types = self.MockTypes()
|
from contextlib import contextmanager
import sqlite3 as sql
from sqlite3 import Error
from flask import Flask, render_template, redirect, jsonify, request, send_from_directory
#------------------------------------------------------------#
# Create variable for database file
#------------------------------------------------------------#
DB_FILE = "static/data/nba_test"
#------------------------------------------------------------#
# Establish connection to database
#------------------------------------------------------------#
@contextmanager
def connection():
    """ create a database connection to a SQLite database

    Yields a sqlite3 connection to DB_FILE and guarantees it is closed on
    exit. sqlite errors are printed rather than raised (legacy behavior).
    """
    conn = None
    try:
        # Bug fix: the connection was yielded without being bound to `conn`,
        # so the `finally` block below could never close it (it leaked).
        conn = sql.connect(DB_FILE)
        yield conn
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
#------------------------------------------------------------#
# Send data to browser via connection
#------------------------------------------------------------#
# Gives us all data in database file
def _player_stats(cursor, table):
    """Return the stats dict for one player table ('kobe' or 'lebron').

    Produces the same keys/values the old duplicated code built: per-year
    aggregates plus per-game arrays. `table` is only ever one of our two
    hard-coded table names, so interpolating it into SQL is safe here.
    """
    # Per-year aggregates, one row per season.
    yearly = cursor.execute('''
        SELECT
        ROW_NUMBER() OVER (ORDER BY strftime('%Y', [game.date]))
        row_num,strftime('%Y', [game.date]) as Year,sum(pts),sum(ast),sum(reb),sum(blk),sum(dreb),sum(stl)
        FROM {}
        WHERE pts IS NOT NULL
        GROUP BY Year
        '''.format(table)).fetchall()
    # Per-game rows in chronological order.
    per_game = cursor.execute('''
        SELECT
        ROW_NUMBER() OVER (ORDER BY strftime('%Y', [game.date]))
        row_num,strftime('%Y',[game.date]) as Year,strftime('%Y-%m-%d',[game.date]) as Date,[game.visitor_team_id],[game.home_team_id],pts,ast,reb,blk,dreb,stl
        FROM {}
        WHERE pts IS NOT NULL
        '''.format(table)).fetchall()
    return {
        'index_year': [int(r[0]) for r in yearly],
        'pts_year': [int(r[2]) for r in yearly],
        'ast_year': [int(r[3]) for r in yearly],
        'reb_year': [int(r[4]) for r in yearly],
        'blk_year': [int(r[5]) for r in yearly],
        'dreb_year': [int(r[6]) for r in yearly],
        'stl_year': [int(r[7]) for r in yearly],
        'year': [int(r[1]) for r in per_game],
        'date': [r[2] for r in per_game],
        'points': [int(r[5]) for r in per_game],
        'assists': [int(r[6]) for r in per_game],
        'rebounds': [int(r[7]) for r in per_game],
        'blocks': [int(r[8]) for r in per_game],
        'def_rebounds': [int(r[9]) for r in per_game],
        'steals': [int(r[10]) for r in per_game],
    }


def read_all_data():
    """Load every Kobe and LeBron stat from the SQLite DB into one dict.

    The kobe/lebron branches were byte-for-byte duplicates of each other;
    they are now factored into _player_stats(). Returned structure is
    unchanged: {'kobe': {...}, 'lebron': {...}}.
    """
    with connection() as conn:
        c = conn.cursor()
        return {
            'kobe': _player_stats(c, 'kobe'),
            'lebron': _player_stats(c, 'lebron'),
        }
#------------------------------------------------------------#
# Create an instance of Flask
#------------------------------------------------------------#
# Module-level WSGI application; the route decorators below register on it.
app = Flask(__name__)
#------------------------------------------------------------#
# Route to render index.html
#------------------------------------------------------------#
@app.route("/")
def home():
    # Render the landing page with the full stats payload for both players.
    results = read_all_data()
    # NOTE(review): debug print left in - consider removing or using logging.
    print(f'This should be my dictionary with all results {results}')
    return render_template('index.html', data=results)
#------------------------------------------------------------#
#Establish routes
#------------------------------------------------------------#
@app.route("/api")
def api():
    # Optional ?year=YYYY query parameter selects a single season.
    year = request.args.get('year')
    if year:
        try:
            year = int(year)
            print(year)
            # NOTE(review): get_all_data_by_year is neither defined nor
            # imported in this module - this line will raise NameError at
            # runtime; confirm where it is meant to come from.
            return jsonify(get_all_data_by_year(year))
        except ValueError:
            # Non-numeric year: fall through.
            pass
    # NOTE(review): when `year` is missing or invalid the view implicitly
    # returns None, which Flask rejects - probably needs an error response.
@app.route("/api/all")
def all_data():
    # JSON dump of every stat for both players.
    return jsonify(read_all_data())
if __name__ == "__main__":
    # Development entry point; debug=True enables the reloader and debugger.
    app.run(debug=True)
|
import configparser
import json
import os
conf = {}
default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
def set_conf_env(conf_dict, envdict=os.environ):
    """Set config values from environment variables

    Looks for variables of the form ``FSSPEC_<protocol>_<kwarg>``.
    There is no attempt to convert strings, but the kwarg keys will
    be lower-cased.

    Parameters
    ----------
    conf_dict : dict(str, dict)
        This dict will be mutated
    envdict : dict-like(str, str)
        Source for the values - usually the real environment
    """
    # Only keys that start with the FSSPEC prefix and contain at least two
    # underscores (prefix, protocol, kwarg) are considered.
    candidates = (k for k in envdict
                  if k.startswith("FSSPEC") and k.count("_") >= 2)
    for key in candidates:
        _, proto, kwarg = key.split("_", 2)
        proto_conf = conf_dict.setdefault(proto.lower(), {})
        proto_conf[kwarg.lower()] = envdict[key]
def set_conf_files(cdir, conf_dict):
    """Set config values from files

    Scans for INI and JSON files in the given directory and uses their
    contents to set the config. Files are processed in sorted name order,
    so later values win on repeated keys.

    In the case of INI files, all values are strings and are not converted.

    Parameters
    ----------
    cdir : str
        Directory to search
    conf_dict : dict(str, dict)
        This dict will be mutated
    """
    if not os.path.isdir(cdir):
        return
    for fn in sorted(os.listdir(cdir)):
        path = os.path.join(cdir, fn)
        if fn.endswith(".ini"):
            parser = configparser.ConfigParser()
            parser.read(path)
            for section in parser:
                # ConfigParser always exposes a DEFAULT section; skip it.
                if section == "DEFAULT":
                    continue
                conf_dict.setdefault(section, {}).update(dict(parser[section]))
        if fn.endswith(".json"):
            with open(path) as fh:
                loaded = json.load(fh)
            for section in loaded:
                conf_dict.setdefault(section, {}).update(dict(loaded[section]))
def apply_config(cls, kwargs, conf_dict=None):
    """Supply default values for kwargs when instantiating class

    Augments the passed kwargs by finding entries in the config dict
    which match the class's ``.protocol`` attribute (one or more str).

    Parameters
    ----------
    cls : file system implementation
    kwargs : dict
    conf_dict : dict of dict
        Typically this is the global configuration

    Returns
    -------
    dict : the modified set of kwargs
    """
    if conf_dict is None:
        conf_dict = conf
    if isinstance(cls.protocol, (tuple, list)):
        protos = cls.protocol
    else:
        protos = [cls.protocol]
    merged = {}
    for proto in protos:
        # Defaults from the current state of the config; later protocols win.
        merged.update(conf_dict.get(proto, {}))
    # Explicit kwargs always win over configured defaults.
    merged.update(**kwargs)
    return merged
# Populate the global `conf` at import time: config files first, then
# environment variables (so env vars win on conflicting keys).
set_conf_files(conf_dir, conf)
set_conf_env(conf)
|
import os
import subprocess
import click
from make_lambda_package import archive
from make_lambda_package import deps
from make_lambda_package import fsutil
from make_lambda_package import scm
@click.command('make-lambda-package')
@click.argument('source',
                metavar='<path_or_url>')
@click.option('--repo-source-files',
              metavar='<glob_pattern_in_source>',
              help='Source files to package.')
@click.option('--requirements-file',
              metavar='<path_in_source>',
              help='Dependencies to package.')
@click.option('--local-source-file',
              metavar='<path_from_cwd> <path_in_zip>',
              multiple=True,
              type=(click.Path(exists=True, dir_okay=False), str),
              help='Files in the current working directory to package. Useful for config files.')
@click.option('--work-dir',
              metavar='<output_directory>',
              type=click.Path(exists=False, file_okay=False, writable=True),
              help='Where to store intermediary files and the zipped package. ')
@click.option('--runtime',
              metavar='<lambda_runtime>',
              default='python2.7',
              type=click.Choice(['python2.7', 'python3.6', 'python3.7']),
              help='Lambda runtime. Docker image `lambci/lambda:build-{runtime}` will be used for the build.')
def main(
        source,
        repo_source_files,
        requirements_file,
        local_source_file,
        work_dir,
        runtime):
    """
    Bundle up a deployment package for AWS Lambda.

    From your local filesystem:

    \b
        $ make-lambda-package .
        ...
        dist/lambda-package.zip

    Or from a remote git repository:

    \b
        $ make-lambda-package https://github.com/NoRedInk/make-lambda-package.git
        ...
        vendor/dist/NoRedInk-make-lambda-package.zip

    Use # fragment to specify a commit or a branch:

    \b
        $ make-lambda-package https://github.com/NoRedInk/make-lambda-package.git#v1.0.0

    Dependencies specified with --requirements-file will built using a docker container
    that replicates AWS Lambda's execution environment, so that extension modules
    are correctly packaged.

    When packaging a local source, --work-dir defaults to `.`:

    \b
        * ./build will hold a virtualenv for building dependencies if specified.
        * ./dist is where the zipped package will be saved

    When packaging a remote source, --work-dir defaults to `./vendor`.
    """
    # Decide whether `source` is a local path or a remote git URL, and derive
    # the working/output directory layout from that.
    scm_source = fsutil.parse_path_or_url(source)
    paths = fsutil.decide_paths(scm_source, work_dir)
    # Fail early if deps were requested but docker (needed for the Lambda-like
    # build environment) is not available.
    if requirements_file:
        with open(os.devnull, 'w') as devnull:
            docker_retcode = subprocess.call(['docker', '--help'], stdout=devnull)
            if docker_retcode != 0:
                raise click.UsageError(
                    "`docker` command doesn't seem to be available. "
                    "It's required to package dependencies.")
    # Nothing selected for inclusion: warn but proceed (an empty zip may be
    # intentional, e.g. as a placeholder artifact).
    if not (requirements_file or repo_source_files or local_source_file):
        click.secho(
            'Warning: without --repo-source-files, --requirements-file, '
            'or --local-source-file, nothing will be included in the zip file. '
            'Assuming you have good reasons to do this and proceeding.',
            fg='yellow')
    fsutil.ensure_dirs(paths)
    if isinstance(scm_source, fsutil.RemoteSource):
        click.echo('Fetching repo..')
        scm.fetch_repo(scm_source.url, scm_source.ref, paths.src_dir)
    deps_file = None
    if requirements_file:
        click.echo('Building deps..')
        deps_file = deps.build_deps(paths, requirements_file, runtime)
    click.echo('Creating zip file..')
    archive.make_archive(
        paths,
        runtime,
        repo_source_files,
        local_source_file,
        deps_file)
    # Print the final zip path (relative) so callers can capture it.
    click.echo(os.path.relpath(paths.zip_path, os.getcwd()))
if __name__ == '__main__':
    main()
|
from di_container import DIContainerKeys, inject
from adapter_manager import AdapterManager, AdapterTypes
from display_adapter import DisplayAdapterCalls
class ErrorHandler:
    """Formats error text into a banner and forwards it to the display adapter."""

    def __init__(self):
        # Resolve the display manager from the DI container once at construction.
        self.display_manager = inject(DIContainerKeys.display_manager, AdapterManager)

    def new_error(self, error, errorPretty = "See FULL ERROR below."):
        """Show a framed error message; `error` carries the full details."""
        parts = [f"ERROR ====================\n{errorPretty}"]
        if error != "":
            parts.append(f"\nFULL ERROR ==========\n{error}")
        parts.append("\n==========================")
        return self.display_manager.call(DisplayAdapterCalls.show_text, "".join(parts))
|
# Version tag of the parliament data this build targets.
CURRENT_PARLIAMENT_VERSION = '151c'
# NOTE(review): DEBUG is hard-coded on - confirm this is not shipped to production.
DEBUG = True
# Feature knobs are only intended for incomplete functionality.
FEATURES = {
    'PARSE_MARKERS': True,
}
|
""" Profile model. """
# Django
from django.db import models
# Utilities
from cride.utils.models import CRideModel
class Profile(CRideModel):
    """ Profile model.

    A profile holds a user's public data like biography, picture,
    and statistics.
    """

    # One profile per user; deleting the user deletes the profile.
    user = models.OneToOneField('users.User', on_delete=models.CASCADE)
    picture = models.ImageField(
        'profile picture',
        upload_to='users/pictures',
        blank=True,
        null=True
    )
    biography = models.TextField(max_length=500, blank=True)

    # Stats
    rides_taken = models.PositiveIntegerField(default=0)
    rides_offered = models.PositiveIntegerField(default=0)
    reputation = models.FloatField(
        default=0.0,
        help_text="Users's reputation based on the rides taken and offered."
    )

    def __str__(self):
        """ Return user's str representation """
        return str(self.user)
|
from tkinter import *
import math
#calc files
class calc:
    """Simple Tkinter calculator.

    The entry widget `self.e` holds the expression text; button callbacks
    append characters via action(), and equals()/squareroot()/square()
    evaluate the expression and display the result.
    """

    def getandreplace(self):
        """Read the entry text and map the keypad alphabet to Python operators."""
        self.expression = self.e.get()
        # 'x' is the multiplication key; eval() needs '*'. (The original also
        # did a no-op replace('/', '/'), which has been removed.)
        self.newtext = self.expression.replace('x', '*')

    def _evaluate(self):
        """Evaluate the entry into self.value.

        Returns True on success; on invalid input shows 'Invalid Input!' in
        the entry and returns False.
        """
        self.getandreplace()
        try:
            # NOTE: eval() executes arbitrary expressions. Acceptable here only
            # because input comes from this calculator's own buttons/entry.
            self.value = eval(self.newtext)
        except (SyntaxError, NameError):
            # Bug fix: the original `except SyntaxError or NameError:` only
            # ever caught SyntaxError ('or' evaluates to the first class).
            self.e.delete(0, END)
            self.e.insert(0, 'Invalid Input!')
            return False
        return True

    def equals(self):
        """Evaluate the expression and display the result."""
        if self._evaluate():
            self.e.delete(0, END)
            self.e.insert(0, self.value)

    def squareroot(self):
        """Evaluate the expression and display its square root."""
        if self._evaluate():
            self.sqrtval = math.sqrt(self.value)
            self.e.delete(0, END)
            self.e.insert(0, self.sqrtval)

    def square(self):
        """Evaluate the expression and display its square."""
        if self._evaluate():
            self.sqval = math.pow(self.value, 2)
            self.e.delete(0, END)
            self.e.insert(0, self.sqval)

    def clearall(self):
        """Clear the whole entry (AC key)."""
        self.e.delete(0, END)

    def clear1(self):
        """Delete the last character (C key)."""
        self.txt = self.e.get()[:-1]
        self.e.delete(0, END)
        self.e.insert(0, self.txt)

    def action(self, argi):
        """Append a character/value to the expression."""
        self.e.insert(END, argi)

    def __init__(self, master):
        """Build the calculator UI inside `master`."""
        master.title('Calulator')
        master.geometry()
        self.e = Entry(master)
        self.e.grid(row=0, column=0, columnspan=6, pady=3)
        self.e.focus_set()
        # Special-purpose buttons.
        Button(master, text="=", width=11, height=3, fg="black",
               bg="#e7c347", command=self.equals).grid(
               row=4, column=4, columnspan=2)
        Button(master, text='AC', width=5, height=3,
               fg="black", bg="#e6654d",
               command=self.clearall).grid(row=1, column=4)
        Button(master, text='C', width=5, height=3,
               fg="black", bg="#e6654d",
               command=self.clear1).grid(row=1, column=5)
        Button(master, text="?", width=5, height=3,
               fg="black", bg="#29e48e",
               command=self.squareroot).grid(row=3, column=4)
        Button(master, text="x²", width=5, height=3,
               fg="black", bg="#29e48e",
               command=self.square).grid(row=3, column=5)
        # Digit keys: (label, value passed to action, grid row, grid column).
        # Values preserve the original mix of the string '7' and ints elsewhere.
        digit_keys = [
            ("7", '7', 1, 0), ("8", 8, 1, 1), ("9", 9, 1, 2),
            ("4", 4, 2, 0), ("5", 5, 2, 1), ("6", 6, 2, 2),
            ("1", 1, 3, 0), ("2", 2, 3, 1), ("3", 3, 3, 2),
            ("0", 0, 4, 1),
        ]
        for label, value, row, col in digit_keys:
            Button(master, text=label, width=5, height=3,
                   fg="black", bg="#58cbf1",
                   # bind value as a default arg to avoid late-binding closures
                   command=lambda v=value: self.action(v)).grid(row=row, column=col)
        # Operator keys; the ÷ key inserts '/' so eval() can handle it.
        operator_keys = [
            ("+", '+', 4, 3), ("x", 'x', 2, 3), ("-", '-', 3, 3),
            ("÷", '/', 1, 3), ("%", '%', 4, 2), (".", '.', 4, 0),
            ("(", '(', 2, 4), (")", ')', 2, 5),
        ]
        for label, value, row, col in operator_keys:
            Button(master, text=label, width=5, height=3,
                   fg="black", bg="#29e48e",
                   command=lambda v=value: self.action(v)).grid(row=row, column=col)
#calling main calculator...
def calcul():
    """Create the Tk root window, build the calculator UI, and run the loop."""
    root = Tk()
    obj=calc(root)
    root.mainloop()
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for AP Parsing modeling."""
import frozendict
# Special vocabulary entries reserved for padding/masking/unknown tokens.
RESERVED_TOKENS = ("[PAD]", "[MASK]", "[UNK]")
# Per-example bookkeeping fields that are carried through but not modeled.
METADATA_FEATURES = ("note_id", "char_offset", "seq_length")
# BIO-style labels for fragment spans; flat labels for action-item types.
CLASS_NAMES = frozendict.frozendict({
    "fragment_type": ("O", "B-PT", "I-PT", "B-PD", "I-PD", "B-AI", "I-AI"),
    "action_item_type":
        ("O", "MED", "IMG", "OBS", "CONS", "NUT", "THERP", "DIAG", "OTH")
})
FRAGMENT_TYPE_TO_ENUM = frozendict.frozendict({"PT": 1, "PD": 2, "AI": 3})
FEATURE_NAMES = ("token_ids", "token_type", "is_upper", "is_title")
# derived constants
TOKEN_IDS = FEATURE_NAMES[0]
LABEL_NAMES = tuple(CLASS_NAMES.keys())
MODEL_FEATURES = FEATURE_NAMES + LABEL_NAMES
|
import time
import logging
from pgoapi.exceptions import (ServerSideRequestThrottlingException,
NotLoggedInException, ServerBusyOrOfflineException,
NoPlayerPositionSetException, EmptySubrequestChainException,
UnexpectedResponseException)
from pgoapi.pgoapi import PGoApi, PGoApiRequest, RpcApi
from pgoapi.protos.POGOProtos.Networking.Requests.RequestType_pb2 import RequestType
from human_behaviour import sleep
class PermaBannedException(Exception):
    """Raised when a response matches the permanent-ban pattern
    (status_code 3 with an empty GET_INVENTORY payload)."""
    pass
class ApiWrapper(PGoApi):
    """PGoApi subclass whose create_request() returns the retrying ApiRequest."""
    def __init__(self):
        PGoApi.__init__(self)
        # When True, fall back to the vanilla PGoApiRequest (needed by login()).
        self.useVanillaRequest = False
    def create_request(self):
        RequestClass = ApiRequest
        if self.useVanillaRequest:
            RequestClass = PGoApiRequest
        return RequestClass(
            self,
            self._position_lat,
            self._position_lng,
            self._position_alt
        )
    def login(self, *args):
        # login needs base class "create_request"
        self.useVanillaRequest = True
        try:
            ret_value = PGoApi.login(self, *args)
        finally:
            # cleanup code
            self.useVanillaRequest = False
        return ret_value
class ApiRequest(PGoApiRequest):
    """PGoApiRequest with client-side throttling, retries, and response validation."""

    def __init__(self, *args):
        PGoApiRequest.__init__(self, *args)
        self.logger = logging.getLogger(__name__)
        self.request_callers = []          # names of queued sub-requests (see __getattr__)
        self.last_api_request_time = None  # ms timestamp of the previous request
        self.requests_per_seconds = 2      # client-side rate limit

    def can_call(self):
        """Validate call preconditions; raises (rather than returning False) on failure."""
        if not self._req_method_list:
            raise EmptySubrequestChainException()
        if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):
            raise NoPlayerPositionSetException()
        if self._auth_provider is None or not self._auth_provider.is_login():
            # Bug fix: was `self.log.info(...)`, but this class only defines
            # `self.logger` (used everywhere else) - self.log raised AttributeError.
            self.logger.info('Not logged in')
            raise NotLoggedInException()
        return True

    def _call(self):
        return PGoApiRequest.call(self)

    def _pop_request_callers(self):
        # Drain the recorded sub-request names, upper-cased to match response keys.
        r = self.request_callers
        self.request_callers = []
        return [i.upper() for i in r]

    def is_response_valid(self, result, request_callers):
        """Return True if `result` is a dict containing every requested response."""
        if not result or not isinstance(result, dict):
            return False
        if 'responses' not in result or 'status_code' not in result:
            return False
        if not isinstance(result['responses'], dict):
            return False
        try:
            # Permaban symptom is empty response to GET_INVENTORY and status_code = 3
            if result['status_code'] == 3 and 'GET_INVENTORY' in request_callers and not result['responses']['GET_INVENTORY']:
                raise PermaBannedException
        except KeyError:
            # Still wrong
            return False
        # the response can still programatically be valid at this point
        # but still be wrong. we need to check if the server did sent what we asked it
        for request_caller in request_callers:
            if request_caller not in result['responses']:
                return False
        return True

    def call(self, max_retry=15):
        """Execute the queued sub-requests, retrying on throttling/bad responses."""
        request_callers = self._pop_request_callers()
        if not self.can_call():
            return False  # currently this is never ran, exceptions are raised before
        request_timestamp = None
        # _call() clears _req_method_list internally, so keep a copy to replay on retry.
        api_req_method_list = self._req_method_list
        result = None
        try_cnt = 0
        throttling_retry = 0
        unexpected_response_retry = 0
        while True:
            request_timestamp = self.throttle_sleep()
            self._req_method_list = [req_method for req_method in api_req_method_list]
            should_throttle_retry = False
            should_unexpected_response_retry = False
            try:
                result = self._call()
            except ServerSideRequestThrottlingException:
                should_throttle_retry = True
            except UnexpectedResponseException:
                should_unexpected_response_retry = True
            if should_throttle_retry:
                throttling_retry += 1
                if throttling_retry >= max_retry:
                    raise ServerSideRequestThrottlingException('Server throttled too many times')
                sleep(1)  # huge sleep ?
                continue  # skip response checking
            if should_unexpected_response_retry:
                unexpected_response_retry += 1
                # Back off harder after 5 consecutive unexpected responses.
                if unexpected_response_retry >= 5:
                    self.logger.warning('Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.')
                    sleep(30)
                else:
                    sleep(2)
                continue
            if not self.is_response_valid(result, request_callers):
                try_cnt += 1
                if try_cnt > 3:
                    self.logger.warning('Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry))
                if try_cnt >= max_retry:
                    raise ServerBusyOrOfflineException()
                sleep(1)
            else:
                break
        self.last_api_request_time = request_timestamp
        return result

    def __getattr__(self, func):
        # Record the names of chained sub-requests so call() can validate
        # that the server answered every one of them.
        if func.upper() in RequestType.keys():
            self.request_callers.append(func)
        return PGoApiRequest.__getattr__(self, func)

    def throttle_sleep(self):
        """Sleep just enough to honor requests_per_seconds; return now in ms."""
        now_milliseconds = time.time() * 1000
        required_delay_between_requests = 1000 / self.requests_per_seconds
        difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)
        if self.last_api_request_time is not None and difference < required_delay_between_requests:
            sleep_time = required_delay_between_requests - difference
            time.sleep(sleep_time / 1000)
        return now_milliseconds
|
import select
import socket
import threading
from typing import List
from common.application_params import PORT, MAX_MSG_LEN
from common.message_frame import MessageFrame
from messages.ping import Ping
from server.middleware import Middleware
class Server(threading.Thread):
    """TCP server thread: accepts peers and exchanges MessageFrames via the middleware."""

    def __init__(self, middleware: Middleware):
        super().__init__()
        self.keep_alive = True     # main-loop flag; clear it to stop run()
        self.initialized = False
        self.socket: socket.socket = None          # listening socket (created lazily)
        self.read_list: List[socket.socket] = []   # sockets watched by select()
        self.middleware = middleware

    def initialize_socket(self):
        """Create the non-blocking listening socket and register it for select()."""
        # create socket
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setblocking(False)
        # bind and listen to socket
        server_addr = ("0.0.0.0", PORT)
        print("Starting to listen on %s:%s" % server_addr)
        self.socket.bind(server_addr)
        self.socket.listen(5)
        self.read_list += [self.socket]
        self.initialized = True

    def run(self):
        # Lazily initialize, then serve until keep_alive is cleared.
        if not self.initialized:
            self.initialize_socket()
        while self.keep_alive:
            self.communicate()

    def communicate(self) -> None:
        """One select() round: reap timed-out peers, accept, receive, respond."""
        if not self.initialized:
            raise Exception("Socket was not initialized")
        # Reap timed-out sessions. Bug fix: iterate over a copy - the original
        # removed from self.read_list while iterating it, which skips the
        # element following every removal.
        for sock in list(self.read_list):
            if sock is self.socket:
                continue
            # ask the middleware if this session timed out
            if self.middleware.check(sock):
                self.read_list.remove(sock)
        readable, writeable, exceptional = select.select(self.read_list, [], [], 10)
        for sock in readable:
            if sock is self.socket:
                # Connect new peer
                client_sock, addr = self.socket.accept()
                self.read_list.append(client_sock)
                self.middleware.connect_event(client_sock)
            else:
                try:
                    # receive Data
                    data = sock.recv(MAX_MSG_LEN)
                except (ConnectionResetError, ConnectionAbortedError):
                    # Connection Terminated
                    print("Bad peer, disconnected")
                    self.read_list.remove(sock)
                    self.middleware.remove(sock)
                    continue
                # Construct frame from raw data
                frame = MessageFrame()
                frame.decode(data)
                # Hand every received message to the middleware.
                while frame.available():
                    message = frame.get_message()
                    self.middleware.ingest(sock, message)
                # craft frame with our own messages
                frame = MessageFrame()
                while self.middleware.has_message_for(sock):
                    frame.add_message(self.middleware.emit(sock))
                # Make sure it isn't completely empty
                if frame.empty():
                    frame.add_message(Ping())
                try:
                    # Send our response
                    sock.send(frame.encode())
                except (ConnectionResetError, ConnectionAbortedError):
                    # Connection Terminated
                    print("Bad peer, disconnected")
                    self.read_list.remove(sock)
                    self.middleware.remove(sock)
                    continue
|
import torch
from CLAPPVision.vision.models import FullModel, ClassificationModel
from CLAPPVision.utils import model_utils
def load_model_and_optimizer(opt, num_GPU=None, reload_model=False, calc_loss=True):
    """Build the full vision model and its optimizer(s), distribute the
    model over the available GPUs and optionally reload saved weights.

    Returns:
        (model, optimizer) where optimizer is a list — one Adam instance
        for the whole network, or one per encoder module when training
        with separate splits.
    """
    model = FullModel.FullVisionModel(opt, calc_loss)

    # One optimizer for the whole net, or one per encoder module so the
    # gradients of greedily-trained splits do not get mixed up.
    # Note: module.parameters() acts recursively and includes submodules.
    if opt.model_splits == 1:
        optimizer = [
            torch.optim.Adam(
                model.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay
            )
        ]
    elif opt.model_splits >= 2:
        optimizer = [
            torch.optim.Adam(
                module.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay
            )
            for module in model.encoder
        ]
    else:
        raise NotImplementedError

    model, num_GPU = model_utils.distribute_over_GPUs(opt, model, num_GPU=num_GPU)
    model, optimizer = model_utils.reload_weights(
        opt, model, optimizer, reload_model=reload_model
    )
    return model, optimizer
def load_classification_model(opt):
    """Build the linear classification head for downstream evaluation.

    Args:
        opt: option namespace; reads ``in_channels``, ``dataset`` and
            ``device``.

    Returns:
        ClassificationModel moved to ``opt.device``.

    Raises:
        Exception: if ``opt.dataset`` is not stl10 / cifar10 / cifar100.
    """
    # Idiom fix: identity check `is None` instead of `== None`.
    if opt.in_channels is None:
        in_channels = 1024
    else:
        in_channels = opt.in_channels

    if opt.dataset in ("stl10", "cifar10"):
        num_classes = 10
    elif opt.dataset == "cifar100":
        num_classes = 100
    else:
        raise Exception("Invalid option")

    classification_model = ClassificationModel.ClassificationModel(
        in_channels=in_channels, num_classes=num_classes,
    ).to(opt.device)
    return classification_model
|
'''
Given an array with n integers, you need to find if there are triplets (i, j, k) which satisfies following conditions:
0 < i, i + 1 < j, j + 1 < k < n - 1
Sum of subarrays (0, i - 1), (i + 1, j - 1), (j + 1, k - 1) and (k + 1, n - 1) should be equal.
where we define that subarray (L, R) represents a slice of the original array starting from the element indexed L to the element indexed R.
Example:
Input: [1,2,1,2,1,2,1]
Output: True
Explanation:
i = 1, j = 3, k = 5.
sum(0, i - 1) = sum(0, 0) = 1
sum(i + 1, j - 1) = sum(2, 2) = 1
sum(j + 1, k - 1) = sum(4, 4) = 1
sum(k + 1, n - 1) = sum(6, 6) = 1
Note:
1 <= n <= 2000.
Elements in the given array will be in range [-1,000,000, 1,000,000].
'''
class Solution(object):
    def splitArray(self, nums):
        """Return True iff cuts (i, j, k) with 0 < i, i+1 < j, j+1 < k < n-1
        split nums into four subarrays of equal sum.

        Bug fix: the previous version pruned runs of more than 7 equal
        elements by skipping prefix-sum appends, which corrupted the
        prefix-sum array (s[-1] was no longer the total sum). Replaced by
        the standard O(n^2) approach: for every middle cut j, collect the
        sums achievable with a valid left cut i, then scan right cuts k.

        :type nums: List[int]
        :rtype: bool
        """
        n = len(nums)
        # Four non-empty parts plus three cut elements need at least 7 items.
        if n < 7:
            return False
        prefix = [0] * (n + 1)
        for idx, value in enumerate(nums):
            prefix[idx + 1] = prefix[idx] + value
        # sum of nums[a..b] inclusive == prefix[b + 1] - prefix[a]
        for j in range(3, n - 3):
            left_sums = set()
            for i in range(1, j - 1):
                # sum(0, i-1) == sum(i+1, j-1)
                if prefix[i] == prefix[j] - prefix[i + 1]:
                    left_sums.add(prefix[i])
            for k in range(j + 2, n - 1):
                right = prefix[k] - prefix[j + 1]
                # sum(j+1, k-1) == sum(k+1, n-1) and matches some left sum
                if right == prefix[n] - prefix[k + 1] and right in left_sums:
                    return True
        return False
|
from os import environ
# Length of generated passwords; overridable via environment (default 32).
KCB_PASSWORD_GENERATOR_SIZE = int(environ.get(
    "KCB_PASSWORD_GENERATOR_SIZE", 32))
# AWS region for this service; overridable via environment (default us-east-1).
KCB_AWS_REGION_NAME = environ.get("KCB_AWS_REGION_NAME", "us-east-1")
|
from django import VERSION as DJANGO_VERSION
from django.db import connection, transaction
from django.test import SimpleTestCase
from django_hstore.fields import HStoreDict
from django_hstore_tests.models import DataBag
class TestNotTransactional(SimpleTestCase):
    """Regression test: hstore data must deserialize to HStoreDict when
    queried inside a transaction opened on a fresh DB connection
    (presumably exercising adapter re-registration — see the test name).
    """

    # SimpleTestCase forbids DB access by default; this test needs it.
    allow_database_queries = True

    # Django >= 1.8 exposes connection.connect(); reconnect before each test.
    if DJANGO_VERSION[:2] >= (1, 8):
        def setUp(self):
            # avoid error "connection already closed"
            connection.connect()

    def test_hstore_registring_in_transaction_block(self):
        obj1 = DataBag.objects.create(name='alpha1', data={'v': '1', 'v2': '3'})
        obj2 = DataBag.objects.create(name='alpha2', data={'v': '1', 'v2': '3'})
        # Close any existing connection before doing anything; the query
        # below then runs on a brand-new connection inside the transaction.
        connection.close()
        with transaction.atomic():
            qs = DataBag.objects.filter(name__in=["alpha2", "alpha1"])
            self.assertIsInstance(qs[0].data, HStoreDict)
        obj1.delete()
        obj2.delete()
        connection.close()
|
import string, sys, os, getopt
from os.path import *
# Display unit selected by -b/-k/-m (mutated by the option loop below).
units = 'b'
def print_path (path, bytes):
if units == 'k':
print '%-8ld%s' % (bytes / 1024, path)
elif units == 'm':
print '%-5ld%s' % (bytes / 1024 / 1024, path)
else:
print '%-11ld%s' % (bytes, path)
def dir_size (start, follow_links, my_depth, max_depth):
total = 0L
try:
dir_list = os.listdir (start)
except:
if isdir (start):
print 'Cannot list directory %s' % start
return 0
for item in dir_list:
path = '%s/%s' % (start, item)
try:
stats = os.stat (path)
except:
print 'Cannot stat %s' % path
continue
total += stats[6]
if isdir (path) and (follow_links or \
(not follow_links and not islink (path))):
bytes = dir_size (path, follow_links, my_depth + 1, max_depth)
total += bytes
if (my_depth < max_depth):
print_path (path, bytes)
return total
def usage (name):
print "usage: %s [-bkLm] [-d depth] directory [diretory...]" % name
print '\t-b\t\tDisplay in Bytes (default)'
print '\t-k\t\tDisplay in Kilobytes'
print '\t-m\t\tDisplay in Megabytes'
print '\t-L\t\tFollow symbolic links (Unix only)'
print '\t-d, --depth\t# of directories down to print (default = 0)'
# main area
# Parse the command-line options, then print a size report per directory.
follow_links = 0
depth = 0
try:
    opts, args = getopt.getopt (sys.argv[1:], "bkLmd:", ["depth="])
except getopt.GetoptError:
    usage (sys.argv[0])
    sys.exit (1)
# Option loop mutates the module-global `units` read by print_path.
for o, a in opts:
    if o == '-b':
        units = 'b'
    elif o == '-k':
        units = 'k'
    elif o == '-L':
        follow_links = 1
    elif o == '-m':
        units = 'm'
    elif o in ('-d', '--depth'):
        try:
            depth = string.atoi (a)
        except:
            # Unparseable depth values are silently ignored (depth stays 0).
            pass
if len (args) < 1:
    usage (sys.argv[0])
    sys.exit (1)
else:
    paths = args
for path in paths:
    bytes = dir_size (path, follow_links, 0, depth)
    print_path (path, bytes)
|
# Macro for BUG #11065. This makes it possible to show the grid for a dataset in
# the background.
# Import paraview.simple only if not already in scope (the macro may run in
# a ParaView Python shell where it is pre-imported).
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# Extract the surface of the active source and display it as a wireframe
# with front faces culled, so the grid shows up behind the data.
spcth_0 = GetActiveSource()
ExtractSurface2 = ExtractSurface()
DataRepresentation5 = Show()
DataRepresentation5.Representation = 'Wireframe'
DataRepresentation5.BackfaceRepresentation = 'Cull Frontface'
|
import logging
from enum import Enum
import requests
from back_office.config import ENVIRONMENT_TYPE, SLACK_ENRICHMENT_NOTIFICATION_URL, EnvironmentType
class SlackChannel(Enum):
    """Known Slack channels and their configured webhook URLs."""

    ENRICHMENT_NOTIFICATIONS = 'ENRICHMENT_NOTIFICATIONS'

    def slack_url(self) -> str:
        """Return the webhook URL configured for this channel."""
        # Identity comparison is the canonical way to match enum members.
        if self is SlackChannel.ENRICHMENT_NOTIFICATIONS:
            return SLACK_ENRICHMENT_NOTIFICATION_URL
        raise NotImplementedError(f'Missing slack channel url {self}.')
def send_slack_notification(
    message: str, channel: SlackChannel = SlackChannel.ENRICHMENT_NOTIFICATIONS, prod_only: bool = True
) -> None:
    """Post `message` to the given Slack channel's webhook.

    Args:
        message: text to post.
        channel: target channel (defaults to enrichment notifications).
        prod_only: when True, silently skip outside the PROD environment.
    """
    if ENVIRONMENT_TYPE != EnvironmentType.PROD and prod_only:
        return
    url = channel.slack_url()
    answer = requests.post(url, json={'text': message})
    if not (200 <= answer.status_code < 300):
        # Bug fix: the status code was passed as a lazy-formatting argument
        # without a matching placeholder, so it was never rendered.
        logging.error('Error with status code %s', answer.status_code)
        logging.error(answer.content.decode())
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Serializer tests."""
from __future__ import absolute_import, print_function
from dojson.contrib.to_marc21 import to_marc21
from invenio_pidstore.models import PersistentIdentifier
from invenio_records import Record
from marshmallow import Schema, fields
from invenio_marc21.serializers.marcxml import MARCXMLSerializer
class MySchema(Schema):
    """Test marshmallow schema."""

    # Pulls the persistent identifier's value; the serializer tests in this
    # module expect it to land in MARC controlfield 001.
    control_number = fields.Str(attribute='pid.pid_value')
def test_serialize(app):
    """Test JSON serialize."""
    serializer = MARCXMLSerializer(to_marc21, schema_class=MySchema)
    pid = PersistentIdentifier(pid_type='recid', pid_value='2')
    result = serializer.serialize(pid, Record({'title': 'test'}))
    expected = (
        u"<?xml version='1.0' encoding='UTF-8'?>\n"
        u'<record xmlns="http://www.loc.gov/MARC21/slim">\n'
        u' <controlfield tag="001">2</controlfield>\n'
        u'</record>\n'
    )
    assert result.decode('utf8') == expected
def test_serialize_search():
    """Test MARCXML serialize."""
    def fetcher(obj_uuid, data):
        # Rebuild a PID from the stored source document.
        return PersistentIdentifier(pid_type='recid', pid_value=data['pid'])

    hits = [
        {'_source': dict(pid='1'), '_id': 'a', '_version': 1},
        {'_source': dict(pid='2'), '_id': 'b', '_version': 1},
    ]
    search_result = dict(
        hits=dict(hits=hits, total=2),
        aggregations={},
    )
    serializer = MARCXMLSerializer(to_marc21, schema_class=MySchema)
    data = serializer.serialize_search(fetcher, search_result)
    expected = (
        u"<?xml version='1.0' encoding='UTF-8'?>\n"
        u'<collection xmlns="http://www.loc.gov/MARC21/slim">\n'
        u' <record>\n'
        u' <controlfield tag="001">1</controlfield>\n'
        u' </record>\n'
        u' <record>\n'
        u' <controlfield tag="001">2</controlfield>\n'
        u' </record>\n'
        u'</collection>\n'
    )
    assert data.decode('utf8') == expected
def test_serialize_no_schema_class():
    """Test MARCXML serialization without providing record schema."""
    record = Record({'__order__': ['control_number_identifier'],
                     'control_number_identifier': 'SzGeCERN'})
    pid = PersistentIdentifier(pid_type='recid', pid_value='1')
    serialized = MARCXMLSerializer(to_marc21).serialize(pid, record)
    expected = (
        u'<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n'
        u'<record xmlns="http://www.loc.gov/MARC21/slim">\n'
        u' <controlfield tag="003">SzGeCERN</controlfield>\n'
        u'</record>\n'
    )
    assert serialized.decode('utf8') == expected
def test_serialize_oaipmh():
    """Test MARCXML serialize."""
    serializer = MARCXMLSerializer(to_marc21, schema_class=MySchema)
    pid = PersistentIdentifier(pid_type='recid', pid_value='2')
    tree = serializer.serialize_oaipmh(pid, {'_source': Record({'title': 'test'})})
    # The first child of the returned element tree is controlfield 001.
    assert tree.getchildren()[0].text == '2'
|
import configparser
def bootPropsConfig(artifact, resources, targetDir, scalaVersion = "2.13.1"):
    """Create the configuration to install an artifact and its dependencies"""
    app_section = {
        "org": artifact.org,
        "name": artifact.name,
        "version": artifact.version,
        "class": "com.scleradb.pathgen.Main",
        "cross-versioned": "binary",
    }
    # Resources are optional; only emit the key when some were given.
    if resources:
        app_section["resources"] = ", ".join(resources)

    repositories_section = {
        "local": None,
        "typesafe-ivy-releases": "http://repo.typesafe.com/typesafe/ivy-releases/, [organization]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]",
        "maven-central": None,
        "Sonatype OSS Snapshots": "https://oss.sonatype.org/content/repositories/snapshots",
    }

    # Keys like "local" carry no value, hence allow_no_value; sbt's boot
    # properties use ":" as the key/value delimiter.
    config = configparser.ConfigParser(allow_no_value = True, delimiters = ":")
    config.read_dict({
        "scala": {"version": scalaVersion},
        "app": app_section,
        "repositories": repositories_section,
        "boot": {"directory": targetDir},
        "log": {"level": "error"},
    })
    return config
def writeConfig(config, targetPath):
    """Write the configuration into the specified file"""
    # No spaces around ":" — sbt's boot.properties format expects "key:value".
    with open(targetPath, "wt") as out_file:
        config.write(out_file, space_around_delimiters = False)
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Shijie Qin
@license: Apache Licence
@contact: qsj4work@gmail.com
@site: https://shijieqin.github.io
@software: PyCharm
@file: xmlrpc.py
@time: 2018/11/8 3:16 PM
"""
import xmlrpc.client
class XmlRpc:
    """Small helpers for creating and probing XML-RPC server proxies."""

    @staticmethod
    def connection(host, port, username, password):
        """Return a ServerProxy for host:port (embedding credentials when
        given), or None if the proxy cannot be constructed."""
        if username == "" and password == "":
            address = "http://{0}:{1}/RPC2".format(host, port)
        else:
            address = "http://{0}:{1}@{2}:{3}/RPC2".format(
                username, password, host, port
            )
        try:
            return xmlrpc.client.ServerProxy(address)
        except Exception as error:
            print(error)
            return None

    @staticmethod
    def is_connected(connection):
        """Return 0 when the proxy answers system.listMethods, else -1."""
        if not connection:
            return -1
        try:
            print(connection.system.listMethods())
        except Exception as err:
            # Covers ProtocolError / Fault as well as transport errors —
            # all of them yielded -1 in the original two except clauses.
            print(err)
            return -1
        return 0
|
import db
import add_idea
class MigrationManager:
    """Walks an idea's data dict through versioned migrations.

    NOTE(review): the class originally defined ``run`` twice; the first
    definition was dead code (shadowed by the second) and referenced an
    undefined name ``idea`` (NameError), so it has been removed.
    """

    def __init__(self, pen):
        # pen: interactive console helper used to announce changes and
        # prompt the user during migrations.
        self.pen = pen

    def run(self, version, name, data):
        """Apply the migration for `version` to (name, data) and return
        the updated pair; data already at or above `version` is returned
        unchanged."""
        # idea is already up to date
        if 'version' in data and version <= data['version']:
            return name, data
        if version == 1:
            # fix original function to only return attributes, ie without total
            update_evaluations = lambda pen, x: add_idea.evaluate_enjoyments(pen, x)[0]
            # changed emotions and enjoyments to use Plutchik's wheel
            name, data = self.fix_deltas(name, data, 'emotions', add_idea.add_emotions)
            name, data = self.fix_deltas(name, data, 'enjoyments', update_evaluations)
        elif version == 2:
            # added a required description attribute
            self.pen.write("A new field was added")
            self.pen.down()
            self.pen.right(2)
            data['description'] = add_idea.add_description(self.pen)
            self.pen.left(2)
            self.pen.up()
            self.pen.clear(2)
        # add additional version migrations here as needed
        # update the version
        data['version'] = version
        return name, data

    # assumes that the config specifies provided attribute
    def fix_deltas(self, name, data, attribute, update_idea):
        """Prompt for any options of `attribute` that the config expects
        but `data` does not yet contain."""
        expected = db.config()[attribute]
        missing_attributes = self.find_missing(data, attribute, expected)
        if len(missing_attributes) > 0:
            # indicate what changed
            self.pen.write(f"Updating {attribute}, found new options: {', '.join(missing_attributes)}")
            self.pen.down()
            # prompt for updates
            self.pen.right()
            data[attribute] = update_idea(self.pen, data[attribute])
            self.pen.left()
        else:
            # attribute is all good
            self.pen.write(f"Auto updated {attribute}")
            self.pen.down()
        return name, data

    def find_missing(self, data, attribute, expected):
        """Return the expected options missing from data[attribute]."""
        return list(filter(lambda val: val not in data[attribute], expected))
|
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""parses JSONable objects."""
import collections
import typing
import some.graph
import some.graph.parse
def some_graph_from(
        value: typing.Any,
        ref: str,
        errors: some.graph.parse.Errors
) -> typing.Optional[some.graph.SomeGraph]:
    """
    parses SomeGraph from a JSONable value.

    NOTE(review): this file is generated by mapry ("DO NOT EDIT") — any
    change here will be lost on regeneration.

    :param value: JSONable value
    :param ref: reference to the value (e.g., a reference path)
    :param errors: errors encountered during parsing

    :return: parsed SomeGraph, or None if ``errors``
    """
    if errors.full():
        return None

    if not isinstance(value, dict):
        errors.add(
            ref,
            "Expected a dictionary, but got: {}".format(type(value)))
        return None

    graph = some.graph.parse.placeholder_some_graph()

    ##
    # Parse array_of_maps
    ##

    value_0 = value.get(
        'array_of_maps',
        None)

    if value_0 is None:
        errors.add(
            ref,
            'Property is missing: array_of_maps')
    else:
        if not isinstance(value_0, list):
            errors.add(
                '/'.join((
                    ref, 'array_of_maps')),
                "Expected a list, but got: {}".format(
                    type(value_0)))
        else:
            target_1 = (
                []
            )  # type: typing.List[typing.MutableMapping[str, bool]]
            for i_1, item_1 in enumerate(
                    value_0):
                target_item_1 = (
                    None
                )  # type: typing.Optional[typing.MutableMapping[str, bool]]
                if not isinstance(item_1, dict):
                    errors.add(
                        '/'.join((
                            ref, 'array_of_maps', str(i_1))),
                        "Expected a dict, but got: {}".format(
                            type(item_1)))
                else:
                    # Preserve key order when the input dict is ordered.
                    if isinstance(item_1, collections.OrderedDict):
                        target_2 = (
                            collections.OrderedDict()
                        )  # type: typing.MutableMapping[str, bool]
                    else:
                        target_2 = (
                            dict()
                        )
                    for key_2, value_2 in item_1.items():
                        if not isinstance(key_2, str):
                            errors.add(
                                '/'.join((
                                    ref, 'array_of_maps', str(i_1))),
                                "Expected the key to be a str, but got: {}".format(
                                    type(key_2)))
                            # Skip this key; abort the whole map if the
                            # error budget is exhausted.
                            if errors.full():
                                break
                            else:
                                continue
                        target_item_2 = (
                            None
                        )  # type: typing.Optional[bool]
                        if not isinstance(value_2, bool):
                            errors.add(
                                '/'.join((
                                    ref, 'array_of_maps', str(i_1), repr(key_2))),
                                "Expected a bool, but got: {}".format(
                                    type(value_2)))
                        else:
                            target_item_2 = value_2
                        if target_item_2 is not None:
                            target_2[key_2] = target_item_2
                        if errors.full():
                            break
                    if target_2 is not None:
                        target_item_1 = target_2
                if target_item_1 is not None:
                    target_1.append(
                        target_item_1)
                if errors.full():
                    break
            graph.array_of_maps = target_1

    if errors.full():
        return None

    if not errors.empty():
        return None

    return graph
|
import os
from os.path import exists, isfile, join
import sys
from langtests.c_test import runCTests
# Flags that select a language test suite; each requires one extra argument.
langFlags = {
    "-c": "Run tests for the C language, requires additional argument for executable",
    "-j": "Run tests for the Java language, requires additional argument for java file"
}
# Flags that perform suite maintenance/help actions; they take no arguments.
helpFlags = {
    "-r": "Removes all test-suite files",
    "-h": "Displays all flags and purposes"
}
def printDictionary(d: dict):
    """Print each key/value pair of `d` as a "key : value" line."""
    for key, value in d.items():
        print("%s : %s" % (key, value))
def printValidFlags():
    """Print every supported flag together with its description."""
    print("---VALID FLAGS---")
    for flag_table in (langFlags, helpFlags):
        printDictionary(flag_table)
    print("-----------------")
def help(flag: str):
    """Handle a maintenance/help flag."""
    if flag == "-r":
        # Wipe all generated test-suite artifacts.
        os.system("rm -r inputs.in test-suite/ langtests/ *.test")
    elif flag == "-h":
        printValidFlags()
def test(flag: str, process: str):
    """Run the test suite selected by `flag` against `process` and report."""
    # Only the C runner is wired up; other language flags run zero tests.
    failed = runCTests(process) if flag == "-c" else 0
    if failed:
        print("---" + str(failed) + " TEST(S) HAVE FAILED---")
    else:
        print("---ALL TESTS PASSED---")
def main():
    """Entry point: validate the command line, then dispatch to help/test."""
    argc = len(sys.argv)
    # No flag at all: show the synopsis.
    if argc == 1:
        print("suite.py <flag> <process>")
        return
    flag = sys.argv[1]
    # Unknown flag: complain and list the valid ones.
    if not (flag in helpFlags or flag in langFlags):
        print("%s is not a valid flag" % (flag))
        printValidFlags()
        return
    if argc > 3:
        print("Too many arguments")
        return
    if flag in helpFlags:
        # Help flags run alone; anything extra is an error.
        if argc == 2:
            help(flag)
        else:
            print("Help flags have no arguments.")
        return
    # At this point flag is a language flag and argc is 2 or 3.
    if argc == 3:
        test(flag, sys.argv[2])
    else:
        print("Not enough arguments for language flag.")
    return
main() |
#!/usr/bin/env python3
import os
def parse_csv():
    """Yield (speaker_id, gender) pairs from SPEAKERS.TXT, skipping the
    header line and any row that does not have exactly 8 tab-separated
    fields."""
    with open("SPEAKERS.TXT") as speakers:
        rows = speakers.readlines()[1:]
    for raw in rows:
        fields = raw.strip().split("\t")
        if len(fields) == 8:
            yield int(fields[1].strip()), fields[6].strip()
def move_files(id: int, gender: str):
    """Convert a speaker's mp3 clips to wav, sorted into male/ or female/.

    NOTE(review): unfinished (see TODO below). The f-string references an
    undefined name ``item``, so the first .mp3 hit raises NameError, which
    the blanket ``except Exception: pass`` silently swallows (skipping the
    final print as well). A real fix needs the actual clips/ directory
    layout, which is not visible here — confirm before repairing.
    """
    try:
        for f in os.listdir("clips/"):
            if f.endswith(".mp3"):
                # TODO: finish this method
                os.system(f"ffmpeg -i clips/{id}/{item}/{f} {'male' if gender == 'male' else 'female'}/{f.replace('.mp3', '.wav')}")
        print(id, gender)
    except Exception:
        pass
def main():
    """
    This file has issues, but because I don't need the data right now, I will fix it later
    """
    # Note: os.mkdir raises if the directories already exist.
    os.mkdir("./male")
    os.mkdir("./female")
    for speaker_id, speaker_gender in parse_csv():
        move_files(speaker_id, speaker_gender)
if __name__ == '__main__':
main()
|
name = zhangsan
this is zhangsan
|
"""
Adapted from https://github.com/simbilod/optio
SMF specs from photonics.byu.edu/FiberOpticConnectors.parts/images/smf28.pdf
MFD:
- 10.4 for Cband
- 9.2 for Oband
"""
import hashlib
from typing import Any, Dict, Optional
import meep as mp
import numpy as np
from gdsfactory.serialization import clean_value_name
from gdsfactory.types import Floats
nm = 1e-3    # lengths below are in um; multiply nm values by this factor
nSi = 3.47   # silicon refractive index (used for waveguide/substrate)
nSiO2 = 1.44  # silica refractive index (used for claddings)
def fiber_ncore(fiber_numerical_aperture, fiber_nclad):
    """Return the fiber core index implied by the numerical aperture and
    cladding index: ncore = sqrt(NA**2 + nclad**2)."""
    squared_sum = fiber_numerical_aperture**2 + fiber_nclad**2
    return squared_sum**0.5
def get_simulation_grating_fiber(
    period: float = 0.66,
    fill_factor: float = 0.5,
    n_periods: int = 30,
    widths: Optional[Floats] = None,
    gaps: Optional[Floats] = None,
    slab_thickness: float = 150 * nm,
    fiber_angle_deg: float = 20.0,
    fiber_xposition: float = 1.0,
    fiber_core_diameter: float = 10.4,
    fiber_numerical_aperture: float = 0.14,
    fiber_nclad: float = nSiO2,
    nwg: float = nSi,
    nslab: Optional[float] = None,
    nclad: float = nSiO2,
    nbox: float = nSiO2,
    nsubstrate: float = nSi,
    pml_thickness: float = 1.0,
    substrate_thickness: float = 1.0,
    box_thickness: float = 2.0,
    wg_thickness: float = 220 * nm,
    top_clad_thickness: float = 2.0,
    air_gap_thickness: float = 1.0,
    fiber_thickness: float = 2.0,
    resolution: int = 64,  # pixels/um
    wavelength_start: float = 1.4,
    wavelength_stop: float = 1.7,
    wavelength_points: int = 150,
    eps_averaging: bool = False,
    fiber_port_y_offset_from_air: float = 1,
    waveguide_port_x_offset_from_grating_start: float = 10,
    fiber_port_x_size: Optional[float] = None,
    xmargin: float = 10.0,
) -> Dict[str, Any]:
    r"""Returns a 2D meep simulation setup for a grating coupler with fiber.

    na**2 = ncore**2 - nclad**2
    ncore = sqrt(na**2 + nclad**2)

    Args:
        period: fiber grating period
        fill_factor: fraction of the grating period filled with the grating material.
        n_periods: number of periods
        widths: Optional list of widths. Overrides period, fill_factor, n_periods
        gaps: Optional list of gaps. Overrides period, fill_factor, n_periods
        slab_thickness: thickness of the partially-etched slab layer (um)
        fiber_angle_deg: fiber angle in degrees
        fiber_xposition: xposition
        fiber_core_diameter: fiber diameter
        fiber_numerical_aperture: NA
        fiber_nclad: fiber cladding index.
        nwg: waveguide index.
        nslab: slab index (defaults to nwg).
        nclad: top cladding index.
        nbox: box index bottom.
        nsubstrate: index substrate.
        pml_thickness: pml_thickness (um)
        substrate_thickness: substrate_thickness (um)
        box_thickness: thickness for bottom cladding (um)
        wg_thickness: wg_thickness (um)
        top_clad_thickness: thickness of the top cladding.
        air_gap_thickness: air gap thickness.
        fiber_thickness: fiber_thickness
        resolution: resolution pixels/um
        wavelength_start: min wavelength (um)
        wavelength_stop: max wavelength (um)
        wavelength_points: wavelength points.
        eps_averaging: epsilon averaging.
        fiber_port_y_offset_from_air: y_offset from fiber to air (um).
        waveguide_port_x_offset_from_grating_start: waveguide port placement (um).
        fiber_port_x_size: monitor width across the fiber (defaults to 3.5x core).
        xmargin: margin from PML to grating end

    .. code::

                        fiber_xposition
                             |
                  fiber_core_diameter
                /     /  /     /       |
               /     /  /     /        | fiber_thickness
              /     /  /     /    _ _ _| _ _ _ _ _ _  _
                                       |
                                       | air_gap_thickness
                                  _ _ _| _ _ _ _ _ _  _
                                       |
                nclad                  | top_clad_thickness
                    _   _   _      _ _ _ _ _ _ _ _ _ _ _
        nwg        _| |_| |_| |__________|              _
                                         |               |
        nslab                            |wg_thickness   | slab_thickness
              ______________ _ _ _ _ _ _ _ _ _ _ _ _ _ _|
                                         |
        nbox                             |box_thickness
              ______________ _ _ _ _ _ _ _ _ _ _ _ _ _ _
                                         |
        nsubstrate                       |substrate_thickness
              ______________ _ _ _ _ _ _ _
              |--------------------|<-------->
                                      xmargin
    """
    wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)
    wavelength = np.mean(wavelengths)
    freqs = 1 / wavelengths

    # Uniform grating derived from period/fill_factor unless explicit
    # widths/gaps were given.
    widths = widths or n_periods * [period * fill_factor]
    gaps = gaps or n_periods * [period * (1 - fill_factor)]
    nslab = nslab or nwg

    # Settings are collected to derive a reproducible hash for caching /
    # naming of simulation results.
    settings = dict(
        widths=widths,
        gaps=gaps,
        n_periods=n_periods,
        nslab=nslab,
        fiber_angle_deg=fiber_angle_deg,
        fiber_xposition=fiber_xposition,
        fiber_core_diameter=fiber_core_diameter,
        fiber_numerical_aperture=fiber_numerical_aperture,
        fiber_nclad=fiber_nclad,
        nwg=nwg,
        nclad=nclad,
        nbox=nbox,
        nsubstrate=nsubstrate,
        pml_thickness=pml_thickness,
        substrate_thickness=substrate_thickness,
        box_thickness=box_thickness,
        wg_thickness=wg_thickness,
        top_clad_thickness=top_clad_thickness,
        air_gap_thickness=air_gap_thickness,
        fiber_thickness=fiber_thickness,
        resolution=resolution,
        wavelength_start=wavelength_start,
        wavelength_stop=wavelength_stop,
        wavelength_points=wavelength_points,
        eps_averaging=eps_averaging,
        fiber_port_y_offset_from_air=fiber_port_y_offset_from_air,
        waveguide_port_x_offset_from_grating_start=waveguide_port_x_offset_from_grating_start,
        fiber_port_x_size=fiber_port_x_size,
    )
    settings_string = clean_value_name(settings)
    settings_hash = hashlib.md5(settings_string.encode()).hexdigest()[:8]

    # Angle in radians
    fiber_angle = np.radians(fiber_angle_deg)

    # Z (Y)-domain: total cell height is the sum of the stack layers plus
    # PML on both sides.
    sz = (
        +pml_thickness
        + substrate_thickness
        + box_thickness
        + wg_thickness
        + top_clad_thickness
        + air_gap_thickness
        + fiber_thickness
        + pml_thickness
    )

    # XY (X)-domain
    # Assume fiber port dominates
    # NOTE(review): fiber_port_y is computed again further below with
    # additional stack terms; this first value is only used to derive
    # fiber_port_x_offset_from_angle — confirm this is intentional.
    fiber_port_y = (
        -sz / 2
        + wg_thickness
        + top_clad_thickness
        + air_gap_thickness
        + fiber_port_y_offset_from_air
    )
    fiber_port_x_offset_from_angle = np.abs(fiber_port_y * np.tan(fiber_angle))
    length_grating = np.sum(widths) + np.sum(gaps)
    sxy = (
        2 * xmargin
        + 2 * pml_thickness
        + 2 * fiber_port_x_offset_from_angle
        + length_grating
    )

    # Materials from indices
    slab_material = mp.Medium(index=nslab)
    wg_material = mp.Medium(index=nwg)
    top_clad_material = mp.Medium(index=nclad)
    bottom_clad_material = mp.Medium(index=nbox)
    # Same formula as the module-level fiber_ncore() helper.
    fiber_ncore = (fiber_numerical_aperture**2 + fiber_nclad**2) ** 0.5
    fiber_clad_material = mp.Medium(index=fiber_nclad)
    fiber_core_material = mp.Medium(index=fiber_ncore)

    # Useful reference point
    grating_start = (
        -fiber_xposition
    )  # Since fiber dominates, keep it centered and offset the grating

    # Initialize domain x-z plane simulation
    cell_size = mp.Vector3(sxy, sz)

    # Ports (position, sizes, directions)
    # Recomputed fiber port y including the full lower stack; overrides the
    # earlier value (see NOTE above).
    fiber_port_y = -sz / 2 + (
        +pml_thickness
        + substrate_thickness
        + box_thickness
        + wg_thickness
        + top_clad_thickness
        + air_gap_thickness
        + fiber_port_y_offset_from_air
    )
    fiber_port_center = mp.Vector3(fiber_port_x_offset_from_angle, fiber_port_y)
    fiber_port_x_size = fiber_port_x_size or 3.5 * fiber_core_diameter
    fiber_port_size = mp.Vector3(fiber_port_x_size, 0, 0)
    # fiber_port_direction = mp.Vector3(y=-1).rotate(mp.Vector3(z=1), -1 * fiber_angle)

    waveguide_port_y = -sz / 2 + (
        +pml_thickness
        + substrate_thickness
        + box_thickness / 2
        + wg_thickness / 2
        + top_clad_thickness / 2
    )
    waveguide_port_x = grating_start - waveguide_port_x_offset_from_grating_start
    waveguide_port_center = mp.Vector3(
        waveguide_port_x, waveguide_port_y
    )  # grating_start - dtaper, 0)
    waveguide_port_size = mp.Vector3(
        0, box_thickness + wg_thickness / 2 + top_clad_thickness
    )
    waveguide_port_direction = mp.X

    # Geometry
    fiber_clad = 120
    hfiber_geom = 200  # Some large number to make fiber extend into PML
    # Tilted fiber cladding block, rotated by the fiber angle.
    geometry = [
        mp.Block(
            material=fiber_clad_material,
            center=mp.Vector3(0, waveguide_port_y - wg_thickness / 2),
            size=mp.Vector3(fiber_clad, hfiber_geom),
            e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
            e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
        )
    ]
    # Fiber core, same tilt as the cladding.
    geometry.append(
        mp.Block(
            material=fiber_core_material,
            center=mp.Vector3(x=0),
            size=mp.Vector3(fiber_core_diameter, hfiber_geom),
            e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
            e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1 * fiber_angle),
        )
    )

    # Air gap
    geometry.append(
        mp.Block(
            material=mp.air,
            center=mp.Vector3(
                0,
                -sz / 2
                + (
                    +pml_thickness
                    + substrate_thickness
                    + box_thickness
                    + wg_thickness
                    + top_clad_thickness
                    + air_gap_thickness / 2
                ),
            ),
            size=mp.Vector3(mp.inf, air_gap_thickness),
        )
    )

    # Top cladding
    geometry.append(
        mp.Block(
            material=top_clad_material,
            center=mp.Vector3(
                0,
                -sz / 2
                + (
                    +pml_thickness
                    + substrate_thickness
                    + box_thickness
                    + wg_thickness / 2
                    + top_clad_thickness / 2
                ),
            ),
            size=mp.Vector3(mp.inf, wg_thickness + top_clad_thickness),
        )
    )

    # Bottom cladding
    geometry.append(
        mp.Block(
            material=bottom_clad_material,
            center=mp.Vector3(
                0,
                -sz / 2 + (+pml_thickness + substrate_thickness + box_thickness / 2),
            ),
            size=mp.Vector3(mp.inf, box_thickness),
        )
    )

    # slab (the partially-etched layer under the grating teeth)
    geometry.append(
        mp.Block(
            material=slab_material,
            center=mp.Vector3(
                0,
                -sz / 2
                + (
                    +pml_thickness
                    + substrate_thickness
                    + box_thickness
                    + slab_thickness / 2
                ),
            ),
            size=mp.Vector3(mp.inf, slab_thickness),
        )
    )

    etch_depth = wg_thickness - slab_thickness
    x = grating_start
    # grating teeth: one block per (width, gap) pair, marching in +x.
    for width, gap in zip(widths, gaps):
        geometry.append(
            mp.Block(
                material=wg_material,
                center=mp.Vector3(
                    x + gap / 2,
                    -sz / 2
                    + (
                        +pml_thickness
                        + substrate_thickness
                        + box_thickness
                        + wg_thickness
                        - etch_depth / 2
                    ),
                ),
                size=mp.Vector3(width, etch_depth),
            )
        )
        x += width + gap

    # waveguide feeding the grating from the left
    geometry.append(
        mp.Block(
            material=wg_material,
            center=mp.Vector3(
                -sxy / 2,
                -sz / 2
                + (
                    +pml_thickness
                    + substrate_thickness
                    + box_thickness
                    + wg_thickness
                    - etch_depth / 2
                ),
            ),
            size=mp.Vector3(sxy, etch_depth),
        )
    )

    # Substrate
    geometry.append(
        mp.Block(
            material=mp.Medium(index=nsubstrate),
            center=mp.Vector3(0, -sz / 2 + pml_thickness / 2 + substrate_thickness / 2),
            size=mp.Vector3(mp.inf, pml_thickness + substrate_thickness),
        )
    )

    # PMLs
    boundary_layers = [mp.PML(pml_thickness)]

    # mode frequency
    fcen = 1 / wavelength

    fwidth = 0.2 * fcen

    # Waveguide source: eigenmode launched toward the grating.
    sources_directions = [mp.X]
    sources = [
        mp.EigenModeSource(
            src=mp.GaussianSource(frequency=fcen, fwidth=fwidth),
            size=waveguide_port_size,
            center=waveguide_port_center,
            eig_band=1,
            direction=sources_directions[0],
            eig_match_freq=True,
            eig_parity=mp.ODD_Z,
        )
    ]

    # Ports: mode monitors slightly offset from the source/fiber positions.
    waveguide_monitor_port = mp.ModeRegion(
        center=waveguide_port_center + mp.Vector3(x=0.2), size=waveguide_port_size
    )
    fiber_monitor_port = mp.ModeRegion(
        center=fiber_port_center - mp.Vector3(y=0.2), size=fiber_port_size
    )
    sim = mp.Simulation(
        resolution=resolution,
        cell_size=cell_size,
        boundary_layers=boundary_layers,
        geometry=geometry,
        sources=sources,
        dimensions=2,
        eps_averaging=eps_averaging,
    )
    waveguide_monitor = sim.add_mode_monitor(
        freqs, waveguide_monitor_port, yee_grid=True
    )
    fiber_monitor = sim.add_mode_monitor(freqs, fiber_monitor_port)
    field_monitor_point = (0, 0, 0)
    return dict(
        sim=sim,
        cell_size=cell_size,
        freqs=freqs,
        fcen=fcen,
        waveguide_monitor=waveguide_monitor,
        waveguide_port_direction=waveguide_port_direction,
        fiber_monitor=fiber_monitor,
        fiber_angle_deg=fiber_angle_deg,
        sources=sources,
        field_monitor_point=field_monitor_point,
        initialized=False,
        settings=settings,
        settings_hash=settings_hash,
    )
def get_port_1D_eigenmode(
    sim_dict,
    band_num: int = 1,
    fiber_angle_deg: float = 15.0,
):
    """Solve the 1D eigenmodes on the waveguide and fiber monitor planes.

    Args:
        sim_dict: simulation dict (as returned by get_simulation_grating_fiber)
        band_num: band number to solve for
        fiber_angle_deg: fiber tilt in degrees; sets the fiber k-point direction

    Returns:
        (x_waveguide, ys_waveguide, eigenmode_waveguide,
         xs_fiber, y_fiber, eigenmode_fiber)
        Mode objects compatible with /modes plugin
    """
    # Initialize
    sim = sim_dict["sim"]
    source = sim_dict["sources"][0]
    waveguide_monitor = sim_dict["waveguide_monitor"]
    fiber_monitor = sim_dict["fiber_monitor"]

    # Obtain source frequency
    fsrc = source.src.frequency

    # Obtain xsection geometry from the monitor regions
    center_fiber = fiber_monitor.regions[0].center
    size_fiber = fiber_monitor.regions[0].size
    center_waveguide = waveguide_monitor.regions[0].center
    size_waveguide = waveguide_monitor.regions[0].size

    # Solve for the modes; init_sim() is only run once per sim_dict.
    if sim_dict["initialized"] is False:
        sim.init_sim()
        sim_dict["initialized"] = True

    # Waveguide mode, propagating along +x.
    eigenmode_waveguide = sim.get_eigenmode(
        direction=mp.X,
        where=mp.Volume(center=center_waveguide, size=size_waveguide),
        band_num=band_num,
        kpoint=mp.Vector3(
            fsrc * 3.48, 0, 0
        ),  # Hardcoded index for now, pull from simulation eventually
        frequency=fsrc,
    )
    # Sample positions across the waveguide monitor plane.
    ys_waveguide = np.linspace(
        center_waveguide.y - size_waveguide.y / 2,
        center_waveguide.y + size_waveguide.y / 2,
        int(sim.resolution * size_waveguide.y),
    )
    x_waveguide = center_waveguide.x

    # Fiber mode, with the k-vector tilted by the fiber angle.
    eigenmode_fiber = sim.get_eigenmode(
        direction=mp.NO_DIRECTION,
        where=mp.Volume(center=center_fiber, size=size_fiber),
        band_num=band_num,
        kpoint=mp.Vector3(0, fsrc * 1.45, 0).rotate(
            mp.Vector3(z=1), -1 * np.radians(fiber_angle_deg)
        ),  # Hardcoded index for now, pull from simulation eventually
        frequency=fsrc,
    )
    # Sample positions across the fiber monitor plane.
    xs_fiber = np.linspace(
        center_fiber.x - size_fiber.x / 2,
        center_fiber.x + size_fiber.x / 2,
        int(sim.resolution * size_fiber.x),
    )
    y_fiber = center_fiber.y

    return (
        x_waveguide,
        ys_waveguide,
        eigenmode_waveguide,
        xs_fiber,
        y_fiber,
        eigenmode_fiber,
    )
def plot(sim, eps_parameters=None) -> None:
    """Render a 2D plot of the simulation geometry.

    Args:
        sim: simulation object exposing ``plot2D``
        eps_parameters: optional dict of permittivity-plot options,
            forwarded unchanged to ``plot2D``
    """
    sim.plot2D(eps_parameters=eps_parameters)
    # plt.colorbar()
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Plotting: draw permittivity contours at each distinct material epsilon
    epsilons = [1, 1.43482, 1.44, 1.44427, 3.47]
    eps_parameters = {"contour": True, "levels": np.unique(epsilons)}
    # Fiber numerical aperture from core/cladding indices: NA = sqrt(n_core^2 - n_clad^2)
    fiber_na = float(np.sqrt(1.44427**2 - 1.43482**2))
    sim_dict = get_simulation_grating_fiber(
        # grating parameters
        period=0.66,
        fill_factor=0.5,
        n_periods=30,
        # fiber parameters,
        fiber_angle_deg=20.0,
        fiber_xposition=0.0,
        fiber_core_diameter=9,
        fiber_numerical_aperture=fiber_na,
        fiber_nclad=nSiO2,
        # material parameters
        nwg=3.47,
        nclad=1.44,
        nbox=1.44,
        nsubstrate=3.47,
        # stack parameters
        pml_thickness=1.0,
        substrate_thickness=1.0,
        box_thickness=2.0,
        wg_thickness=220 * nm,
        top_clad_thickness=2.0,
        air_gap_thickness=1.0,
        fiber_thickness=2.0,
        # simulation parameters
        resolution=50,
    )
    plot(sim_dict["sim"], eps_parameters=eps_parameters)
    # plot(sim_dict["sim"])
    plt.show()
|
from .mobile_robot_env import *
MAX_STEPS = 1500
class MobileRobot2TargetGymEnv(MobileRobotGymEnv):
    """
    Gym wrapper for Mobile Robot environment with 2 targets
    WARNING: to be compatible with kuka scripts, additional keyword arguments are discarded
    :param urdf_root: (str) Path to pybullet urdf files
    :param renders: (bool) Whether to display the GUI or not
    :param is_discrete: (bool) Whether to use discrete or continuous actions
    :param name: (str) name of the folder where recorded data will be stored
    :param max_distance: (float) Max distance between end effector and the button (for negative reward)
    :param shape_reward: (bool) Set to true, reward = -distance_to_goal
    :param use_srl: (bool) Set to true, use srl_models
    :param srl_model_path: (str) Path to the srl model
    :param record_data: (bool) Set to true, record frames with the rewards.
    :param use_ground_truth: (bool) Set to true, the observation will be the ground truth (arm position)
    :param random_target: (bool) Set the target to a random position
    :param state_dim: (int) When learning states
    :param learn_states: (bool)
    :param verbose: (bool) Whether to print some debug info
    :param save_path: (str) location where the saved data should go
    :param env_rank: (int) the number ID of the environment
    :param pipe: (Queue, [Queue]) contains the input and output of the SRL model
    :param fpv: (bool) enable first person view camera
    :param srl_model: (str) The SRL_model used
    """

    def __init__(self, name="mobile_robot_2target", **kwargs):
        super(MobileRobot2TargetGymEnv, self).__init__(name=name, **kwargs)
        # Index (into self.button_pos) of the target the robot must reach next
        self.current_target = 0

    def reset(self):
        """Rebuild the whole pybullet scene and return the first observation.

        Creates the ground plane, two target buttons (the second tinted red),
        four colored boundary walls, and the robot at a random start position
        within the middle third of the arena.
        """
        self.current_target = 0
        self.terminated = False
        p.resetSimulation()
        p.setPhysicsEngineParameter(numSolverIterations=150)
        p.setTimeStep(self._timestep)
        p.loadURDF(os.path.join(self._urdf_root, "plane.urdf"), [0, 0, 0])
        p.setGravity(0, 0, -10)
        # Init the robot randomly
        x_start = self._max_x / 2 + self.np_random.uniform(- self._max_x / 3, self._max_x / 3)
        y_start = self._max_y / 2 + self.np_random.uniform(- self._max_y / 3, self._max_y / 3)
        self.robot_pos = np.array([x_start, y_start, 0])
        # Initialize target position
        self.button_uid = []
        self.button_pos = []
        # First target: fixed near the right edge unless random placement is on
        x_pos = 0.9 * self._max_x
        y_pos = self._max_y * 3 / 4
        if self._random_target:
            margin = 0.1 * self._max_x
            x_pos = self.np_random.uniform(self._min_x + margin, self._max_x - margin)
            y_pos = self.np_random.uniform(self._min_y + margin, self._max_y - margin)
        self.button_uid.append(p.loadURDF("/urdf/cylinder.urdf", [x_pos, y_pos, 0], useFixedBase=True))
        self.button_pos.append(np.array([x_pos, y_pos, 0]))
        # Second target: fixed near the left edge unless random placement is on
        x_pos = 0.1 * self._max_x
        y_pos = self._max_y * 3 / 4
        if self._random_target:
            margin = 0.1 * self._max_x
            x_pos = self.np_random.uniform(self._min_x + margin, self._max_x - margin)
            y_pos = self.np_random.uniform(self._min_y + margin, self._max_y - margin)
        self.button_uid.append(p.loadURDF("/urdf/cylinder.urdf", [x_pos, y_pos, 0], useFixedBase=True))
        self.button_pos.append(np.array([x_pos, y_pos, 0]))
        # Change color to red for the second button
        p.changeVisualShape(self.button_uid[-1], -1, rgbaColor=[0.8, 0, 0, 1])
        # Add walls
        # Path to the urdf file
        wall_urdf = "/urdf/wall.urdf"
        # RGBA (red, green, blue, alpha) colors
        red, green, blue = [0.8, 0, 0, 1], [0, 0.8, 0, 1], [0, 0, 0.8, 1]
        wall_left = p.loadURDF(wall_urdf, [self._max_x / 2, 0, 0], useFixedBase=True)
        # Change color
        p.changeVisualShape(wall_left, -1, rgbaColor=red)
        # getQuaternionFromEuler -> define orientation
        wall_bottom = p.loadURDF(wall_urdf, [self._max_x, self._max_y / 2, 0],
                                 p.getQuaternionFromEuler([0, 0, np.pi / 2]), useFixedBase=True)
        wall_right = p.loadURDF(wall_urdf, [self._max_x / 2, self._max_y, 0], useFixedBase=True)
        p.changeVisualShape(wall_right, -1, rgbaColor=green)
        wall_top = p.loadURDF(wall_urdf, [self._min_x, self._max_y / 2, 0],
                              p.getQuaternionFromEuler([0, 0, np.pi / 2]), useFixedBase=True)
        p.changeVisualShape(wall_top, -1, rgbaColor=blue)
        self.walls = [wall_left, wall_bottom, wall_right, wall_top]
        # Add mobile robot
        self.robot_uid = p.loadURDF(os.path.join(self._urdf_root, "racecar/racecar.urdf"), self.robot_pos,
                                    useFixedBase=True)
        self._env_step_counter = 0
        # Let the physics settle before taking the first observation
        for _ in range(50):
            p.stepSimulation()
        self._observation = self.getObservation()
        if self.saver is not None:
            self.saver.reset(self._observation, self.getTargetPos(), self.getGroundTruth())
        if self.srl_model != "raw_pixels":
            return self.getSRLState(self._observation)
        return np.array(self._observation)

    def getTargetPos(self):
        # Return only the [x, y] coordinates of the currently active target
        return self.button_pos[self.current_target][:2]

    def step(self, action):
        """Apply one discrete action and advance the simulation one step.

        :param action: (int) 0..3, mapped to -x/+x/-y/+y displacements of
            magnitude DELTA_POS (plus gaussian noise)
        :return: (observation, reward, done, info) gym tuple
        """
        # True if it has bumped against a wall
        self.has_bumped = False
        if self._is_discrete:
            dv = DELTA_POS
            # Add noise to action
            dv += self.np_random.normal(0.0, scale=NOISE_STD)
            dx = [-dv, dv, 0, 0][action]
            dy = [0, 0, -dv, dv][action]
            real_action = np.array([dx, dy])
        else:
            raise ValueError("Only discrete actions is supported")
        if self.verbose:
            print(np.array2string(np.array(real_action), precision=2))
        previous_pos = self.robot_pos.copy()
        self.robot_pos[:2] += real_action
        # Handle collisions
        for i, (limit, robot_dim) in enumerate(zip([self._max_x, self._max_y], [ROBOT_LENGTH, ROBOT_WIDTH])):
            margin = self.collision_margin + robot_dim / 2
            # If it has bumped against a wall, stay at the previous position
            if self.robot_pos[i] < margin or self.robot_pos[i] > limit - margin:
                self.has_bumped = True
                self.robot_pos = previous_pos
                break
        # Update mobile robot position
        p.resetBasePositionAndOrientation(self.robot_uid, self.robot_pos, [0, 0, 0, 1])
        p.stepSimulation()
        self._env_step_counter += 1
        self._observation = self.getObservation()
        reward = self._reward()
        done = self._termination()
        if self.saver is not None:
            self.saver.step(self._observation, action, reward, done, self.getGroundTruth())
        if self.srl_model != "raw_pixels":
            return self.getSRLState(self._observation), reward, done, {}
        return np.array(self._observation), reward, done, {}

    def _reward(self):
        """
        +1 when within REWARD_DIST_THRESHOLD of the current target (the focus
        then advances to the next target), -1 on wall contact, 0 otherwise.
        When shaped rewards are enabled, -distance to the current target is
        returned instead.
        :return: (float)
        """
        # Distance to target
        distance = np.linalg.norm(self.getTargetPos() - self.robot_pos[:2], 2)
        reward = 0
        if distance <= REWARD_DIST_THRESHOLD:
            reward = 1
            # Advance the focus to the next target once this one is reached
            if self.current_target < len(self.button_pos) - 1:
                self.current_target += 1
        # Negative reward when it bumps into a wall
        if self.has_bumped:
            reward = -1
        if self._shape_reward:
            return -distance
        return reward
|
# coding: utf-8
# #### Importing libraries
# In[6]:
from skimage.io import imread
from skimage.color import rgb2gray
import numpy as np
from matplotlib import pyplot as plt
# #### 1. Test the MATLAB image functions to read, display, and write images. Use buckeyes_gray.bmp and buckeyes_rgb.bmp from the class webpage
# In[5]:
grayIm = imread('buckeyes_gray.bmp')
plt.imsave('output/buckeyes_gray.jpg', grayIm, cmap= plt.get_cmap('gray'))
plt.imshow(grayIm, aspect='auto', cmap = plt.get_cmap('gray'))
# In[4]:
rgbIm = imread('buckeyes_rgb.bmp')
plt.imsave('output/buckeyes_rgb.jpg', rgbIm)
plt.imshow(rgbIm, aspect='auto')
# #### Q. Read and convert the rgb image to grayscale using the NTSC conversion formula via the MATLAB function rgb2gray. Display your image to verify the result
#
# The NTSC Conversion formula is given by $$ intensity = 0.2989*red + 0.5870*green + 0.1140*blue $$
#
# These values have be derived experimentally to match the human cognitive biases regarding colours.
# In[10]:
grayIm_converted = rgb2gray(rgbIm)
plt.imsave('output/buckeyes_gray_converted.bmp', grayIm_converted, cmap = plt.get_cmap('gray'))
plt.imshow(grayIm_converted, aspect='auto', cmap = plt.get_cmap('gray'))
# #### Q. Test more fully by creating, writing, and reading a checkerboard image
# In[22]:
zBlock = np.zeros((10,10))
oBlock = np.ones((10,10))*255
pattern = np.block([[zBlock,oBlock], [oBlock,zBlock]])
checkerIm = np.tile(pattern, (5,5))
plt.imsave('output/checker.bmp', checkerIm, cmap = plt.get_cmap('gray'))
plt.imshow(checkerIm, aspect='auto', cmap = plt.get_cmap('gray'))
|
#
# _/_/_/ _/_/_/ _/_/_/ _/_/_/ _/ _/ _/_/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/_/_/ _/ _/ _/_/ _/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/ _/ _/_/_/ _/_/_/ _/ _/ _/_/_/
#
# By Vlad Ivanov, 2018.
# By Nikita Ivanov, 2018
#
# Email: vlad94568@gmail.com
from src.common import *
# Fired rocket.
class Rocket(SceneElement):
    """A fired rocket; its color flickers to a new random one every third draw."""

    def __init__(self, x, y):
        SceneElement.__init__(self, x, y)
        # Current rocket color and a counter driving the flicker effect.
        self.color = mk_random_color()
        self.draw_count = 0

    def draw(self, screen):
        """Draw the rocket as a small 5x8 outlined rectangle on `screen`."""
        pygame.draw.rect(screen, self.color, (self.x, self.y, 5, 8), 1)
        self.draw_count += 1
        if self.draw_count % 3 == 0:
            self.color = mk_random_color()
|
"""Security configuration for our Pyramid application."""
import os
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Everyone, Authenticated, Allow
from pyramid.session import SignedCookieSessionFactory
from passlib.apps import custom_app_context as context
class MyRoot(object):
    """Root resource for the app; its ACL grants the 'secret' permission
    to any authenticated principal."""

    def __init__(self, request):
        # Pyramid passes the current request when calling the root factory.
        self.request = request

    __acl__ = [
        (Allow, Authenticated, 'secret')
    ]
def check_credentials(username, password):
    """Validate a username/password pair against the configured account.

    The expected username and password hash are read from the
    AUTH_USERNAME / AUTH_PASSWORD environment variables.  Returns True
    only when both are configured, the username matches, and the
    password verifies against the stored hash; otherwise False.
    """
    expected_user = os.environ.get('AUTH_USERNAME', '')
    expected_hash = os.environ.get('AUTH_PASSWORD', '')
    # Guard clauses: no configured account, or wrong username -> reject.
    if not (expected_user and expected_hash):
        return False
    if username != expected_user:
        return False
    return bool(context.verify(password, expected_hash))
def includeme(config):
    """Configuration for security.

    Wires up: auth-ticket authentication, ACL authorization, the MyRoot
    root factory, a signed-cookie session factory, and mandatory CSRF
    checks.  Secrets come from the AUTH_SECRET and SESSION_SECRET
    environment variables.
    """
    auth_secret = os.environ.get('AUTH_SECRET', '')
    authn_policy = AuthTktAuthenticationPolicy(
        secret=auth_secret,
        hashalg='sha512'
    )
    config.set_authentication_policy(authn_policy)
    authz_policy = ACLAuthorizationPolicy()
    config.set_authorization_policy(authz_policy)
    config.set_root_factory(MyRoot)
    # NOTE(review): no default here, so an unset SESSION_SECRET yields None
    # (unlike AUTH_SECRET above, which falls back to '') — confirm intended.
    session_secret = os.environ.get('SESSION_SECRET')
    session_factory = SignedCookieSessionFactory(session_secret)
    config.set_session_factory(session_factory)
    config.set_default_csrf_options(require_csrf=True)
|
# Python - 3.6.0
# Codewars-style test calls: `Test` and `solve` are not defined in this
# file — presumably supplied by the kata runtime (verify before running
# standalone).
Test.it('Basic tests')
Test.assert_equals(solve('1 2 36 4 8', 2), 16)
Test.assert_equals(solve('1 2 36 4 8', 3), 8)
Test.assert_equals(solve('1 2 36 4 8', 4), 11)
Test.assert_equals(solve('1 2 36 4 8', 8), 4)
|
# pylint: disable=attribute-defined-outside-init
import unittest
from tests import vcr
from corkus import Corkus
from corkus.objects import PartialIngredient, ProfessionType, LogicSymbol, IdentificationType
from corkus.errors import BadRequest
class TestIngredient(unittest.IsolatedAsyncioTestCase):
    """Tests for the Corkus ingredient API wrapper.

    Each test is wrapped in a VCR cassette, so HTTP interactions are
    recorded/replayed rather than issued live on every run.
    """

    async def asyncSetUp(self):
        # Fresh API client per test; closed again in asyncTearDown.
        self.corkus = Corkus()

    @vcr.use_cassette
    async def test_ingredient_get(self):
        # Fetch a known ingredient and spot-check its fields.
        glow_bulb = await self.corkus.ingredient.get("Glow Bulb Seeds")
        self.assertEqual(glow_bulb.name, "Glow Bulb Seeds")
        self.assertEqual(glow_bulb.tier, 3)
        self.assertEqual(glow_bulb.required_level, 105)
        self.assertGreater(glow_bulb.sprite.id, 0)
        self.assertIn(ProfessionType.TAILORING, glow_bulb.required_professions)
        self.assertNotEqual(glow_bulb.item_modifiers.durability, 0)
        self.assertGreater(glow_bulb.item_modifiers.skill_points.defence, 0)
        self.assertTrue(any(i.type == IdentificationType.HEALTH_REGEN_RAW and i.values.min == 130 and i.values.max == 145 and i.value is None for i in glow_bulb.identifications))
        # Ingredient with positional placement modifiers.
        major = await self.corkus.ingredient.get("Major's Badge")
        self.assertNotEqual(major.position_modifiers.above, 0)
        self.assertNotEqual(major.position_modifiers.left, 0)
        self.assertNotEqual(major.position_modifiers.right, 0)
        self.assertNotEqual(major.position_modifiers.under, 0)
        self.assertNotEqual(major.position_modifiers.touching, 0)
        self.assertNotEqual(major.position_modifiers.not_touching, 0)
        self.assertNotEqual(major.consumable_modifiers.duration, 0)
        # Ingredient with consumable charge modifiers.
        breath = await self.corkus.ingredient.get("Draconic Bone Marrow")
        self.assertGreater(breath.consumable_modifiers.charges, 0)
        # Ingredient with sprite damage and all five skill-point modifiers.
        horizon = await self.corkus.ingredient.get("Vortexian Event Horizon")
        self.assertGreater(horizon.sprite.damage, 0)
        self.assertNotEqual(horizon.item_modifiers.durability, 0)
        self.assertGreater(horizon.item_modifiers.skill_points.strength, 0)
        self.assertGreater(horizon.item_modifiers.skill_points.dexterity, 0)
        self.assertGreater(horizon.item_modifiers.skill_points.intelligence, 0)
        self.assertGreater(horizon.item_modifiers.skill_points.defence, 0)
        self.assertGreater(horizon.item_modifiers.skill_points.agility, 0)

    @vcr.use_cassette
    async def test_ingredient_invalid(self):
        # Unknown ingredient names surface as BadRequest.
        with self.assertRaises(BadRequest):
            await self.corkus.ingredient.get('an invalid ingredient')

    @vcr.use_cassette
    async def test_ingredient_all(self):
        all_ingredients = await self.corkus.ingredient.list_all()
        self.assertTrue(any(i.name == "Gaze of Darkness" for i in all_ingredients))

    @vcr.use_cassette
    async def test_ingredient_search_name(self):
        result = await self.corkus.ingredient.search_by_name("Glow Bulb Seeds")
        self.assertEqual(len(result), 1)
        self.assertTrue(result[0].name == "Glow Bulb Seeds")

    @vcr.use_cassette
    async def test_ingredient_search_tier(self):
        result = await self.corkus.ingredient.search_by_tier(2)
        self.assertGreater(len(result), 0)
        self.assertTrue(all(i.tier == 2 for i in result))

    @vcr.use_cassette
    async def test_ingredient_search_level(self):
        result = await self.corkus.ingredient.search_by_level(76)
        self.assertGreater(len(result), 0)
        self.assertTrue(all(i.required_level == 76 for i in result))

    @vcr.use_cassette
    async def test_ingredient_search_professions_and(self):
        # AND: every hit must require both professions.
        result = await self.corkus.ingredient.search_by_professions(
            LogicSymbol.AND,
            [ProfessionType.WOODWORKING, ProfessionType.ALCHEMISM]
        )
        self.assertGreater(len(result), 0)
        for i in result:
            self.assertIn(ProfessionType.WOODWORKING, i.required_professions)
            self.assertIn(ProfessionType.ALCHEMISM, i.required_professions)

    @vcr.use_cassette
    async def test_ingredient_search_professions_or(self):
        # OR: every hit must require at least one of the professions.
        result = await self.corkus.ingredient.search_by_professions(
            LogicSymbol.OR,
            [ProfessionType.WOODWORKING, ProfessionType.ALCHEMISM]
        )
        self.assertGreater(len(result), 0)
        for i in result:
            self.assertTrue(
                ProfessionType.WOODWORKING in i.required_professions or
                ProfessionType.ALCHEMISM in i.required_professions
            )

    @vcr.use_cassette
    async def test_ingredient_search_sprite(self):
        result = await self.corkus.ingredient.search_by_sprite(
            LogicSymbol.AND,
            id = 449
        )
        self.assertGreater(len(result), 0)
        self.assertTrue(all(i.sprite.id == 449 for i in result))

    @vcr.use_cassette
    async def test_ingredient_search_identifications(self):
        # (type, min, max) triples; None bounds mean "any value".
        result = await self.corkus.ingredient.search_by_identifications(
            LogicSymbol.AND,
            [
                (IdentificationType.XP_BONUS, 4, 6),
                (IdentificationType.LOOT_BONUS, None, None),
            ]
        )
        self.assertGreater(len(result), 0)
        for ing in result:
            self.assertTrue(any(id.type == IdentificationType.XP_BONUS for id in ing.identifications))

    @vcr.use_cassette
    async def test_ingredient_search_item_modifiers(self):
        result = await self.corkus.ingredient.search_by_item_modifiers(
            LogicSymbol.AND,
            durability = -28
        )
        self.assertGreater(len(result), 0)
        self.assertTrue(all(i.item_modifiers.durability == -28 for i in result))

    @vcr.use_cassette
    async def test_ingredient_search_consumabl_modifiers(self):
        result = await self.corkus.ingredient.search_by_consumable_modifiers(
            LogicSymbol.AND,
            duration = 60
        )
        self.assertGreater(len(result), 0)
        self.assertTrue(all(i.consumable_modifiers.duration == 60 for i in result))

    @vcr.use_cassette
    async def test_ingredient_partial(self):
        # A PartialIngredient can be resolved into the full object via fetch().
        ingredient = await PartialIngredient(self.corkus, "Gaze of Darkness").fetch()
        self.assertTrue(ingredient.name == "Gaze of Darkness")

    async def asyncTearDown(self):
        await self.corkus.close()
|
from flask import render_template
from . import auth
@auth.app_errorhandler(404)
def fourOfour(error):
    """Application-wide 404 handler: render the custom not-found page
    and return it with the 404 status code."""
    return render_template('fourofour.html'), 404
|
import os
import os.path
import yaml
import terminal
from manifests import io
from manifests.deploymanifest import DeployManifest, DeployManifestArtifact, DeployManifestProcess
from manifests.errors import ReadManifestError
DEFAULT_MANIFEST_FILENAME = "deploy.yml"

def make_manifest_path(root_dir: str = None, filename: str = None, terminal_mode: bool = False):
    """Build the absolute, normalized path to a deploy manifest.

    Args:
        root_dir: directory containing the manifest; defaults to the
            current working directory.
        filename: manifest file name; defaults to DEFAULT_MANIFEST_FILENAME.
        terminal_mode: accepted for signature parity with read(); unused here.

    Returns:
        The resolved (os.path.realpath) manifest path.
    """
    # `is None` instead of `== None`: identity check is the correct idiom
    # and avoids surprises with objects overriding __eq__.
    if root_dir is None:
        root_dir = os.getcwd()
    if filename is None:
        filename = DEFAULT_MANIFEST_FILENAME
    return os.path.realpath(os.path.join(root_dir, filename))
def read(filename: str = None, terminal_mode: bool = False):
    """Read and return the deploy manifest data.

    Args:
        filename: path to the manifest; when None, the default manifest
            path (deploy.yml in the current directory) is used.
        terminal_mode: when True, errors are reported via terminal.err
            instead of raising ReadManifestError.

    Returns:
        The manifest data yielded by io.read_file.

    Raises:
        ReadManifestError: when the manifest is missing or not a regular
            file (only when terminal_mode is False).
    """
    if filename is None:
        manifest_path = make_manifest_path(terminal_mode=terminal_mode)
    else:
        manifest_path = os.path.realpath(filename)
    manifest_filename = os.path.basename(manifest_path)
    manifest_dirname = os.path.dirname(manifest_path)
    if not os.path.exists(manifest_path):
        if terminal_mode:
            # BUG FIX: was `term.ExitCodes...` (NameError — the module is
            # imported as `terminal`, no `term` name exists in this file).
            # Assumes `terminal` exposes ExitCodes — TODO confirm.
            terminal.err(terminal.ExitCodes.GeneralError, f"{manifest_filename} doesn't exist in directory {manifest_dirname}.")
        else:
            raise ReadManifestError(f"Unable to read {manifest_filename}, file doesn't exist.")
    if not os.path.isfile(manifest_path):
        if terminal_mode:
            terminal.err(terminal.ExitCodes.GeneralError, f"{manifest_path} is not a file.")
        else:
            raise ReadManifestError(f"{manifest_filename} is not a file.")
    with io.read_file(manifest_path) as md:
        manifest_data = md
    return manifest_data
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-02-05 11:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename QuestionnaireAnswers.author to `reviewer`; no other schema
    changes."""

    dependencies = [
        ('questionnaires_manager', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='questionnaireanswers',
            old_name='author',
            new_name='reviewer',
        ),
    ]
|
import logging
import numpy as np
import pytest
from sklearndf.classification import RandomForestClassifierDF
from sklearndf.pipeline import ClassifierPipelineDF, RegressorPipelineDF
from sklearndf.regression import RandomForestRegressorDF
from ..conftest import check_ranking
from facet.data import Sample
from facet.selection import LearnerGrid, LearnerRanker
from facet.validation import StratifiedBootstrapCV
log = logging.getLogger(__name__)
def test_prediction_classifier(
    iris_sample: Sample, cv_stratified_bootstrap: StratifiedBootstrapCV, n_jobs: int
) -> None:
    """End-to-end check of LearnerRanker on the iris sample: grid
    validation, fitting, ranking report, and crossfit scoring."""
    # Expected f1_macro scores of the top-4 ranked candidates (pinned).
    expected_learner_scores = [0.889, 0.886, 0.885, 0.879]
    # define parameters and crossfit
    grids = LearnerGrid(
        pipeline=ClassifierPipelineDF(
            classifier=RandomForestClassifierDF(random_state=42)
        ),
        learner_parameters={"min_samples_leaf": [16, 32], "n_estimators": [50, 80]},
    )
    # define an illegal grid list, mixing classification with regression
    grids_illegal = [
        grids,
        LearnerGrid(
            pipeline=RegressorPipelineDF(
                regressor=RandomForestRegressorDF(random_state=42)
            ),
            learner_parameters={"min_samples_leaf": [16, 32], "n_estimators": [50, 80]},
        ),
    ]
    # Mixed classifier/regressor grids must be rejected at construction.
    with pytest.raises(
        ValueError, match="^arg grids mixes regressor and classifier pipelines$"
    ):
        LearnerRanker(
            grids=grids_illegal,
            cv=cv_stratified_bootstrap,
        )
    model_ranker: LearnerRanker[
        ClassifierPipelineDF[RandomForestClassifierDF]
    ] = LearnerRanker(
        grids=grids,
        cv=cv_stratified_bootstrap,
        scoring="f1_macro",
        n_jobs=n_jobs,
        random_state=42,
    )
    model_ranker.fit(sample=iris_sample)
    # Passing sample weights through fit() is explicitly unsupported.
    with pytest.raises(
        ValueError, match="do not use arg sample_weight to pass sample weights"
    ):
        model_ranker.fit(sample=iris_sample, sample_weight=iris_sample.weight)
    log.debug(f"\n{model_ranker.summary_report()}")
    check_ranking(
        ranking=model_ranker.ranking_,
        expected_scores=expected_learner_scores,
        expected_learners=[RandomForestClassifierDF] * 4,
        expected_parameters={
            2: dict(classifier__min_samples_leaf=32, classifier__n_estimators=50),
            3: dict(classifier__min_samples_leaf=32, classifier__n_estimators=80),
        },
    )
    # consider: model_with_type(...) function for ModelRanking
    crossfit = model_ranker.best_model_crossfit_
    assert crossfit.is_fitted
    # Per-split accuracy of the best model should be high but not perfect.
    accuracy_scores_per_split: np.ndarray = crossfit.score(scoring="accuracy")
    assert (
        (accuracy_scores_per_split > 0.9) & (accuracy_scores_per_split <= 1.0)
    ).all()
|
import libjevois as jevois
import cv2
import numpy as np
## Simple example of image processing using OpenCV in Python on JeVois
#
# This module is a basic FRC vision process.
#
# By default, it first gets an image, blurs it, extracts the green channel, thresholds that, and uses the threshold to place
# a mask over the initial image. It then runs an HSV filter on the masked image, erodes and dilates the result, and finally
# finds and filters the contours.
# You can find the constants for all of these using GRIP. Tune the program constants and generate Python code from GRIP. Then,
# paste those constants into the Constructor below. Custom code can also be inserted after all the GRIP process code.
#
# See http://jevois.org/tutorials for tutorials on getting started with programming JeVois in Python without having
# to install any development software on your host computer.
#
# @author Anand Rajamani
#
# @videomapping YUYV 320 240 59.9 YUYV 320 240 59.9 JeVois PythonSandbox
# @email anand.rajamani@scu.edu
# @mainurl http://jevois.org
# @supporturl http://jevois.org/doc
# @otherurl http://iLab.usc.edu
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
class FindDistance:
# ###################################################################################################
## Constructor
    def __init__(self):
        """Set up the JeVois frame timer and all GRIP pipeline constants.

        NOTE(review): every `self.__<stage>_src = self.<prev>_output` line
        below copies the *current* value (None) rather than creating a live
        link; process() re-binds each stage input from the previous stage's
        output every frame, so these initial assignments are placeholders.
        """
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)
        # SPECIAL REPLACED BLUR CONSTANT
        self.__blur_type = 0
        # ###################################################################################################
        # ALL CONSTANTS GO UNDER HERE (make sure to remove the self.__blur_type line)
        self.__blur_radius = 6.909739928607854
        self.blur_output = None
        self.__cv_extractchannel_src = self.blur_output
        self.__cv_extractchannel_channel = 1.0
        self.cv_extractchannel_output = None
        self.__cv_threshold_src = self.cv_extractchannel_output
        self.__cv_threshold_thresh = 30.0
        self.__cv_threshold_maxval = 255.0
        self.__cv_threshold_type = cv2.THRESH_BINARY
        self.cv_threshold_output = None
        self.__mask_input = self.blur_output
        self.__mask_mask = self.cv_threshold_output
        self.mask_output = None
        self.__normalize_input = self.mask_output
        self.__normalize_type = cv2.NORM_MINMAX
        self.__normalize_alpha = 0.0
        self.__normalize_beta = 255.0
        self.normalize_output = None
        # HSV ranges produced by GRIP tuning
        self.__hsv_threshold_input = self.normalize_output
        self.__hsv_threshold_hue = [46.02792342397267, 120.58148236024165]
        self.__hsv_threshold_saturation = [157.86767273600026, 255.0]
        self.__hsv_threshold_value = [43.786072836645936, 255.0]
        self.hsv_threshold_output = None
        self.__cv_erode_src = self.hsv_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 3.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = (-1)
        self.cv_erode_output = None
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 1.0
        self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
        self.__cv_dilate_bordervalue = (-1)
        self.cv_dilate_output = None
        self.__find_contours_input = self.cv_dilate_output
        self.__find_contours_external_only = True
        self.find_contours_output = None
        # Contour-filter acceptance bounds (area, size, shape) from GRIP
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 400.0
        self.__filter_contours_min_perimeter = 100.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [75.32956685499059, 100]
        self.__filter_contours_max_vertices = 1000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.2
        self.__filter_contours_max_ratio = 1.0
        self.filter_contours_output = None
        # END CONSTANTS
# ###################################################################################################
## Process function with USB output
    def process(self, inframe, outframe):
        """Grab a camera frame, run the GRIP pipeline on it, draw the
        filtered contours, send per-contour telemetry over serial, and
        stream the annotated frame back over USB."""
        # Get the next camera image (may block until it is captured) and here convert it to OpenCV BGR by default. If
        # you need a grayscale image instead, just use getCvGRAY() instead of getCvBGR(). Also supported are getCvRGB()
        # and getCvRGBA():
        source0 = inimg = inframe.getCvBGR()
        outimg = inimg = inframe.getCvBGR()
        # Start measuring image processing time (NOTE: does not account for input conversion time):
        self.timer.start()
        #################################################################################################
        # BEGIN GRIP CODE
        #################################################################################################
        """
        Runs the pipeline and sets all outputs to new values.
        """
        # Step Blur0:
        self.__blur_input = source0
        (self.blur_output) = self.__blur(self.__blur_input, self.__blur_type, self.__blur_radius)
        # Step CV_extractChannel0:
        self.__cv_extractchannel_src = self.blur_output
        (self.cv_extractchannel_output) = self.__cv_extractchannel(self.__cv_extractchannel_src, self.__cv_extractchannel_channel)
        # Step CV_Threshold0:
        self.__cv_threshold_src = self.cv_extractchannel_output
        (self.cv_threshold_output) = self.__cv_threshold(self.__cv_threshold_src, self.__cv_threshold_thresh, self.__cv_threshold_maxval, self.__cv_threshold_type)
        # Step Mask0:
        self.__mask_input = self.blur_output
        self.__mask_mask = self.cv_threshold_output
        (self.mask_output) = self.__mask(self.__mask_input, self.__mask_mask)
        # Step Normalize0:
        self.__normalize_input = self.mask_output
        (self.normalize_output) = self.__normalize(self.__normalize_input, self.__normalize_type, self.__normalize_alpha, self.__normalize_beta)
        # Step HSV_Threshold0:
        self.__hsv_threshold_input = self.normalize_output
        (self.hsv_threshold_output) = self.__hsv_threshold(self.__hsv_threshold_input, self.__hsv_threshold_hue, self.__hsv_threshold_saturation, self.__hsv_threshold_value)
        # Step CV_erode0:
        self.__cv_erode_src = self.hsv_threshold_output
        (self.cv_erode_output) = self.__cv_erode(self.__cv_erode_src, self.__cv_erode_kernel, self.__cv_erode_anchor, self.__cv_erode_iterations, self.__cv_erode_bordertype, self.__cv_erode_bordervalue)
        # Step CV_dilate0:
        self.__cv_dilate_src = self.cv_erode_output
        (self.cv_dilate_output) = self.__cv_dilate(self.__cv_dilate_src, self.__cv_dilate_kernel, self.__cv_dilate_anchor, self.__cv_dilate_iterations, self.__cv_dilate_bordertype, self.__cv_dilate_bordervalue)
        # Step Find_Contours0:
        self.__find_contours_input = self.cv_dilate_output
        (self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
        # Step Filter_Contours0:
        self.__filter_contours_contours = self.find_contours_output
        (self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
        #################################################################################################
        # END GRIP CODE
        ##################################################################################################
        # DEFAULT CUSTOM CODE
        # Local helpers over a single contour / contour list:
        def getArea(con): # Gets the area of the contour
            return cv2.contourArea(con)
        def getYcoord(con): # Gets the Y coordinate of the contour
            M = cv2.moments(con)
            cy = int(M['m01']/M['m00'])
            return cy
        def getXcoord(con): # Gets the X coordinate of the contour
            M = cv2.moments(con)
            cy = int(M['m10']/M['m00'])
            return cy
        def sortByArea(conts) : # Returns an array sorted by area from smallest to largest
            contourNum = len(conts) # Gets number of contours
            sortedBy = sorted(conts, key=getArea) # sortedBy now has all the contours sorted by area
            return sortedBy
        ##################################################################################################
        # PUT YOUR CUSTOM CODE HERE
        ##################################################################################################
        # Draws all contours on original image in red
        cv2.drawContours(outimg, self.filter_contours_output, -1, (0, 0, 255), 1)
        # Gets number of contours
        contourNum = len(self.filter_contours_output)
        # Sorts contours by the smallest area first
        newContours = sortByArea(self.filter_contours_output)
        # Send the contour data over Serial
        for i in range (contourNum):
            cnt = newContours[i]
            x,y,w,h = cv2.boundingRect(cnt) # Get the stats of the contour including width and height
            # which contour, 0 is first
            if (i > 0):
                # From the second contour on, also report a distance estimate
                # derived from the pixel gap to the previous contour's centroid.
                cnt2 = newContours[i-1]
                toSend = ("CON" + str(i) +
                          " area" + str(getArea(cnt)) + # Area of contour
                          " x" + str(round((getXcoord(cnt)*1000/320)-500, 2)) + # x-coordinate of contour, -500 to 500 rounded to 2 decimal
                          " y" + str(round(375-getYcoord(cnt)*750/240, 2)) + # y-coordinate of contour, -375 to 375 rounded to 2 decimal
                          " h" + str(round(h*750/240, 2)) + # Height of contour, 0-750 rounded to 2 decimal
                          " w" + str(round(w*1000/320, 2)) + # Width of contour, 0-1000 rounded to 2 decimal
                          " Distance:" + str( round(( (11.5*596.32)/abs((getXcoord(cnt) - getXcoord(cnt2))) ), 2)))
            else:
                toSend = ("CON" + str(i) +
                          " area" + str(getArea(cnt)) + # Area of contour
                          " x" + str(round((getXcoord(cnt)*1000/320)-500, 2)) + # x-coordinate of contour, -500 to 500 rounded to 2 decimal
                          " y" + str(round(375-getYcoord(cnt)*750/240, 2)) + # y-coordinate of contour, -375 to 375 rounded to 2 decimal
                          " h" + str(round(h*750/240, 2)) + # Height of contour, 0-750 rounded to 2 decimal
                          " w" + str(round(w*1000/320, 2))) # Width of contour, 0-1000 rounded to 2 decimal
            jevois.sendSerial(toSend)
        # Write a title:
        cv2.putText(outimg, "JeVois Code", (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
        # Write frames/s info from our timer into the edge map (NOTE: does not account for output conversion time):
        fps = self.timer.stop()
        #height, width, channels = outimg.shape # if outimg is grayscale, change to: height, width = outimg.shape
        height, width, channels = outimg.shape
        cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
        # Convert our BGR output image to video output format and send to host over USB. If your output image is not
        # BGR, you can use sendCvGRAY(), sendCvRGB(), or sendCvRGBA() as appropriate:
        outframe.sendCvBGR(outimg)
        # outframe.sendCvGRAY(outimg)
##################################################################################################
# END CUSTOM CODE
###################################################################################################
# FUNCTIONS GO HERE (Anything that starts with "@staticmethod")
@staticmethod
def __blur(src, type, radius):
"""Softens an image using one of several filters.
Args:
src: The source mat (numpy.ndarray).
type: The blurType to perform represented as an int.
radius: The radius for the blur as a float.
Returns:
A numpy.ndarray that has been blurred.
"""
ksize = int(2 * round(radius) + 1)
return cv2.blur(src, (ksize, ksize))
#return cv2.medianBlur(src, (ksize, ksize)) # Perform a Median Blur
#return cv2.GaussianBlur(src,(ksize, ksize),0) # Perform a Gaussian Blur
@staticmethod
def __cv_extractchannel(src, channel):
"""Extracts given channel from an image.
Args:
src: A numpy.ndarray.
channel: Zero indexed channel number to extract.
Returns:
The result as a numpy.ndarray.
"""
return cv2.extractChannel(src, (int) (channel + 0.5))
@staticmethod
def __cv_threshold(src, thresh, max_val, type):
"""Apply a fixed-level threshold to each array element in an image
Args:
src: A numpy.ndarray.
thresh: Threshold value.
max_val: Maximum value for THRES_BINARY and THRES_BINARY_INV.
type: Opencv enum.
Returns:
A black and white numpy.ndarray.
"""
return cv2.threshold(src, thresh, max_val, type)[1]
@staticmethod
def __mask(input, mask):
"""Filter out an area of an image using a binary mask.
Args:
input: A three channel numpy.ndarray.
mask: A black and white numpy.ndarray.
Returns:
A three channel numpy.ndarray.
"""
return cv2.bitwise_and(input, input, mask=mask)
@staticmethod
def __normalize(input, type, a, b):
"""Normalizes or remaps the values of pixels in an image.
Args:
input: A numpy.ndarray.
type: Opencv enum.
a: The minimum value.
b: The maximum value.
Returns:
A numpy.ndarray of the same type as the input.
"""
return cv2.normalize(input, None, a, b, type)
@staticmethod
def __hsv_threshold(input, hue, sat, val):
"""Segment an image based on hue, saturation, and value ranges.
Args:
input: A BGR numpy.ndarray.
hue: A list of two numbers the are the min and max hue.
sat: A list of two numbers the are the min and max saturation.
lum: A list of two numbers the are the min and max value.
Returns:
A black and white numpy.ndarray.
"""
out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
@staticmethod
def __cv_erode(src, kernel, anchor, iterations, border_type, border_value):
"""Expands area of lower value in an image.
Args:
src: A numpy.ndarray.
kernel: The kernel for erosion. A numpy.ndarray.
iterations: the number of times to erode.
border_type: Opencv enum that represents a border type.
border_value: value to be used for a constant border.
Returns:
A numpy.ndarray after erosion.
"""
return cv2.erode(src, kernel, anchor, iterations = (int) (iterations +0.5),
borderType = border_type, borderValue = border_value)
@staticmethod
def __cv_dilate(src, kernel, anchor, iterations, border_type, border_value):
"""Expands area of higher value in an image.
Args:
src: A numpy.ndarray.
kernel: The kernel for dilation. A numpy.ndarray.
iterations: the number of times to dilate.
border_type: Opencv enum that represents a border type.
border_value: value to be used for a constant border.
Returns:
A numpy.ndarray after dilation.
"""
return cv2.dilate(src, kernel, anchor, iterations = (int) (iterations +0.5),
borderType = border_type, borderValue = border_value)
@staticmethod
def __find_contours(input, external_only):
"""Sets the values of pixels in a binary image to their distance to the nearest black pixel.
Args:
input: A numpy.ndarray.
external_only: A boolean. If true only external contours are found.
Return:
A list of numpy.ndarray where each one represents a contour.
"""
if(external_only):
mode = cv2.RETR_EXTERNAL
else:
mode = cv2.RETR_LIST
method = cv2.CHAIN_APPROX_SIMPLE
im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
return contours
@staticmethod
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
min_height, max_height, solidity, max_vertex_count, min_vertex_count,
min_ratio, max_ratio):
"""Filters out contours that do not meet certain criteria.
Args:
input_contours: Contours as a list of numpy.ndarray.
min_area: The minimum area of a contour that will be kept.
min_perimeter: The minimum perimeter of a contour that will be kept.
min_width: Minimum width of a contour.
max_width: MaxWidth maximum width.
min_height: Minimum height.
max_height: Maximimum height.
solidity: The minimum and maximum solidity of a contour.
min_vertex_count: Minimum vertex Count of the contours.
max_vertex_count: Maximum vertex Count.
min_ratio: Minimum ratio of width to height.
max_ratio: Maximum ratio of width to height.
Returns:
Contours as a list of numpy.ndarray.
"""
output = []
for contour in input_contours:
x,y,w,h = cv2.boundingRect(contour)
if (w < min_width or w > max_width):
continue
if (h < min_height or h > max_height):
continue
area = cv2.contourArea(contour)
if (area < min_area):
continue
if (cv2.arcLength(contour, True) < min_perimeter):
continue
hull = cv2.convexHull(contour)
solid = 100 * area / cv2.contourArea(hull)
if (solid < solidity[0] or solid > solidity[1]):
continue
if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
continue
ratio = (float)(w) / h
if (ratio < min_ratio or ratio > max_ratio):
continue
output.append(contour)
return output
#BlurType = Enum('BlurType', 'Box_Blur Gaussian_Blur Median_Filter Bilateral_Filter')
|
#!/usr/bin/python
#coding:utf-8
# Xcode build-phase script (Python 2): reads the framework version out of
# Info.plist and regenerates the QMUIKit umbrella header so that it imports
# every public header and exposes the version string as a constant.
import os
# Prefer the C-accelerated ElementTree parser; fall back to pure Python.
try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET
# Read QMUIKit's version number from Info.plist and define it as a static
# const constant so it can be accessed from code.
# NOTE(review): the plist is read from WXiOSCommonUtils while the umbrella
# header is written under QMUIKit — confirm this mixed layout is intentional.
infoFilePath = str(os.getenv('SRCROOT')) + '/WXiOSCommonUtils/Info.plist'
infoTree = ET.parse(infoFilePath)
# A plist <dict> is a flat sequence of alternating <key>/<value> elements, so
# the value for CFBundleShortVersionString is the element right after the key.
infoDictList = list(infoTree.find('dict'))
versionString = ''
for index in range(len(infoDictList)):
    element = infoDictList[index]
    if element.text == 'CFBundleShortVersionString':
        versionString = infoDictList[index + 1].text
        break
# The plist value may be a build-setting reference such as "$(VERSION)";
# strip the "$(" prefix and ")" suffix and resolve it from the environment.
if versionString.startswith('$'):
    versionEnvName = versionString[2:-1]
    versionString = os.getenv(versionEnvName)
    print 'umbrella creator: bundle versions string is %s, env name is %s' % (versionString, versionEnvName)
# Locate the public headers that the umbrella file should import.
publicHeaderFilePath = str(os.getenv('BUILT_PRODUCTS_DIR')) + '/' + os.getenv('PUBLIC_HEADERS_FOLDER_PATH')
print 'umbrella creator: publicHeaderFilePath = ' + publicHeaderFilePath
umbrellaHeaderFileName = 'QMUIKit.h'
umbrellaHeaderFilePath = str(os.getenv('SRCROOT')) + '/QMUIKit/' + umbrellaHeaderFileName
print 'umbrella creator: umbrellaHeaderFilePath = ' + umbrellaHeaderFilePath
umbrellaFileContent = '''/*****
 * Tencent is pleased to support the open source community by making QMUI_iOS available.
 * Copyright (C) 2016-2019 THL A29 Limited, a Tencent company. All rights reserved.
 * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
 * http://opensource.org/licenses/MIT
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 *****/
/// Automatically created by script in Build Phases
#import <UIKit/UIKit.h>
#ifndef QMUIKit_h
#define QMUIKit_h
static NSString * const QMUI_VERSION = @"%s";
''' % (versionString)
# Import every public header except the umbrella itself, guarded with
# __has_include so a missing header does not break the build.
onlyfiles = [ f for f in os.listdir(publicHeaderFilePath) if os.path.isfile(os.path.join(publicHeaderFilePath, f))]
onlyfiles.sort()
for filename in onlyfiles:
    if filename != umbrellaHeaderFileName:
        umbrellaFileContent += '''#if __has_include("%s")
#import "%s"
#endif
''' % (filename, filename)
umbrellaFileContent += '#endif /* QMUIKit_h */'
umbrellaFileContent = umbrellaFileContent.strip()
# Rewrite the umbrella header only when its content actually changed, so
# incremental builds are not retriggered unnecessarily.
f = open(umbrellaHeaderFilePath, 'r+')
f.seek(0)
oldFileContent = f.read().strip()
if oldFileContent == umbrellaFileContent:
    print 'umbrella creator: ' + umbrellaHeaderFileName + '的内容没有变化,不需要重写'
else:
    print 'umbrella creator: ' + umbrellaHeaderFileName + '的内容发生变化,开始重写'
    print 'umbrella creator: umbrellaFileContent = ' + umbrellaFileContent
    f.seek(0)
    f.write(umbrellaFileContent)
    f.truncate()
f.close()
|
# Prepend the MIT license, wrapped in the comment syntax appropriate for each
# file type, to every file named on the command line.
import datetime, sys
from os.path import *
# License text; %04d is filled with the current year.
MIT = """
The MIT License (MIT)
Copyright (c) %04d Jake Lussier (Stanford University)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" % datetime.date.today().year
for f in sys.argv[1:]:
    ext = splitext(f)[1]
    # Choose the comment wrapper from the file extension.
    if ext == ".py":
        mit = '\"\"\"' + MIT + '\"\"\"'
    elif ext == ".js":
        mit = '/*' + MIT + '*/'
    elif ext == ".html":
        mit = '<!--' + MIT + '-->'
    else:
        raise Exception("Unknown extension %s."%ext)
    # Read then rewrite the file with the license prepended; context managers
    # close the handles even on error (the originals leaked open files).
    with open(f, 'r') as src:
        original = src.read()
    with open(f, 'w') as dst:
        dst.write(mit + "\n" + original)
|
# Aggregate Galaxy Builder volunteer classifications and pickle one
# AggregationResult per subject.
import os
from os.path import join
from time import sleep
from tqdm import tqdm
import numpy as np
import pandas as pd
import gzbuilder_analysis.parsing as pg
import gzbuilder_analysis.aggregation as ag
# from shapely.affinity import scale
# from descartes import PolygonPatch
from PIL import Image
import argparse
loc = os.path.abspath(os.path.dirname(__file__))
# NOTE(review): `lib` is computed but never used below — the data paths are
# hard-coded relative to the working directory ('lib/...'), so the script
# presumably must be run from the repository root; confirm.
lib = os.path.join(loc, 'lib')
parser = argparse.ArgumentParser(
    description=(
        'Calculate aggregation results for galaxy builder subjects'
    )
)
parser.add_argument('--output', '-O', metavar='/path/to/output',
                    default=join(loc, 'output_files/aggregation_results'),
                    help='Where to save the output tuned model')
parser.add_argument('--subjects', metavar='subject_ids', type=int, nargs='+',
                    help='Subject ids to work on (otherwise will run all)')
args = parser.parse_args()
os.makedirs(args.output, exist_ok=True)
# Volunteer classification export; created_at is parsed so classifications
# can be ordered by submission time.
classifications = pd.read_csv('lib/galaxy-builder-classifications.csv', index_col=0)
classifications.created_at = pd.to_datetime(classifications.created_at)
fitting_metadata = pd.read_pickle('lib/fitting_metadata.pkl')
gal_df = pd.read_csv('lib/gal-metadata.csv', index_col=0)
# When no explicit subject ids are given, run over every subject that has
# fitting metadata.
with tqdm(args.subjects or fitting_metadata.index) as pbar:
    for subject_id in pbar:
        pbar.set_description('Subject: {}'.format(subject_id))
        # [::-1] reverses the row axis of the loaded PNG.
        im = np.array(Image.open('lib/subject_data/{}/image.png'.format(subject_id)))[::-1]
        fm = fitting_metadata.loc[subject_id]
        data = fm['galaxy_data']
        gal = gal_df.loc[subject_id]
        # take the first 30 classifications received for this galaxy
        c = (classifications
            .query('subject_ids == {}'.format(subject_id))
            .sort_values(by='created_at')
            .head(30)
        )
        zoo_models = c.apply(
            pg.parse_classification,
            axis=1,
            image_size=np.array(im.shape),
            size_diff=im.shape[0] / data.shape[0],
            ignore_scale=True  # ignore scale slider when aggregating
        )
        # Undo the scaling/rotation applied when the subject was shown, then
        # map the drawn models back onto the original frame via the two WCS.
        scaled_models = zoo_models.apply(
            pg.scale_model,
            args=(fm['size_diff'],),
        )
        rotated_models = scaled_models.apply(
            pg.rotate_model_about_centre,
            args=(
                np.array(im.shape) * fm['size_diff'],
                fm.rotation_correction
            ),
        )
        models = rotated_models.apply(
            pg.reproject_model,
            wcs_in=fm['montage_wcs'], wcs_out=fm['original_wcs']
        )
        sanitized_models = models.apply(pg.sanitize_model)
        try:
            aggregation_result = ag.AggregationResult(sanitized_models, data)
            pd.to_pickle(
                aggregation_result,
                join(args.output, '{}.pkl.gz'.format(subject_id))
            )
        except TypeError:
            # NOTE(review): assumes TypeError only arises when clustering
            # finds no disk component — confirm, otherwise real bugs are
            # swallowed here.
            print('No disk cluster for {}'.format(subject_id))
|
import os
from collections import defaultdict
import tensorflow as tf
import numpy as np
import pdb
class Agent:
    def __init__(self, name, sess, num_slots):
        """Build the full TF1 graph for one negotiating agent.

        Args:
            name: Scope prefix used to namespace this agent's variables.
            sess: A tf.Session shared with the training driver.
            num_slots: Number of calendar slots; the number of distractor
                calendars is 2 ** num_slots.
        """
        self.name_ = name
        self.sess = sess
        self.num_distractors_ = 2 ** num_slots
        self.num_slots_ = num_slots
        ################
        # Placeholders #
        ################
        with tf.variable_scope('%s_Agent' % self.name_, reuse = tf.AUTO_REUSE):
            self.calendar_tensor_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_, 1, self.num_slots_], name = 'calendars')
            self.message_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'message')
            self.other_belief_on_self_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'other_belief_on_self_')
            self.other_belief_on_self_spvs_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'other_belief_on_self_spvs_')
            self.self_belief_on_other_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'self_belief_on_other_')
            self.self_belief_on_self_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'self_belief_on_self_')
            self.q_net_spvs_ = tf.placeholder(tf.float32, shape = [None], name = 'q_net_spvs_')
            self.advantage_ = tf.placeholder(tf.float32, shape = [None])
            self.prediction_indices_ = tf.placeholder(tf.int32, shape = [None, 2])
            self.pretrain_belief_spvs_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'pretrain_self_belief_on_other')
            #actor-critic: critic
            self.other_belief_on_other_ = tf.placeholder(tf.float32, shape = [None, self.num_slots_], name = 'other_belief_on_other_')
            self.critic_value_spvs_ = tf.placeholder(tf.float32, shape = [None])
        # Three stacked 1x1 convolutions over the calendar tensor.
        with tf.variable_scope('%s_Distractor_Feature_Extraction' % self.name_, reuse = tf.AUTO_REUSE):
            self.df1_ = tf.layers.conv2d(self.calendar_tensor_, 3 * self.num_slots_, kernel_size = [1, 1],
                                         kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
                                         activation = tf.nn.leaky_relu)
            self.df2_ = tf.layers.conv2d(self.df1_, 2 * self.num_slots_, kernel_size = [1, 1],
                                         kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
                                         activation = tf.nn.leaky_relu)
            self.df3_ = tf.layers.conv2d(self.df2_, 1 * self.num_slots_, kernel_size = [1, 1],
                                         kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
                                         activation = tf.nn.leaky_relu)
        #######################
        #network belief update#
        #######################
        #teacher simulates student's new belief with all messages
        # MLP on [message, old belief] predicting the other agent's updated
        # belief about self.
        with tf.variable_scope('%s_Belief_OS_Predict' % self.name_, reuse = tf.AUTO_REUSE):
            # self.bos_feats_ = self.concat_cnn(self.df3_, [3 * self.num_slots_, 2 * self.num_slots_, self.num_slots_], [20, 10, self.num_distractors_])
            # self.bos_msg_compat_ = tf.reduce_sum(tf.multiply(tf.squeeze(self.bos_feats_[-1], axis = 2), tf.expand_dims(self.message_, 1)), axis = 2)
            # self.bos_belief_var_1d_ = tf.Variable(initial_value = -4, trainable = True, dtype = tf.float32)
            # self.bos_kernel_columns_ = []
            # for i in range(self.num_slots_):
            #     kernel_fc1 = tf.contrib.layers.fully_connected(self.bos_msg_compat_, 3 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            #     kernel_fc2 = tf.contrib.layers.fully_connected(kernel_fc1, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            #     kernel_fc3 = tf.contrib.layers.fully_connected(kernel_fc2, self.num_slots_, activation_fn = None)
            #     self.bos_kernel_columns_.append(tf.expand_dims(kernel_fc3, -1))
            # self.bos_kernel_ = tf.concat(self.bos_kernel_columns_, axis = 2)
            # self.other_belief_on_self_pred_ = tf.squeeze(tf.sigmoid(tf.matmul(self.bos_kernel_, tf.expand_dims(self.other_belief_on_self_, -1)) + self.bos_belief_var_1d_), -1)
            self.msg_old_bos_ = tf.concat([self.message_, self.other_belief_on_self_], axis = 1)
            self.msg_old_bos_fc1_ = tf.contrib.layers.fully_connected(self.msg_old_bos_, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            self.msg_old_bos_fc2_ = tf.contrib.layers.fully_connected(self.msg_old_bos_fc1_, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            self.msg_old_bos_fc3_ = tf.contrib.layers.fully_connected(self.msg_old_bos_fc2_, 2 * self.num_slots_, activation_fn = None)
            # NOTE(review): exp activation clipped to [0, 1] rather than a
            # sigmoid — confirm this saturation behavior is intended.
            self.other_belief_on_self_pred_ = tf.clip_by_value(tf.contrib.layers.fully_connected(self.msg_old_bos_fc3_, self.num_slots_, activation_fn = tf.exp), 0, 1)
            # self.bos_cross_entropy_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.other_belief_on_self_spvs_, tf.math.log(self.other_belief_on_self_pred_ + 1e-9) +\
            #                           tf.multiply(1 - self.other_belief_on_self_spvs_, tf.math.log(1 - self.other_belief_on_self_pred_ + 1e-9))), axis = 1))
            # NOTE(review): despite the name this loss is an L2 loss, not a
            # cross entropy (the cross-entropy version is commented out above).
            self.bos_cross_entropy_ = tf.nn.l2_loss(self.other_belief_on_self_pred_ - self.other_belief_on_self_spvs_)
            self.bos_predict_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('%s_Belief_OS_Predict' % self.name_)]
            self.other_belief_on_self_update_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-5)
            self.bos_predict_train_op_ = self.other_belief_on_self_update_opt_.minimize(self.bos_cross_entropy_, var_list = self.bos_predict_varlist_)
        ####################
        # Q-Net Estimation #
        ####################
        #check which new belief is the most ideal one, i think the reason for combine self_belief_on_other as input
        # is to send message that can most probability get an agreement
        with tf.variable_scope('%s_Q-Net' % self.name_, reuse = tf.AUTO_REUSE):
            self.q_net_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-4)
            # Q-net input: the three beliefs (post-message prediction included).
            self.q_df_b1_ = tf.identity(self.self_belief_on_self_)
            self.q_df_b2_ = tf.identity(self.self_belief_on_other_)
            self.q_df_b3_ = tf.identity(self.other_belief_on_self_pred_)
            self.q_concat_df_b_ = tf.concat([self.q_df_b1_, self.q_df_b2_, self.q_df_b3_], axis = 1)
            self.q_dfb_merge_pre_1_ = tf.contrib.layers.fully_connected(self.q_concat_df_b_, 9, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.q_dfb_merge_pre_2_ = tf.contrib.layers.fully_connected(self.q_dfb_merge_pre_1_, 6, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.q_dfb_merge_pre_3_ = tf.contrib.layers.fully_connected(self.q_dfb_merge_pre_2_, 4, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.q_dfb_merge_ = tf.contrib.layers.fully_connected(self.q_dfb_merge_pre_3_, 1, activation_fn = None,
                                                                  weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
            self.q_value_ = tf.squeeze(self.q_dfb_merge_)
            self.q_net_loss_ = tf.reduce_mean(tf.square(self.q_value_ - self.q_net_spvs_))
            self.q_net_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('%s_Q-Net' % self.name_)]
            self.q_net_train_op_ = self.q_net_opt_.minimize(self.q_net_loss_, var_list = self.q_net_varlist_)
        #######################
        #network belief update#
        #######################
        # Listener-side update of self's belief about the other agent.
        with tf.variable_scope('%s_Belief_SO_Update' % self.name_, reuse = tf.AUTO_REUSE):
            # self.bso_feats_ = self.concat_cnn(self.df3_, [3 * self.num_slots_, 2 * self.num_slots_, self.num_slots_], [20, 10, self.num_distractors_])
            # self.bso_msg_compat_ = tf.reduce_sum(tf.multiply(tf.squeeze(self.bso_feats_[-1], axis = 2), tf.expand_dims(self.message_, 1)), axis = 2)
            # self.bso_belief_var_1d_ = tf.Variable(initial_value = -4, trainable = True, dtype = tf.float32)
            # self.bso_kernel_columns_ = []
            # for i in range(self.num_slots_):
            #     kernel_fc1 = tf.contrib.layers.fully_connected(self.bos_msg_compat_, 3 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            #     kernel_fc2 = tf.contrib.layers.fully_connected(kernel_fc1, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            #     kernel_fc3 = tf.contrib.layers.fully_connected(kernel_fc2, self.num_slots_, activation_fn = None)
            #     self.bso_kernel_columns_.append(tf.expand_dims(kernel_fc3, -1))
            # self.bso_kernel_ = tf.concat(self.bso_kernel_columns_, axis = 2)
            self.msg_old_bso_ = tf.concat([self.message_, self.self_belief_on_other_], axis = 1)
            self.msg_old_bso_fc1_ = tf.contrib.layers.fully_connected(self.msg_old_bso_, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            self.msg_old_bso_fc2_ = tf.contrib.layers.fully_connected(self.msg_old_bso_fc1_, 2 * self.num_slots_, activation_fn = tf.nn.leaky_relu)
            self.msg_old_bso_fc3_ = tf.contrib.layers.fully_connected(self.msg_old_bso_fc2_, 2 * self.num_slots_, activation_fn = None)
            self.new_self_belief_on_other_ = tf.clip_by_value(tf.contrib.layers.fully_connected(self.msg_old_bso_fc3_, self.num_slots_, activation_fn = tf.exp), 0, 1)
            ## pretraining variables ##
            # self.bso_cross_entropy_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.pretrain_belief_spvs_,
            #                           tf.math.log(self.new_self_belief_on_other_ + 1e-9)) +\
            #                           tf.multiply(1 - self.pretrain_belief_spvs_,
            #                           tf.math.log(1 - self.new_self_belief_on_other_ + 1e-9)), axis = 1))
            # NOTE(review): as above, this "cross_entropy" is actually L2.
            self.bso_cross_entropy_ = tf.reduce_mean(tf.nn.l2_loss(self.new_self_belief_on_other_ - self.pretrain_belief_spvs_))
            self.pretrain_bso_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-5)
            self.pretrain_bso_train_op_ = self.pretrain_bso_opt_.minimize(self.bso_cross_entropy_)
            self.pretrain_bso_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\
                                          if v.name.startswith('%s_Belief_SO_Update' % self.name_)\
                                          or v.name.startswith('%s_Distractor_Feature_Extraction' % self.name_)]
            # Joint pretraining op for both belief-update networks.
            self.pretrain_op_ = tf.group(self.bos_predict_train_op_, self.pretrain_bso_train_op_)
            self.pretrain_saver_ = tf.train.Saver()
            self.pretrain_loader_ = tf.train.Saver([v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('%s_Belief' % self.name_)])
        ####################
        # V-Net Estimation #
        ####################
        with tf.variable_scope('%s_Value_Network' % self.name_, reuse = tf.AUTO_REUSE):
            #this network is only to tune belief update as a listener, so state only involves the following three beliefs
            self.v_df_b1_ = tf.identity(self.self_belief_on_self_)
            self.v_df_b2_ = tf.identity(self.other_belief_on_other_)
            self.v_df_b3_ = tf.identity(self.self_belief_on_other_)
            self.v_concat_df_b_ = tf.concat([self.v_df_b1_, self.v_df_b2_, self.v_df_b3_], axis = 1)
            self.v_dfb_merge_pre_1_ = tf.contrib.layers.fully_connected(self.v_concat_df_b_, 9, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.v_dfb_merge_pre_2_ = tf.contrib.layers.fully_connected(self.v_dfb_merge_pre_1_, 6, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.v_dfb_merge_pre_3_ = tf.contrib.layers.fully_connected(self.v_dfb_merge_pre_2_, 4, activation_fn = tf.nn.leaky_relu,
                                                                        weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1))
            self.v_dfb_merge_ = tf.contrib.layers.fully_connected(self.v_dfb_merge_pre_3_, 1, activation_fn = None,
                                                                  weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
            self.critic_value_ = tf.squeeze(self.v_dfb_merge_)
            self.v_net_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\
                                   if v.name.startswith('%s_Value_Network' % self.name_)]
            self.v_net_loss_ = tf.reduce_mean(tf.square(self.critic_value_ - self.critic_value_spvs_))
            self.value_iteration_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-4)
            self.value_iteration_train_op_ = self.value_iteration_opt_.minimize(self.v_net_loss_, var_list = self.v_net_varlist_)
        ####################
        #  Policy Network  #
        ####################
        with tf.variable_scope('%s_Policy_Network' % self.name_, reuse = tf.AUTO_REUSE):
            # Elementwise product of "slot free" probabilities for both agents.
            self.common_slots_ = tf.multiply(1 - self.self_belief_on_self_, 1 - self.new_self_belief_on_other_)
            #self.common_slots_ = tf.concat([self.self_belief_on_self_, self.new_self_belief_on_other_], axis = 1)
            self.reject_prob_1_ = tf.contrib.layers.fully_connected(self.common_slots_, self.num_slots_,
                                                                    activation_fn = tf.nn.leaky_relu)
            self.reject_prob_2_ = tf.contrib.layers.fully_connected(self.reject_prob_1_, self.num_slots_,
                                                                    activation_fn = tf.nn.leaky_relu)
            # NOTE(review): reject_prob_3_ is fed from reject_prob_1_, skipping
            # reject_prob_2_ (the hold chain below uses its _2_ layer) —
            # possibly a typo; confirm before "fixing".
            self.reject_prob_3_ = tf.contrib.layers.fully_connected(self.reject_prob_1_, 4,
                                                                    activation_fn = tf.nn.leaky_relu)
            self.reject_prob_ = tf.contrib.layers.fully_connected(self.reject_prob_3_, 1, activation_fn = tf.nn.sigmoid)
            self.hold_prob_1_ = tf.contrib.layers.fully_connected(self.common_slots_, self.num_slots_,
                                                                  activation_fn = tf.nn.leaky_relu)
            self.hold_prob_2_ = tf.contrib.layers.fully_connected(self.hold_prob_1_, self.num_slots_,
                                                                  activation_fn = tf.nn.leaky_relu)
            self.hold_prob_3_ = tf.contrib.layers.fully_connected(self.hold_prob_2_, 4,
                                                                  activation_fn = tf.nn.leaky_relu)
            self.hold_prob_ = tf.contrib.layers.fully_connected(self.hold_prob_3_, 1, activation_fn = tf.nn.sigmoid)
            self.decision_prob_1_ = tf.contrib.layers.fully_connected(self.common_slots_, 2 * self.num_slots_,
                                                                      activation_fn = tf.nn.leaky_relu)
            self.decision_prob_2_ = tf.contrib.layers.fully_connected(self.decision_prob_1_, 2 * self.num_slots_,
                                                                      activation_fn = tf.nn.leaky_relu)
            self.decision_prob_3_ = tf.contrib.layers.fully_connected(self.decision_prob_2_, 2 * self.num_slots_,
                                                                      activation_fn = tf.nn.leaky_relu)
            self.decision_prob_ = tf.contrib.layers.fully_connected(self.decision_prob_3_, self.num_slots_,
                                                                    activation_fn = tf.nn.softmax)
            # Action distribution = [per-slot decision, hold, reject].
            self.action_prob_ = tf.concat([self.decision_prob_, self.hold_prob_, self.reject_prob_], axis = 1)
            #self.action_prob_ = self.decision_prob_
            self.pg_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\
                                if v.name.startswith('%s_Policy_Network' % self.name_)]
            self.bn_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\
                                if v.name.startswith('%s_Belief' % self.name_)]
            self.policy_regularization_ = tf.add_n([ tf.nn.l2_loss(v) for v in self.pg_varlist_ if 'bias' not in v.name ])
            # Log-probability of the chosen action, plus "did not hold/reject"
            # terms for slot-decision actions.
            self.act_prob_ = tf.math.log(tf.gather_nd(self.action_prob_, self.prediction_indices_) + 1e-9)
            self.not_hold_prob_ = tf.math.log(tf.where(self.prediction_indices_[:, 1: 2] < self.num_slots_, 1 - self.hold_prob_ + 1e-9, tf.ones_like(self.hold_prob_)))
            self.not_hold_prob_ += tf.math.log(tf.where(self.prediction_indices_[:, 1: 2] == self.num_slots_ + 1, 1 - self.hold_prob_ + 1e-9, tf.ones_like(self.hold_prob_)))
            self.not_reject_prob_ = tf.math.log(tf.where(self.prediction_indices_[:, 1: 2] < self.num_slots_, 1 - self.reject_prob_ + 1e-9, tf.ones_like(self.reject_prob_)))
            self.total_log_ = self.act_prob_ + self.not_hold_prob_ + self.not_reject_prob_
            # NOTE(review): this is a sum of log-probs, not the usual
            # -sum(p * log p) entropy — confirm the regularizer is intended.
            self.decision_entropy_ = tf.reduce_sum(tf.log(self.decision_prob_ + 1e-9), axis = 1)
            self.loss_pg_ = -1 * tf.reduce_sum(tf.multiply(self.total_log_, self.advantage_)) + 1e-4 * self.decision_entropy_ + 1e-4 * self.policy_regularization_
            self.policy_gradient_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-4)
            # Separate train ops: policy only, belief nets only, or both.
            self.policy_gradient_behave_train_op_ = self.policy_gradient_opt_.minimize(self.loss_pg_, var_list = self.pg_varlist_)
            self.policy_gradient_belief_train_op_ = self.policy_gradient_opt_.minimize(self.loss_pg_, var_list = self.bn_varlist_)
            self.policy_gradient_train_op_ = self.policy_gradient_opt_.minimize(self.loss_pg_, var_list = self.bn_varlist_ + self.pg_varlist_)
            self.total_saver_ = tf.train.Saver()
            self.total_loader_ = tf.train.Saver([v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith(self.name_)])
def concat_cnn(self, input_tensor, num_filters, num_dims):
assert(num_dims[-1] == self.num_distractors_)
feat_tensors = [input_tensor]
for idx, num_filter in enumerate(num_filters):
tensor_rows = []
kernel_dim = self.num_distractors_ if idx == 0 else num_dims[idx - 1]
for _ in range(num_dims[idx]):
tensor_rows.append(tf.layers.conv2d(feat_tensors[-1], num_filter, kernel_size = [kernel_dim, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
padding = 'valid', activation = tf.nn.leaky_relu))
feat_tensor = tf.concat(tensor_rows, axis = 1)
feat_tensors.append(feat_tensor)
return feat_tensors
def pretrain_belief_update(self, data_batch):
_, bso_cross_entropy, bos_cross_entropy, new_self_belief_on_other, other_belief_on_self_pred =\
self.sess.run([self.pretrain_op_, self.bso_cross_entropy_, self.bos_cross_entropy_,
self.new_self_belief_on_other_, self.other_belief_on_self_pred_],
feed_dict = {self.self_belief_on_other_: data_batch['prev_belief'],
self.message_: data_batch['message'],
self.calendar_tensor_: data_batch['distractors'],
self.pretrain_belief_spvs_: data_batch['new_belief'],
self.other_belief_on_self_: data_batch['prev_belief'],
self.other_belief_on_self_spvs_: data_batch['new_belief']})
if np.sum(np.isnan(other_belief_on_self_pred)) > 0 or np.sum(np.isnan(new_self_belief_on_other)) > 0:
pdb.set_trace()
if np.isnan(bso_cross_entropy) or np.isnan(bos_cross_entropy):
pdb.set_trace()
return bso_cross_entropy, bos_cross_entropy, new_self_belief_on_other, other_belief_on_self_pred
def train_belief_update(self, data_batch):
_, bos_cross_entropy, other_belief_on_self_pred =\
self.sess.run([self.bos_predict_train_op_, self.bos_cross_entropy_, self.other_belief_on_self_pred_],
feed_dict = {self.message_: data_batch['message'],
self.calendar_tensor_: data_batch['distractors'],
self.other_belief_on_self_: data_batch['prev_belief'],
self.other_belief_on_self_spvs_: data_batch['new_belief']})
if np.sum(np.isnan(other_belief_on_self_pred)) > 0:
pdb.set_trace()
if np.isnan(bos_cross_entropy):
pdb.set_trace()
return bos_cross_entropy, other_belief_on_self_pred
def train_q_net(self, data_batch):
_, q_net_loss, q_value = self.sess.run([self.q_net_train_op_, self.q_net_loss_, self.q_value_],\
feed_dict = {self.calendar_tensor_: data_batch['distractors'],
self.q_net_spvs_: data_batch['target_q'],
self.message_: data_batch['message'],
self.self_belief_on_self_: data_batch['self_belief_on_self'],
self.self_belief_on_other_: data_batch['self_belief_on_other'],
self.other_belief_on_self_: data_batch['other_belief_on_self']})
ridx = np.random.randint(q_value.shape[0])
print('%s value est:' % self.name_, q_value[ridx: ridx + 10], data_batch['target_q'][ridx: ridx + 10])
print('Q learning loss: %f' % q_net_loss)
return q_net_loss
    def pretrain_bayesian_belief_update(self, concept_generator, agent_pretraining_steps, agent_pretrain_batch_size,
                                        agent_pretrain_ckpt_dir, agent_pretrain_ckpt_name, continue_steps = 0, silent = False):
        """Supervised pretraining loop for both belief-update networks.

        Restores an existing per-agent checkpoint if one exists (in which case
        only `continue_steps` further steps are run), trains on batches from
        `concept_generator`, logs progress every 1000 steps, and checkpoints
        periodically plus once at the end.
        """
        if not os.path.exists(agent_pretrain_ckpt_dir):
            os.makedirs(agent_pretrain_ckpt_dir)
        # Each agent keeps its checkpoints in its own subdirectory.
        ckpt_dir = os.path.join(agent_pretrain_ckpt_dir, self.name_)
        if not os.path.exists(ckpt_dir):
            os.makedirs(ckpt_dir)
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        train_steps = agent_pretraining_steps
        if ckpt:
            self.pretrain_loader_.restore(self.sess, ckpt.model_checkpoint_path)
            print('Loaded agent %s belief update ckpt from %s' % (self.name_, ckpt_dir))
            train_steps = continue_steps
        accuracies = []
        l1_diffs = []
        bayesian_wrongs = []
        cross_entropies = []
        for ts in range(train_steps):
            data_batch = concept_generator.generate_batch(agent_pretrain_batch_size)
            bso_cross_entropy, bos_cross_entropy, belief_pred_1, belief_pred_2 = self.pretrain_belief_update(data_batch)
            # Both networks share the same supervision, so stack their
            # predictions and tile the target to match.
            belief_pred = np.concatenate([belief_pred_1, belief_pred_2], axis = 0)
            target = np.tile(data_batch['new_belief'], [2, 1])
            l1_diff = abs(belief_pred - target)
            correct = np.mean(l1_diff <= 1e-2)
            # Fraction of rows that put probability mass where the Bayesian
            # target is exactly zero.
            bayesian_wrong = np.mean(np.sum((target == 0) * (belief_pred > 1e-5), axis = 1) > 0)
            accuracies.append(np.mean(correct))
            l1_diffs.append(np.mean(l1_diff))
            cross_entropies.append(0.5 * (bso_cross_entropy + bos_cross_entropy))
            bayesian_wrongs.append(bayesian_wrong)
            if ts % 1000 == 0 and not silent:
                print('[%s:%d] batch mean cross entropy: %f, mean accuracies: %f, mean l1: %f, bayesian wrong: %f'\
                      % (self.name_, ts + 1, np.mean(cross_entropies), np.mean(accuracies), np.mean(l1_diffs), np.mean(bayesian_wrongs)))
                # NOTE(review): this condition is always true — presumably a
                # debug threshold left at 0; confirm intent.
                if np.mean(accuracies) >= 0:
                    idx = np.random.randint(2 * agent_pretrain_batch_size)
                    print('\t target:', target[idx, :])
                    print('\t predict', belief_pred[idx, :])
                # Running statistics reset every 1000 steps.
                accuracies = []
                l1_diffs = []
                bayesian_wrongs = []
            if (ts + 1) % 10000 == 0:
                # NOTE(review): global_step is fixed at agent_pretraining_steps,
                # so periodic saves overwrite the same checkpoint file —
                # confirm this is intended.
                self.pretrain_saver_.save(self.sess, os.path.join(ckpt_dir, agent_pretrain_ckpt_name),
                                          global_step = agent_pretraining_steps)
                print('Saved agent %s belief update ckpt to %s after %d training'\
                      % (self.name_, ckpt_dir, ts))
        if train_steps != 0:
            self.pretrain_saver_.save(self.sess, os.path.join(ckpt_dir, agent_pretrain_ckpt_name),
                                      global_step = agent_pretraining_steps)
            print('Saved agent %s belief update ckpt to %s after %d training'\
                  % (self.name_, ckpt_dir, train_steps))
    #copy current belief for all messages, get q value for all message-belief pair
    def get_q_value_for_all_msg(self, self_belief_on_self, self_belief_on_other, other_belief_on_self, concept_generator):
        """Score every candidate message against the current beliefs.

        Tiles the three belief vectors and the calendar tensor once per
        candidate message, then evaluates the Q net in a single batched
        session run. Returns (q_values, predicted other_belief_on_self).
        """
        all_msg_embeddings = concept_generator.all_msgs_tensor_
        num_total_msgs = concept_generator.num_msgs_
        # one batch row per candidate message; beliefs repeated across rows
        bss_tile = np.tile(self_belief_on_self, (num_total_msgs, 1))
        bso_tile = np.tile(self_belief_on_other, (num_total_msgs, 1))
        bos_tile = np.tile(other_belief_on_self, (num_total_msgs, 1))
        calendars_tile = np.tile(np.expand_dims(
            np.expand_dims(concept_generator.tensor_, axis = 1), axis = 0), (num_total_msgs, 1, 1, 1))
        q_values, other_belief_on_self_pred =\
            self.sess.run([self.q_value_, self.other_belief_on_self_pred_],
                feed_dict = {self.calendar_tensor_: calendars_tile,
                             self.message_: all_msg_embeddings,
                             self.self_belief_on_self_: bss_tile,
                             self.self_belief_on_other_: bso_tile,
                             self.other_belief_on_self_: bos_tile})
        return q_values, other_belief_on_self_pred
    #first listener update self belief on other according to message passed on
    #then use the new belief on other to generate action
    def update_self_belief_on_other(self, prev_self_belief_on_other, other_belief_on_other,
        self_belief_on_self, embed_msg, concept_generator, is_training):
        """One listener step: update the belief from the received message,
        then choose an action.

        Action index encoding (from the branches below):
        0..num_slots_-1 = pick a slot, num_slots_ = hold, num_slots_+1 = reject.
        During training the action is sampled hierarchically
        (hold? -> reject? -> slot); at eval time it is greedy.
        """
        calendars_tile = np.expand_dims(np.expand_dims(concept_generator.tensor_, axis = 1), axis = 0)
        decision_prob, hold_prob, reject_prob, slots_belief, predict_prob, critic_value =\
            self.sess.run([self.decision_prob_, self.hold_prob_, self.reject_prob_, self.common_slots_, self.new_self_belief_on_other_, self.critic_value_],
                feed_dict = {self.calendar_tensor_: calendars_tile,
                             self.self_belief_on_other_: prev_self_belief_on_other,
                             self.self_belief_on_self_: self_belief_on_self,
                             self.other_belief_on_other_: other_belief_on_other,
                             self.message_: embed_msg})
        action_prob = np.concatenate([decision_prob, hold_prob, reject_prob], axis = 1)
        # debugging hook: halt when the net emits NaN probabilities
        if np.sum(np.isnan(reject_prob)) > 0 or np.sum(np.isnan(hold_prob)) > 0:
            pdb.set_trace()
        if is_training:
            hold = np.random.choice(2, 1, p = [1 - hold_prob[0][0], hold_prob[0][0]])[0]
            if hold:
                action_idx = self.num_slots_
            else:
                reject = np.random.choice(2, 1, p = [1 - reject_prob[0][0], reject_prob[0][0]])[0]
                if reject:
                    action_idx = self.num_slots_ + 1
                else:
                    try:
                        action_idx = np.random.choice(self.num_slots_, 1, p = decision_prob[0])[0]
                    except ValueError:
                        # decision_prob[0] did not sum to 1 (numerical issue)
                        pdb.set_trace()
        else:
            # greedy evaluation: threshold hold/reject, else best slot
            if hold_prob[0][0] > 0.5:
                action_idx = self.num_slots_
            elif reject_prob[0][0] > 0.5:
                action_idx = self.num_slots_ + 1
            else:
                action_idx = np.argmax(decision_prob)
        return action_idx, slots_belief, predict_prob, action_prob, critic_value
# trajectory_batch = [[(distractors, belief, msg, action, advantage), ...], [], []]
# advantage should include 1/m, where m is the batch size
def update_net(self, trajectory_batch, phase = 1):
debug_batch = {}
#listener updates
num_traject = len(trajectory_batch['trajectory_batch'])
max_length = max([len(traject) for traject in trajectory_batch])
step_idx = 0
data_batch = {}
data_batch['distractors'] = []
data_batch['other_belief_on_other'] = []
data_batch['self_belief_on_other'] = []
data_batch['self_belief_on_self'] = []
data_batch['message'] = []
data_batch['action'] = []
data_batch['advantage'] = []
data_batch['critic_value'] = []
data_batch['correct_answer'] = []
data_batch['bayesian_belief'] = []
baseline = 0
for tj in trajectory_batch['trajectory_batch']:
baseline += tj[0]['gain']
baseline /= len(trajectory_batch['trajectory_batch'])
wait_debug = []
while step_idx < max_length:
for trajectory in trajectory_batch['trajectory_batch']:
if len(trajectory) > step_idx:
data_batch['distractors'].append(trajectory[step_idx]['distractors'])
data_batch['other_belief_on_other'].append(trajectory[step_idx]['other_belief_on_other'])
data_batch['self_belief_on_other'].append(trajectory[step_idx]['self_belief_on_other'])
data_batch['self_belief_on_self'].append(trajectory[step_idx]['self_belief_on_self'])
data_batch['message'].append(trajectory[step_idx]['message'])
data_batch['action'].append(trajectory[step_idx]['action'])
data_batch['critic_value'].append(trajectory[step_idx]['gain'])
data_batch['advantage'].append((trajectory[step_idx]['gain'] - baseline) / len(trajectory_batch['trajectory_batch']))
if trajectory[step_idx]['action'] == self.num_slots_:
wait_debug.append({'advantage': data_batch['advantage'][-1],
'gain': trajectory[step_idx]['gain'],
'critic_value': trajectory[step_idx]['critic_value']})
step_idx += 1
data_batch['action'] = list(zip(range(len(data_batch['action'])), data_batch['action']))
for k in data_batch:
data_batch[k] = np.array(data_batch[k])
data_batch['advantage'] /= num_traject
if len(wait_debug) > 0:
print('average wait advantage: %f' % np.mean([wa['advantage'] for wa in wait_debug]))
#pdb.set_trace()
loss_pg = -425
if phase == 1 or phase == 0:
train_op = self.policy_gradient_behave_train_op_
elif phase == 2:
train_op = self.policy_gradient_belief_train_op_
elif phase == 3:
train_op = self.policy_gradient_behave_train_op_
prev_weights = self.sess.run(self.pg_varlist_)
# loss_pg, _, action_prob,\
# decision_prob_1, decision_prob_2, decision_prob_3,\
# act_prob, not_hold_prob, not_reject_prob =\
# self.sess.run([self.loss_pg_, train_op, self.action_prob_,
# self.decision_prob_1_, self.decision_prob_2_, self.decision_prob_3_,
# self.act_prob_, self.not_hold_prob_, self.not_reject_prob_],
loss_pg, _, action_prob,\
decision_prob_1, decision_prob_2, decision_prob_3 =\
self.sess.run([self.loss_pg_, train_op, self.action_prob_,
self.decision_prob_1_, self.decision_prob_2_, self.decision_prob_3_],
feed_dict = {self.calendar_tensor_: data_batch['distractors'],
self.other_belief_on_other_: data_batch['other_belief_on_other'],
self.self_belief_on_self_: data_batch['self_belief_on_self'],
self.self_belief_on_other_: data_batch['self_belief_on_other'],
self.advantage_: data_batch['advantage'],
self.prediction_indices_: data_batch['action'],
self.message_: data_batch['message']})
new_weights = self.sess.run(self.pg_varlist_)
for nw in new_weights:
if np.sum(np.isnan(nw)) > 0 or np.sum(np.isinf(nw)) > 0:
pdb.set_trace()
v_net_loss, _ = self.sess.run([self.v_net_loss_, self.value_iteration_train_op_],
feed_dict = {self.calendar_tensor_: data_batch['distractors'],
self.other_belief_on_other_: data_batch['other_belief_on_other'],
self.self_belief_on_self_: data_batch['self_belief_on_self'],
self.self_belief_on_other_: data_batch['self_belief_on_other'],
self.critic_value_spvs_: data_batch['critic_value']})
debug_batch['v_net_loss'] = v_net_loss
debug_batch['loss_pg'] = loss_pg
if np.sum(np.isnan(action_prob)) > 0 or np.sum(np.isinf(action_prob)) > 0:
pdb.set_trace()
#speaker section
belief_update_batch = {}
belief_update_batch['prev_belief'] = []
belief_update_batch['new_belief'] = []
belief_update_batch['message'] = []
belief_update_batch['distractors'] = []
for belief_tuple in trajectory_batch['belief_update_batch']:
belief_update_batch['distractors'].append(belief_tuple[0])
belief_update_batch['prev_belief'].append(belief_tuple[1])
belief_update_batch['message'].append(belief_tuple[2])
belief_update_batch['new_belief'].append(belief_tuple[3])
for k in belief_update_batch:
belief_update_batch[k] = np.array(belief_update_batch[k])
if phase == 1:
cross_entropy, belief_pred = self.train_belief_update(belief_update_batch)
print('%s\'s belief esimate cross_entropy: %f' % (self.name_, cross_entropy))
debug_batch['%s_belief_prediction' % self.name_] = belief_pred
q_learning_batch = {}
q_learning_batch['self_belief_on_self'] = []
q_learning_batch['self_belief_on_other'] = []
q_learning_batch['other_belief_on_self'] = []
q_learning_batch['message'] = []
q_learning_batch['distractors'] = []
q_learning_batch['target_q'] = []
for q_learning_tuple in trajectory_batch['q_learning_batch']:
q_learning_batch['distractors'].append(q_learning_tuple['distractors'])
q_learning_batch['self_belief_on_self'].append(q_learning_tuple['self_belief_on_self'])
q_learning_batch['self_belief_on_other'].append(q_learning_tuple['self_belief_on_other'])
q_learning_batch['other_belief_on_self'].append(q_learning_tuple['other_belief_on_self'])
q_learning_batch['message'].append(q_learning_tuple['message'])
q_learning_batch['target_q'].append(q_learning_tuple['target_q'])
for k in q_learning_batch:
q_learning_batch[k] = np.array(q_learning_batch[k])
if phase == 1:
q_net_loss = self.train_q_net(q_learning_batch)
return debug_batch
def main():
    """Smoke test: construct a single agent over 8-slot calendars on CPU."""
    num_slots = 8
    from concept import Concept
    calendars = Concept(num_slots)
    sess = tf.Session(config = tf.ConfigProto(allow_soft_placement = True,
                                              log_device_placement = False))
    with tf.device('/cpu:0'):
        agent = Agent('A', sess, num_slots)
if __name__ == '__main__':
    main()
#!/usr/bin/env python3
""" A Python script that displays the lyrics to the currently playing song on
Spotify in your terminal.
File name: spotify-lyrics.py
Author: Caleb Hamilton
Website: https://github.com/cjlh/spotify-lyrics
License: MIT
Python version: 3
Usage:
$ python spotify-lyrics.py
"""
import argparse
import dbus
import os
import re
import requests
import sys
import time
import unicodedata
from bs4 import BeautifulSoup
def get_song_info():
    """Return [artist, title] of the track Spotify is currently playing,
    read over D-Bus from Spotify's MPRIS2 metadata."""
    session_bus = dbus.SessionBus()
    spotify_bus = session_bus.get_object("org.mpris.MediaPlayer2.spotify",
                                         "/org/mpris/MediaPlayer2")
    spotify_metadata = dbus.Interface(
        spotify_bus, "org.freedesktop.DBus.Properties"
    ).Get("org.mpris.MediaPlayer2.Player", "Metadata")
    # xesam:artist is a list; only the first credited artist is used
    return [
        str(spotify_metadata["xesam:artist"][0]),
        str(spotify_metadata["xesam:title"]),
    ]
def center_string(s):
    """Center *s* in the current terminal width.

    The width is padded by 10 columns — presumably to compensate for
    invisible ANSI escape sequences inside *s* (TODO confirm).
    """
    cols = int(os.popen("tput cols").read())
    return f"{s:^{cols + 10}}"
def remove_accents(input_str):
    """Strip diacritics: NFKD-decompose, then drop all combining marks."""
    decomposed = unicodedata.normalize("NFKD", input_str)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))
def letras_mus_provider(artist, title):
    """Fetch lyrics from letras.mus.br; return '' when no lyric box is found."""
    response = requests.get(f"https://www.letras.mus.br/{artist}/{title}")
    page = BeautifulSoup(response.text, "html.parser")
    box = page.find("div", {"class": "cnt-letra p402_premium"})
    if not box:
        return ""
    # Order matters: '</p><p>' must be handled before the bare '</p>'/'<p>'.
    markup_map = [
        ('<div class="cnt-letra p402_premium">', ""),
        ("</div>", ""),
        ("<br/>", "\n"),
        ("</br>", "\n"),
        ("<br>", "\n"),
        ("</br>", "\n"),
        ("</p><p>", "\n\n"),
        ("</p>", ""),
        ("<p>", ""),
    ]
    text = str(box)
    for old, new in markup_map:
        text = text.replace(old, new)
    return text.strip()
def make_it_personal_provider(artist, title):
    """Fetch plain-text lyrics from makeitpersonal.co; return '' when unavailable."""
    pageurl = f"https://makeitpersonal.co/lyrics?artist={artist}&title={title}"
    lyrics = requests.get(pageurl).text.strip()
    # The service returns this exact sentence (not an HTTP error) when it
    # has no lyrics for the song.
    if (lyrics == "Sorry, We don't have lyrics for this song yet. Add them to "
                  "https://lyrics.wikia.com"):
        return ""
    return lyrics
def fandom_provider(artist, title):
    """Fetch lyrics from lyrics.fandom.com; return '' when no lyricbox exists."""
    artist = artist.replace(" ", "_")
    title = title.replace(" ", "_")
    url = "https://lyrics.fandom.com/wiki/" + f"{artist}:{title}"
    response = requests.get(url)
    page = BeautifulSoup(response.text, "html.parser")
    box = page.find("div", {"class": "lyricbox"})
    if not box:
        return ""
    markup_map = [
        ("<br/>", "\n"),
        ('<div class="lyricbox">', ""),
        ('<div class="lyricsbreak">', ""),
        ("</div>", ""),
    ]
    text = str(box)
    for old, new in markup_map:
        text = text.replace(old, new)
    return text
def format_request_param(request_param):
    """Normalize a URL path segment: drop accents, map '&' to 'e', and
    strip every character that is not an ASCII alphanumeric or whitespace."""
    cleaned = remove_accents(request_param).replace("&", "e")
    return re.sub(r"[^A-Za-z0-9\s]+", "", cleaned)
def print_lyrics(artist, title, lyrics_provider_name):
    """Print a centered, underlined song header followed by the lyrics.

    When lyrics_provider_name is falsy, every provider in PROVIDERS is
    tried in dict order until one returns a non-empty result.
    """
    # \033[4m / \033[0m: underline on / reset
    print(center_string("\033[4m" + artist + ": " + title + "\033[0m\n"))
    artist = format_request_param(artist)
    title = format_request_param(title)
    lyrics = ""
    if lyrics_provider_name:
        lyrics_provider = PROVIDERS[lyrics_provider_name]
        lyrics = lyrics_provider(artist, title)
    else:
        for lyrics_provider in PROVIDERS.values():
            lyrics = lyrics_provider(artist, title)
            if lyrics:
                break
    print(lyrics or "Lyrics could not be found for this song.")
def main():
    """Poll Spotify once per second; re-render lyrics when the song changes."""
    old_song_info = None
    while True:
        song_info = get_song_info()
        if song_info != old_song_info:
            old_song_info = song_info
            os.system("clear")
            # NOTE(review): `args` is a module-level name created in the
            # __main__ block below; main() is not usable as a library call.
            print_lyrics(song_info[0], song_info[1], args.lyrics_provider)
        time.sleep(1)
# Fallback order matters: providers are tried top-to-bottom when no
# --lyrics-provider is given (dicts preserve insertion order).
PROVIDERS = {
    "make_it_personal": make_it_personal_provider,
    "fandom": fandom_provider,
    "letras_br": letras_mus_provider,
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--lyrics-provider", help="Lyrics Provider", choices=PROVIDERS.keys()
    )
    args = parser.parse_args()
    try:
        main()
    except dbus.exceptions.DBusException:
        # Spotify's MPRIS bus name is absent when the app is not running.
        print("Spotify does not appear to be running! Please start Spotify and try "
              "again.", file=sys.stderr)
        # use sys.exit: the bare `exit` builtin is a site-module convenience
        # and is not guaranteed to exist (e.g. under `python -S`)
        sys.exit(1)
|
from django.db import models
from django.contrib.auth.models import User
from backoffice.models import Topic
class Profile(models.Model):
    """Per-user profile holding the topics a user subscribes to."""
    # one profile per auth user; deleted together with the user
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # topics the user follows; reverse accessor: topic.subscriptions
    interests = models.ManyToManyField(Topic, related_name="subscriptions", blank=True)
    def __str__(self):
        return f'{self.user.username} Profile'
|
# Adapted from https://github.com/aam-at/cpgd
import math
from functools import partial
from typing import Optional
import torch
from torch import Tensor, nn, optim
from torch.autograd import grad
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils.losses import difference_of_logits
from adv_lib.utils.projections import l1_ball_euclidean_projection
from adv_lib.utils.visdom_logger import VisdomLogger
def pdgd(model: nn.Module,
         inputs: Tensor,
         labels: Tensor,
         targeted: bool = False,
         num_steps: int = 500,
         random_init: float = 0,
         primal_lr: float = 0.1,
         primal_lr_decrease: float = 0.01,
         dual_ratio_init: float = 0.01,
         dual_lr: float = 0.1,
         dual_lr_decrease: float = 0.1,
         dual_ema: float = 0.9,
         dual_min_ratio: float = 1e-6,
         callback: Optional[VisdomLogger] = None) -> Tensor:
    """
    Primal-Dual Gradient Descent (PDGD) attack from https://arxiv.org/abs/2106.01538. This version is only suitable for
    the L2-norm.
    Parameters
    ----------
    model : nn.Module
        Model to attack.
    inputs : Tensor
        Inputs to attack. Should be in [0, 1].
    labels : Tensor
        Labels corresponding to the inputs if untargeted, else target labels.
    targeted : bool
        Whether to perform a targeted attack or not.
    num_steps : int
        Number of optimization steps. Corresponds to the number of forward and backward propagations.
    random_init : float
        If random_init != 0, will start from a random perturbation drawn from U(-random_init, random_init).
    primal_lr : float
        Learning rate for primal variables.
    primal_lr_decrease : float
        Final learning rate multiplier for primal variables.
    dual_ratio_init : float
        Initial ratio λ_0 / λ_1. A smaller value corresponds to a larger weight on the (mis)classification constraint.
    dual_lr : float
        Learning rate for dual variables.
    dual_lr_decrease : float
        Final learning rate multiplier for dual variables.
    dual_ema : float
        Coefficient for exponential moving average. Equivalent to no EMA if dual_ema == 0.
    dual_min_ratio : float
        Minimum ratio λ_0 / λ_1 and λ_1 / λ_0 to avoid having too large absolute values of λ_1.
    callback : VisdomLogger
        Callback to visualize the progress of the algorithm.
    Returns
    -------
    best_adv : Tensor
        Perturbed inputs (inputs + perturbation) that are adversarial and have smallest distance with the original
        inputs.
    """
    attack_name = 'PDGD L2'
    device = inputs.device
    batch_size = len(inputs)
    # view a (batch,) tensor as (batch, 1, ..., 1) so it broadcasts over inputs
    batch_view = lambda tensor: tensor.view(batch_size, *[1] * (inputs.ndim - 1))
    # flips the sign of the logit difference for targeted attacks
    multiplier = -1 if targeted else 1
    log_min_dual_ratio = math.log(dual_min_ratio)
    # Setup variables
    r = torch.zeros_like(inputs, requires_grad=True)
    if random_init:
        nn.init.uniform_(r, -random_init, random_init)
        # keep inputs + r inside the valid box [0, 1]
        r.data.add_(inputs).clamp_(min=0, max=1).sub_(inputs)
    optimizer = optim.Adam([r], lr=primal_lr)
    lr_lambda = lambda i: primal_lr_decrease ** (i / num_steps)
    scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lr_lambda)
    # dual variables live in logit space; softmax yields the two weights
    λ = torch.zeros(batch_size, 2, dtype=torch.float, device=device)
    λ[:, 1] = -math.log(dual_ratio_init)
    λ_ema = λ.softmax(dim=1)
    # Init trackers
    best_l2 = torch.full((batch_size,), float('inf'), device=device)
    best_adv = inputs.clone()
    adv_found = torch.zeros_like(best_l2, dtype=torch.bool)
    for i in range(num_steps):
        adv_inputs = inputs + r
        logits = model(adv_inputs)
        l2 = r.flatten(1).norm(p=2, dim=1)
        if i == 0:
            # built once: +inf at the true label excludes it from the max
            labels_infhot = torch.zeros_like(logits.detach()).scatter(1, labels.unsqueeze(1), float('inf'))
            dl_func = partial(difference_of_logits, labels=labels, labels_infhot=labels_infhot)
        # m_y < 0 <=> the (mis)classification constraint is satisfied
        m_y = multiplier * dl_func(logits)
        is_adv = m_y < 0
        is_smaller = l2 < best_l2
        is_both = is_adv & is_smaller
        adv_found.logical_or_(is_adv)
        best_l2 = torch.where(is_both, l2.detach(), best_l2)
        best_adv = torch.where(batch_view(is_both), adv_inputs.detach(), best_adv)
        # Lagrangian: dual-weighted sum of distance and classification terms
        L_r = λ_ema[:, 0] * l2 + λ_ema[:, 1] * F.softplus(m_y.clamp_min(0))
        grad_r = grad(L_r.sum(), inputs=r, only_inputs=True)[0]
        grad_λ = m_y.detach().sign()
        # gradient descent on primal variables
        r.grad = grad_r
        optimizer.step()
        scheduler.step()
        # re-project the perturbed input into [0, 1]
        r.data.add_(inputs).clamp_(min=0, max=1).sub_(inputs)
        # gradient ascent on dual variables and exponential moving average
        # θ_λ decays linearly from dual_lr down to dual_lr * dual_lr_decrease
        θ_λ = dual_lr * ((num_steps - 1 - i) / (num_steps - 1) * (1 - dual_lr_decrease) + dual_lr_decrease)
        λ[:, 1].add_(grad_λ, alpha=θ_λ).clamp_(min=log_min_dual_ratio, max=-log_min_dual_ratio)
        λ_ema.mul_(dual_ema).add_(λ.softmax(dim=1), alpha=1 - dual_ema)
        if callback is not None:
            callback.accumulate_line('m_y', i, m_y.mean(), title=f'{attack_name} - Logit difference')
            callback_best = best_l2.masked_select(adv_found).mean()
            callback.accumulate_line(['l2', 'best_l2'], i, [l2.mean(), callback_best],
                                     title=f'{attack_name} - L2 norms')
            callback.accumulate_line(['λ_1', 'λ_2'], i, [λ_ema[:, 0].mean(), λ_ema[:, 1].mean()],
                                     title=f'{attack_name} - Dual variables')
            callback.accumulate_line(['θ_r', 'θ_λ'], i, [optimizer.param_groups[0]['lr'], θ_λ],
                                     title=f'{attack_name} - Learning rates')
            callback.accumulate_line('success', i, adv_found.float().mean(), title=f'{attack_name} - Success')
            if (i + 1) % (num_steps // 20) == 0 or (i + 1) == num_steps:
                callback.update_lines()
    return best_adv
def l0_proximal(x: Tensor, λ: Tensor) -> Tensor:
    """Hard thresholding for the L0 "norm": keep x where its maximum over
    dim 1 reaches sqrt(2λ), zero it elsewhere.

    NOTE(review): the threshold compares the signed maximum over dim 1
    (not the element-wise |x|); preserved as-is.
    """
    threshold = torch.sqrt(2 * λ)
    dim1_max = x.max(dim=1, keepdim=True).values
    keep = (dim1_max >= threshold).float()
    return keep * x
def l1_proximal(x: Tensor, λ: Tensor) -> Tensor:
    """Soft thresholding: shrink every entry of x toward zero by λ."""
    shrunk_magnitude = (x.abs() - λ).clamp_min(0)
    return shrunk_magnitude * x.sign()
def l2_proximal(x: Tensor, λ: Tensor) -> Tensor:
    """Block soft thresholding for the L2 norm: scale each sample's
    flattened vector by max(0, 1 - λ / ||x||_2)."""
    flat = x.flatten(1)
    flat_norms = flat.norm(p=2, dim=1, keepdim=True)
    scale = (1 - λ.flatten(1) / flat_norms).clamp_min(0)
    return (scale * flat).view_as(x)
def linf_proximal(x: Tensor, λ: Tensor) -> Tensor:
    """Proximal operator for the L∞ norm, computed as
    x - λ * P(x / λ) with P the Euclidean projection onto the unit L1 ball."""
    l1_projection = l1_ball_euclidean_projection(x=(x / λ).flatten(1), ε=1).view_as(x)
    return x - λ * l1_projection
def l23_proximal(x: Tensor, λ: Tensor) -> Tensor:
    """Proximal operator for L_2/3 norm."""
    # NOTE(review): `th` is computed but never used — presumably the
    # intended hard threshold for small entries; confirm against the paper.
    th = 2 * (2 / 3 * λ).pow(3 / 4)
    # closed-form root computation; intermediate sqrt/pow produce NaNs for
    # sub-threshold entries, which are zeroed by nan_to_num at the end
    a = torch.sqrt(x.pow(4) / 256 - 8 * λ.pow(3) / 729)
    x_square = x ** 2
    b = (1 / 16 * x_square + a).pow(1 / 3) + (1 / 16 * x_square - a).pow(1 / 3)
    b_ = 2 * b
    b_sqrt = b_.sqrt()
    z = x.sign() / 8 * (b_sqrt + torch.sqrt(2 * x.abs() / b_sqrt - b_)).pow(3)
    return torch.nan_to_num(z, nan=0)
def pdpgd(model: nn.Module,
          inputs: Tensor,
          labels: Tensor,
          norm: float,
          targeted: bool = False,
          num_steps: int = 500,
          random_init: float = 0,
          proximal_operator: Optional[float] = None,
          primal_lr: float = 0.1,
          primal_lr_decrease: float = 0.01,
          dual_ratio_init: float = 0.01,
          dual_lr: float = 0.1,
          dual_lr_decrease: float = 0.1,
          dual_ema: float = 0.9,
          dual_min_ratio: float = 1e-6,
          proximal_steps: int = 5,
          ε_threshold: float = 1e-2,
          callback: Optional[VisdomLogger] = None) -> Tensor:
    """
    Primal-Dual Proximal Gradient Descent (PDPGD) attacks from https://arxiv.org/abs/2106.01538.
    Parameters
    ----------
    model : nn.Module
        Model to attack.
    inputs : Tensor
        Inputs to attack. Should be in [0, 1].
    labels : Tensor
        Labels corresponding to the inputs if untargeted, else target labels.
    norm: float
        Norm to minimize.
    targeted : bool
        Whether to perform a targeted attack or not.
    num_steps : int
        Number of optimization steps. Corresponds to the number of forward and backward propagations.
    random_init : float
        If random_init != 0, will start from a random perturbation drawn from U(-random_init, random_init).
    proximal_operator : float
        If not None, uses the corresponding proximal operator in [0, 23, 1, 2, float('inf')]. 23 corresponds to the
        L-2/3 proximal operator and is preferred to minimze the L0-norm instead of the L0 proximal operator.
    primal_lr : float
        Learning rate for primal variables.
    primal_lr_decrease : float
        Final learning rate multiplier for primal variables.
    dual_ratio_init : float
        Initial ratio λ_0 / λ_1. A smaller value corresponds to a larger weight on the (mis)classification constraint.
    dual_lr : float
        Learning rate for dual variables.
    dual_lr_decrease : float
        Final learning rate multiplier for dual variables.
    dual_ema : float
        Coefficient for exponential moving average. Equivalent to no EMA if dual_ema == 0.
    dual_min_ratio : float
        Minimum ratio λ_0 / λ_1 and λ_1 / λ_0 to avoid having too large absolute values of λ_1.
    proximal_steps : int
        Number of steps for proximal Adam (https://arxiv.org/abs/1910.10094).
    ε_threshold : float
        Convergence criterion for proximal Adam.
    callback : VisdomLogger
        Callback to visualize the progress of the algorithm.
    Returns
    -------
    best_adv : Tensor
        Perturbed inputs (inputs + perturbation) that are adversarial and have smallest distance with the original
        inputs.
    """
    attack_name = f'PDPGD L{norm}'
    # distance / proximal operator dispatch tables keyed by the norm
    _distance = {
        0: l0_distances,
        1: l1_distances,
        2: l2_distances,
        float('inf'): linf_distances,
    }
    _proximal_operator = {
        0: l0_proximal,
        1: l1_proximal,
        2: l2_proximal,
        float('inf'): linf_proximal,
        23: l23_proximal,
    }
    device = inputs.device
    batch_size = len(inputs)
    # view a (batch,) tensor as (batch, 1, ..., 1) so it broadcasts over inputs
    batch_view = lambda tensor: tensor.view(batch_size, *[1] * (inputs.ndim - 1))
    # flips the sign of the logit difference for targeted attacks
    multiplier = -1 if targeted else 1
    distance = _distance[norm]
    # the proximal operator may differ from the minimized norm (e.g. L2/3 for L0)
    proximity_operator = _proximal_operator[proximal_operator or norm]
    log_min_dual_ratio = math.log(dual_min_ratio)
    # Setup variables
    r = torch.zeros_like(inputs, requires_grad=True)
    if random_init:
        nn.init.uniform_(r, -random_init, random_init)
        # keep inputs + r inside the valid box [0, 1]
        r.data.add_(inputs).clamp_(min=0, max=1).sub_(inputs)
    optimizer = optim.Adam([r], lr=primal_lr)
    lr_lambda = lambda i: primal_lr_decrease ** (i / num_steps)
    scheduler = optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lr_lambda)
    # dual variables live in logit space; softmax yields the two weights
    λ = torch.zeros(batch_size, 2, dtype=torch.float, device=device)
    λ[:, 1] = -math.log(dual_ratio_init)
    λ_ema = λ.softmax(dim=1)
    # Init trackers
    best_dist = torch.full((batch_size,), float('inf'), device=device)
    best_adv = inputs.clone()
    adv_found = torch.zeros_like(best_dist, dtype=torch.bool)
    for i in range(num_steps):
        adv_inputs = inputs + r
        logits = model(adv_inputs)
        dist = distance(adv_inputs.detach(), inputs)
        if i == 0:
            # built once: +inf at the true label excludes it from the max
            labels_infhot = torch.zeros_like(logits.detach()).scatter(1, labels.unsqueeze(1), float('inf'))
            dl_func = partial(difference_of_logits, labels=labels, labels_infhot=labels_infhot)
        # m_y < 0 <=> the (mis)classification constraint is satisfied
        m_y = multiplier * dl_func(logits)
        is_adv = m_y < 0
        is_smaller = dist < best_dist
        is_both = is_adv & is_smaller
        adv_found.logical_or_(is_adv)
        best_dist = torch.where(is_both, dist.detach(), best_dist)
        best_adv = torch.where(batch_view(is_both), adv_inputs.detach(), best_adv)
        # only the classification term is differentiated; the distance term
        # is handled by the proximal step below
        cls_loss = F.softplus(m_y.clamp_min(0))
        grad_r = grad(cls_loss.sum(), inputs=r, only_inputs=True)[0]
        grad_λ = m_y.detach().sign()
        # gradient descent on primal variables
        r.grad = grad_r
        optimizer.step()
        θ_r = optimizer.param_groups[0]['lr']
        scheduler.step()
        r.data.add_(inputs).clamp_(min=0, max=1).sub_(inputs)
        # proximal adam https://arxiv.org/abs/1910.10094
        # ψ is Adam's bias-corrected second-moment preconditioner
        β_2 = optimizer.param_groups[0]['betas'][1]
        v = optimizer.state[r]['exp_avg_sq']
        ψ = torch.sqrt(v / (1 - β_2 ** optimizer.state[r]['step'])) + optimizer.param_groups[0]['eps']
        ψ_max = ψ.flatten(1).max(dim=1).values
        effective_lr = θ_r / ψ_max
        # proximal sub-iterations variables
        z_curr = r.detach()
        ε = torch.ones_like(best_dist)
        μ = λ_ema[:, 0] / λ_ema[:, 1] * effective_lr
        for _ in range(proximal_steps):
            z_prev = z_curr
            z_new = proximity_operator(z_curr - ψ / batch_view(ψ_max) * (z_curr - r.detach()), batch_view(μ))
            # project back into the valid box [0, 1]
            z_new.add_(inputs).clamp_(min=0, max=1).sub_(inputs)
            # freeze samples whose relative change already converged
            z_curr = torch.where(batch_view(ε > ε_threshold), z_new, z_prev)
            ε = (z_curr - z_prev).flatten(1).norm(p=2, dim=1) / z_curr.flatten(1).norm(p=2, dim=1)
            if (ε < ε_threshold).all():
                break
        r.data = z_curr
        # gradient ascent on dual variables and exponential moving average
        θ_λ = dual_lr * ((num_steps - 1 - i) / (num_steps - 1) * (1 - dual_lr_decrease) + dual_lr_decrease)
        λ[:, 1].add_(grad_λ, alpha=θ_λ).clamp_(min=log_min_dual_ratio, max=-log_min_dual_ratio)
        λ_ema.mul_(dual_ema).add_(λ.softmax(dim=1), alpha=1 - dual_ema)
        if callback is not None:
            callback.accumulate_line('m_y', i, m_y.mean(), title=f'{attack_name} - Logit difference')
            callback_best = best_dist.masked_select(adv_found).mean()
            callback.accumulate_line([f'l{norm}', f'best_l{norm}'], i, [dist.mean(), callback_best],
                                     title=f'{attack_name} - L{norm} norms')
            callback.accumulate_line(['λ_1', 'λ_2'], i, [λ_ema[:, 0].mean(), λ_ema[:, 1].mean()],
                                     title=f'{attack_name} - Dual variables')
            callback.accumulate_line(['θ_r', 'θ_λ'], i, [θ_r, θ_λ], title=f'{attack_name} - Learning rates')
            callback.accumulate_line('success', i, adv_found.float().mean(), title=f'{attack_name} - Success')
            if (i + 1) % (num_steps // 20) == 0 or (i + 1) == num_steps:
                callback.update_lines()
    return best_adv
|
import logging
import os
from argparse import ArgumentParser
from datetime import date, datetime
from telegram import *
from telegram.ext import *
import settings
from utils import History, convert_date
logger = logging.getLogger(__name__)
class Stop(Exception):
    """Raised to abort handler processing; swallowed silently in error()."""
    pass
# get date from message text if any
def get_custom_date(text: str, default: date):
    """Return the date encoded in the first 10 chars of *text* (YYYY-MM-DD),
    or *default* when text is empty/None or not parseable."""
    if text:
        try:
            return datetime.strptime(text[:10], '%Y-%m-%d').date()
        except ValueError:
            pass
    return default
def check_chat(update: Update, context: CallbackContext):
    """Ensure the bot is used in a supergroup; otherwise reply with usage
    instructions and raise Stop to abort the current handler."""
    chat = update.effective_chat  # type:Chat
    bot = context.bot  # type:Bot
    if chat.type != Chat.SUPERGROUP:
        # reply (in Russian): "add me to a group chat, I'll record and later
        # recall what was discussed some time ago"
        bot.send_message(chat.id, 'Добавь меня в групповой чат, я буду записывать и потом напоминать о том, '
                                  'что обсуждалось в чате некоторое время назад.')
        raise Stop
# if chat type is supergroup, try to add message id to history file. Any other chat type - answer with a default text
def answer(update: Update, context: CallbackContext):
    """Record the incoming message id under its (possibly custom) date.

    The date comes from a leading YYYY-MM-DD in the message text, else the
    send date. Only the FIRST message of each date is kept.
    """
    check_chat(update, context)
    message = update.message  # type:Message
    text = message.text or message.caption
    custom_date = get_custom_date(text, convert_date(message.date).date())
    history = History(message.chat_id)
    if history.has(custom_date):
        # date already recorded: release without writing
        history.close()
    else:
        history.add(custom_date, message.message_id)
        history.save()
# for command 'save': add (replace - if date already exists) message id to history file
def save_command(update: Update, context: CallbackContext):
    """Handle /save: store (or replace) the replied-to message for its date.

    Must be issued as a reply; the date is taken from a leading YYYY-MM-DD
    in the replied message's text, else from its send date.
    """
    check_chat(update, context)
    message = update.message  # type:Message
    reply_to_message = update.message.reply_to_message  # type:Message
    if not reply_to_message:
        message.reply_text('Сделай реплай на сообщение')
        return
    message_id = str(reply_to_message.message_id)
    text = reply_to_message.text or reply_to_message.caption
    custom_date = get_custom_date(text, convert_date(reply_to_message.date).date())
    history = History(message.chat_id)
    # BUG FIX: check BEFORE adding — the original called has() after add(),
    # so it was always True and every save replied 'Обновлено'.
    already_stored = history.has(custom_date)
    history.add(custom_date, message_id)
    history.save()
    if already_stored:
        reply = 'Обновлено для {}'
    else:
        reply = 'Сохранено для {}'
    reply = reply.format(custom_date)
    message.reply_text(reply)
def help_command(update: Update, context: CallbackContext):
    """Handle /help: send links to the project description and repository."""
    chat = update.effective_chat  # type:Chat
    context.bot.send_message(
        chat.id,
        'Подробное <a href="{}">описание</a>, инструкция по использованию со скриншотами и disclaimer.\n\n'
        '<a href="{}">Репозиторий</a>'.format(settings.HELP_URL, settings.REPOSITORY_URL),
        parse_mode='HTML',
    )
def error(update: Update, context: CallbackContext):
    """Global error handler: ignore Stop, log everything else and notify."""
    if isinstance(context.error, Stop):
        # deliberate abort raised by check_chat — not an error
        return
    logger.exception(context.error)
    if update:
        message = update.effective_message  # type:Message
        message.reply_text('Произошла ошибка')
def main():
    """Configure logging, register handlers, and start long polling."""
    parser = ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    # History files are stored under ./data
    os.makedirs('data', exist_ok=True)
    updater = Updater(token=settings.TELEGRAM_BOT_API, use_context=True)
    level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)
    dispatcher = updater.dispatcher  # type:Dispatcher
    dispatcher.add_handler(CommandHandler('start', answer))
    dispatcher.add_handler(CommandHandler('save', save_command))
    dispatcher.add_handler(CommandHandler('help', help_command))
    # any plain text or captioned media message is recorded via answer()
    dispatcher.add_handler(MessageHandler(Filters.update.message & (Filters.text | Filters.caption), answer))
    dispatcher.add_error_handler(error)
    updater.start_polling(allowed_updates=['message'])
# TODO: try-except KeyboardInterrupt
if __name__ == '__main__':
    main()
|
import pandas as pd
import numpy as np
import psycopg2
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import Constants
import sys
from pathlib import Path
# Output folder for the pickled dataframe is the first CLI argument.
output_folder = Path(sys.argv[1])
output_folder.mkdir(parents = True, exist_ok = True)
# NOTE(review): hard-coded local MIMIC-III database credentials.
conn = psycopg2.connect('dbname=mimic user=haoran host=mimic password=password')
pats = pd.read_sql_query('''
select subject_id, gender, dob, dod from mimiciii.patients
''', conn)
# Assign each patient to one of 12 cross-validation folds.
# NOTE(review): plain KFold ignores the `groups` argument — use
# StratifiedKFold/GroupKFold if balancing by gender was intended.
n_splits = 12
pats = pats.sample(frac = 1, random_state = 42).reset_index(drop = True)
kf = KFold(n_splits = n_splits, shuffle = True, random_state = 42)
for c,i in enumerate(kf.split(pats, groups = pats.gender)):
    pats.loc[i[1], 'fold'] = str(c)
adm = pd.read_sql_query('''
select subject_id, hadm_id, insurance, language,
religion, ethnicity,
admittime, deathtime, dischtime,
HOSPITAL_EXPIRE_FLAG, DISCHARGE_LOCATION,
diagnosis as adm_diag
from mimiciii.admissions
''', conn)
# one row per admission, with patient-level columns joined in
df = pd.merge(pats, adm, on='subject_id', how = 'inner')
def merge_death(row):
    """Prefer the admission-level deathtime; fall back to patient-level dod."""
    return row.dod if pd.isnull(row.deathtime) else row.deathtime
# consolidated date of death: admission deathtime if present, else patient dod
df['dod_merged'] = df.apply(merge_death, axis = 1)
notes = pd.read_sql_query('''
select category, chartdate, charttime, hadm_id, row_id as note_id, text from mimiciii.noteevents
where iserror is null
''', conn)
# drop all outpatients. They only have a subject_id, so can't link back to insurance or other fields
notes = notes[~(pd.isnull(notes['hadm_id']))]
# one row per note, admission/patient columns joined in
df = pd.merge(left = notes, right = df, on='hadm_id', how = 'left')
df.ethnicity.fillna(value = 'UNKNOWN/NOT SPECIFIED', inplace = True)
# raw ethnicity strings that did not match any known bucket (for inspection)
others_set = set()
def cleanField(string):
    """Collapse a raw MIMIC ethnicity string into one of six coarse buckets.

    Exact matches and known aliases are mapped first; otherwise the first
    bucket name appearing as a substring wins. Anything unrecognized is
    recorded in module-level ``others_set`` and mapped to 'OTHER'.
    """
    buckets = ['WHITE', 'UNKNOWN/NOT SPECIFIED', 'BLACK', 'HISPANIC/LATINO',
               'OTHER', 'ASIAN']
    aliases = {'HISPANIC OR LATINO': 'HISPANIC/LATINO',
               'BLACK/AFRICAN AMERICAN': 'BLACK',
               'UNABLE TO OBTAIN': 'UNKNOWN/NOT SPECIFIED',
               'PATIENT DECLINED TO ANSWER': 'UNKNOWN/NOT SPECIFIED'}
    if string in buckets:
        return string
    if string in aliases:
        return aliases[string]
    for bucket in buckets:
        if bucket in string:
            return bucket
    others_set.add(string)
    return 'OTHER'
df['ethnicity_to_use'] = df['ethnicity'].apply(cleanField)
# drop notes charted before the patient's date of birth (data errors)
df = df[df.chartdate >= df.dob]
# patient age in years at note chart date
ages = []
for i in range(df.shape[0]):
    ages.append((df.chartdate.iloc[i] - df.dob.iloc[i]).days/365.24)
df['age'] = ages
# these note categories are excluded from fold assignment
df.loc[(df.category == 'Discharge summary') |
       (df.category == 'Echo') |
       (df.category == 'ECG'), 'fold'] = 'NA'
# one list of ICD9 codes per admission
icds = (pd.read_sql_query('select * from mimiciii.diagnoses_icd', conn)
        .groupby('hadm_id')
        .agg({'icd9_code': lambda x: list(x.values)})
        .reset_index())
df = pd.merge(left = df, right = icds, on = 'hadm_id')
def map_lang(x):
    """Bucket the raw LANGUAGE field into English / Missing / Other."""
    if x == 'ENGL':
        return 'English'
    return 'Missing' if pd.isnull(x) else 'Other'
df['language_to_use'] = df['language'].apply(map_lang)
# sanity check: every demographic group referenced in Constants must exist
for i in Constants.groups:
    assert(i['name'] in df.columns), i['name']
# ICU acuity scores (OASIS, SOFA, SAPS-II) joined per ICU stay
acuities = pd.read_sql_query('''
select * from (
select a.subject_id, a.hadm_id, a.icustay_id, a.oasis, a.oasis_prob, b.sofa from
(mimiciii.oasis a
natural join mimiciii.sofa b )) ab
natural join
(select subject_id, hadm_id, icustay_id, sapsii, sapsii_prob from
mimiciii.sapsii) c
''', conn)
# indexed by (subject_id, hadm_id) for fast lookup in fill_icustay
icustays = pd.read_sql_query('''
select subject_id, hadm_id, icustay_id, intime, outtime
from mimiciii.icustays
''', conn).set_index(['subject_id','hadm_id'])
def fill_icustay(row):
    """Resolve which ICU stay a note row belongs to.

    Looks up all stays for the note's (subject_id, hadm_id) and returns the
    icustay_id of the latest stay whose intime is on or before the note's
    chart time; returns None when no stay qualifies.
    """
    opts = icustays.loc[[row['subject_id'],row['hadm_id']]]
    # Notes with only a chartdate (no time) get a 2-day grace window so a
    # date-only timestamp can still fall after a stay's intime.
    if pd.isnull(row['charttime']):
        charttime = row['chartdate'] + pd.Timedelta(days = 2)
    else:
        charttime = row['charttime']
    stay = opts[(opts['intime'] <= charttime)].sort_values(by = 'intime', ascending = True)
    if len(stay) == 0:
        return None
    #print(row['subject_id'], row['hadm_id'], row['category'])
    # Latest stay that began before the note was charted.
    return stay.iloc[-1]['icustay_id']
# Only ICU-linked note categories get an icustay_id; other rows stay NaN.
df['icustay_id'] = df[df.category.isin(['Discharge summary','Physician ','Nursing','Nursing/other'])].apply(fill_icustay, axis = 1)
df = pd.merge(df, acuities.drop(columns = ['subject_id','hadm_id']), on = 'icustay_id', how = 'left')
# Clamp masked ages to 91.4 — presumably the MIMIC de-identification
# convention for patients >89; TODO confirm.
df.loc[df.age >= 90, 'age'] = 91.4
df.to_pickle(output_folder / "df_raw.pkl")
|
from collections import defaultdict
import torch
import torch.nn as nn
from mighty.monitor.accuracy import AccuracyEmbedding
from mighty.trainer import TrainerEmbedding, TrainerGrad
from mighty.utils.common import batch_to_cuda, clone_cpu
from mighty.utils.data import DataLoader
from mighty.utils.stub import OptimizerStub
from mighty.utils.var_online import MeanOnlineLabels
from nn.kwta import WTAInterface, IterativeWTASoft
from nn.monitor import MonitorIWTA
from nn.utils import compute_loss, l0_sparsity
from mighty.monitor.accuracy import calc_accuracy, AccuracyEmbedding
class TrainerIWTA(TrainerEmbedding):
    """Trainer for iterative winner-take-all (iWTA) networks.

    Differences from a plain TrainerEmbedding:
      * weights are normally updated by the model's own local rule
        (``model.update_weights``) rather than a gradient optimizer;
        only IterativeWTASoft backpropagates through the loss;
      * both layer outputs 'h' and 'y' are cached each epoch to monitor
        pairwise loss, sparsity, and epoch-to-epoch output convergence;
      * the mutual-info hook records only the 'y' output.
    """
    watch_modules = TrainerEmbedding.watch_modules + (WTAInterface,)
    # Number of candidate synapses sampled per local weight update.
    N_CHOOSE = 100
    # Learning rate of the local (non-gradient) update rule.
    LEARNING_RATE = 0.001

    def __init__(self,
                 model: nn.Module,
                 criterion: nn.Module,
                 data_loader: DataLoader,
                 optimizer=OptimizerStub(),
                 **kwargs):
        """Set up the trainer with a cached-centroid accuracy measure.

        The optimizer defaults to a stub because weights are normally
        updated by the model itself, not by autograd.
        """
        super().__init__(model=model,
                         criterion=criterion,
                         data_loader=data_loader,
                         optimizer=optimizer,
                         accuracy_measure=AccuracyEmbedding(cache=True),
                         **kwargs)
        # Record only the 'y' output in the mutual-info estimator.
        self.mutual_info.save_activations = self.mi_save_activations_y
        self.cached_labels = []                 # labels accumulated this epoch
        self.cached_output = defaultdict(list)  # per-layer ('h'/'y') outputs
        self.cached_output_prev = {}            # previous epoch's binarized outputs
        self.loss_x = None                      # pairwise loss of the raw input x
        self.accuracy_x = None                  # centroid accuracy of the raw input x

    def log_trainer(self):
        """Log iWTA hyperparameters in addition to the base trainer info."""
        super().log_trainer()
        self.monitor.log(f"LEARNING_RATE={self.LEARNING_RATE}")
        self.monitor.log(f"N_CHOOSE={self.N_CHOOSE}")

    def mi_save_activations_y(self, module, tin, tout):
        """
        A hook to save the activations at a forward pass.

        Only the 'y' half of the (h, y) output tuple is recorded.
        """
        if not self.mutual_info.is_updating:
            return
        h, y = tout
        layer_name = self.mutual_info.layer_to_name[module]
        tout_clone = clone_cpu(y.detach().float())
        tout_clone = tout_clone.flatten(start_dim=1)
        self.mutual_info.activations[layer_name].append(tout_clone)

    def _init_monitor(self, mutual_info):
        # Use the iWTA-specific monitor (extra sparsity/convergence plots).
        monitor = MonitorIWTA(
            mutual_info=mutual_info,
            normalize_inverse=self.data_loader.normalize_inverse
        )
        return monitor

    def full_forward_pass(self, train=True):
        # The test split is skipped entirely; only the train pass is evaluated.
        if not train:
            return None
        return super().full_forward_pass(train=train)

    def update_contribution(self, h, y):
        """Feed mean firing rates of 'h' and 'y' into the matching weight
        matrices; parameters are matched by the last letter of their name."""
        freq = dict(y=y.mean(dim=0), h=h.mean(dim=0))
        for name, param in self.model.named_parameters():
            param.update_contribution(freq[name[-1]])

    def train_batch(self, batch):
        """One step: forward, then either backprop (IterativeWTASoft) or the
        model's local weight update."""
        x, labels = batch
        h, y = self.model(x)
        self.update_contribution(h, y)
        loss = self._get_loss(batch, (h, y))
        if isinstance(self.model, IterativeWTASoft):
            loss.backward()
            self.optimizer.step(closure=None)
        else:
            self.model.update_weights(x, h, y, n_choose=self.N_CHOOSE,
                                      lr=self.LEARNING_RATE)
        return loss

    def _update_cached(self):
        """Flush per-epoch caches: plot class-mean embeddings, pairwise loss,
        sparsity, and the fraction of bits flipped since the last epoch."""
        labels = torch.cat(self.cached_labels)
        convergence = {}
        sparsity = {}
        for name, output in self.cached_output.items():
            output = torch.cat(output)
            mean = [output[labels == label].mean(dim=0)
                    for label in labels.unique()]
            mean = torch.stack(mean)
            self.monitor.clusters_heatmap(mean, title=f"Embeddings '{name}'")
            # self.monitor.plot_assemblies(output, labels, name=name)
            loss = compute_loss(output, labels)
            self.monitor.update_loss(loss, mode=f'pairwise {name}')
            sparsity[name] = l0_sparsity(output)
            output = output.int()
            if name in self.cached_output_prev:
                # Per-sample Hamming distance to last epoch, normalized by width.
                xor = (self.cached_output_prev[name] ^ output).sum(dim=1)
                convergence[name] = xor.float().mean().item() / output.size(1)
            self.cached_output_prev[name] = output
        self.monitor.update_output_convergence(convergence)
        self.monitor.update_sparsity(sparsity)
        self.monitor.update_loss(loss=self.loss_x, mode='pairwise x')
        self.cached_output.clear()
        self.cached_labels.clear()
        if self.timer.epoch == self.timer.n_epochs:
            print(f"convergence={convergence}")
            print(f"sparsity={sparsity}")

    def training_started(self):
        """Baseline pass before training: log input sparsity, the accuracy
        and pairwise loss of the raw input x, and the initial h/y outputs."""
        # self.monitor.weights_heatmap(self.model)
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())
        x_centroids = AccuracyEmbedding()
        x = []
        for x_batch, labels in self.data_loader.eval():
            x_batch, labels = batch_to_cuda((x_batch, labels))
            x.append(x_batch)
            x_centroids.partial_fit(x_batch, labels)
            h, y = self.model(x_batch)
            self.cached_labels.append(labels)
            self.cached_output['h'].append(h)
            self.cached_output['y'].append(y)
        x = torch.cat(x)
        self.monitor.log(f"sparsity x: {l0_sparsity(x):.3f}")
        labels = torch.cat(self.cached_labels)
        self.accuracy_x = calc_accuracy(labels, x_centroids.predict(x))
        print(f"accuracy x: {self.accuracy_x:.3f}")
        self.loss_x = compute_loss(x, labels)
        self._update_cached()

    def _epoch_finished(self, loss):
        """Per-epoch monitoring, cache flush and model epoch hook.

        Deliberately skips TrainerEmbedding._epoch_finished and calls
        TrainerGrad's version directly.
        """
        # self.monitor.weights_heatmap(self.model)
        # self.monitor.update_permanences_removed(self.model.permanences_removed())
        self.monitor.update_contribution(self.model.weight_contribution())
        self.monitor.update_kwta_thresholds(self.model.kwta_thresholds())
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())
        self._update_cached()
        self.model.epoch_finished()
        TrainerGrad._epoch_finished(self, loss)

    def _on_forward_pass_batch(self, batch, output, train):
        """Cache h/y for training batches; delegate the rest to TrainerGrad
        with the 'y' output only."""
        h, y = output
        if train:
            x, labels = batch
            self.cached_labels.append(labels)
            self.cached_output['h'].append(h)
            self.cached_output['y'].append(y)
        TrainerGrad._on_forward_pass_batch(self, batch, y, train)

    def _get_loss(self, batch, output):
        # In case of unsupervised learning, '_get_loss' is overridden
        # accordingly. The criterion sees only the 'y' output.
        input, labels = batch
        h, y = output
        return self.criterion(y, labels)
|
# ---
# jupyter:
# jupytext:
# cell_markers: '{{{,}}}'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter notebook
#
# This notebook is a simple jupyter notebook. It only has markdown and code cells. And it does not contain consecutive markdown cells. We start with an addition:
a = 1
b = 2
a + b
# Now we return a few tuples
a, b
a, b, a+b
# And this is already the end of the notebook
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Pool of names drawn from Oceanic/Polynesian mythology — presumably used as
# a random-name source by consumers of this module; TODO confirm usage.
names = [
    'Abeguwo', 'Abere', 'Adaro', 'Adi', 'Mailagu', 'Afekan', 'Agunua', 'Ahoeitu', 'Tupuai', 'Aiaru', 'Aku', 'Muki',
    'Alalahe', 'Alilmenehune', 'Aluelop', 'Aluluei', 'Ao', 'Kahiwahiwa', 'Kanapanapa', 'Nue', 'Pakarea', 'Potano',
    'Pour', 'Roa', 'Takawe', 'Hau', 'Ko', 'Hai', 'Mantangi', 'Areop', 'Enap', 'It', 'Eonin', 'Aroonoona', 'Arununa',
    'Atanua', 'Atea', 'Au', 'Aumakua', 'Babamik', 'Bue', 'Dakuwanga', 'Darvi', 'Degei', 'Demas', 'Diwa', 'Zib',
    'Dudugera', 'Eitumatupua', "Fa'ahotu", 'Ahotu', 'Fardu', 'Fati', 'Faumea', "Fe'e", 'Fe', 'Flaming', 'Teeth',
    'Godsticks', 'Goga', 'Gogo', 'Gora', 'Daileng', 'Hana', 'Haumea', 'Haumiatiketike', 'Haumietiketike', "Hi'aka",
    "Hi'iaka", 'Hiiaka', "Hikule'o", 'Hikule', 'Hina', 'Ika', 'Uri', 'Hine', 'Keha', 'Nuitepo', 'Titama', 'Hinenuitepo',
    'Hinetitama', 'Hoa', 'Tapu', 'Honoyeta', 'Hukere', 'Nui', 'Tere', 'Maeri', 'Ilaheua', 'Ilaheva', 'Ira', 'Waru',
    'Jari', 'Jugumishanta', 'Kae', 'Kahausibware', 'Kahoali', 'Aba', 'Kaitangata', 'Kakamora', "Kamapua'a", 'Kamapuaa',
    'Kamarong', 'Kanaloa', 'Kane', 'Kang', 'Kilyaki', 'Pua', 'Kua', 'Akahi', 'Kuku', 'Lau', 'Kumu', 'Honua', 'Kwolam',
    'Laka', 'Lalo', 'Latmikaik', 'Laufakanaa', 'Lejman', 'Lingadua', 'Lioumere', "Lo'au", "Lo'o", 'Lo', 'Loau', 'Lono',
    'Lugeilan', 'Mahina', 'Mahu', 'Fatu', 'Rau', 'Mahuika', 'Make', 'Makea', 'Makemake', 'Marakihau', 'Marawa', 'Maru',
    'Matabiri', 'Matagaigai', 'Maui', 'Menchune', 'Mesede', 'Milu', 'Miru', 'Moai', 'Moko', 'Mon', 'Moro', 'Funa',
    'Morofuna', 'Motikitik', 'Murimuria', 'Atibu', 'Kaa', 'Kika', 'Nanganana', 'Naniumiap', 'Nareau', 'Ndauthina',
    'Ndengi', 'Nei', 'Tituaabine', 'Ngani', 'Vatu', 'Ngarara', 'Ngaru', 'Ni', "Nu'u", 'U', 'Oa', 'Rove', 'Olifat',
    'Olofat', 'Oma', 'Rumufu', 'Oro', 'Pahuanuiapitaaitera', 'Paikea', 'Pak', "A'a", 'Palulop', 'Papa', 'Tu', 'Anuku',
    'Papatuanuku', 'Pele', 'Pelei', 'Pere', 'Po', 'Pouakai', "Pu'uhele", 'Pu', 'Uhele', 'Puarata', 'Pulotu', 'Qasavara',
    'Qat', 'Raivuki', 'Rakim', 'Rangi', 'Ranginui', 'Ratu', 'Mai', 'Bulu', 'Rehua', 'Rigi', "Ro'o", 'Ro', 'Rokola',
    'Rongo', 'Rongomatane', 'Roua', 'Ruaumoko', 'Sina', 'Sisimatailaa', 'Soido', 'Solal', 'Sosom', "Ta'arda", "Ta'aroa",
    'Ta', 'Arda', 'Aroa', 'Oroa', 'Taburimai', 'Tagaro', 'Tahiri', 'Tea', 'Tahu', 'Tama', 'Tane', 'Mahuta',
    'Tanemahuta', 'Tangaloa', 'Tangaroa', 'Tangata', 'Tangatamanu', "Tangi'ia", 'Tangi', 'Ia', 'Taonoui', 'Taonouit',
    'Tautohito', 'Tawhiri', 'Tawhirimatea', 'Ikawai', 'Nao', 'Reere', 'Teanoi', 'Tiki', 'Wananga', 'Timbehes',
    'Tinandi', 'Thambonga', 'Tintrau', "Titua'abine", 'Titua', 'Abine', 'To', 'Kabinana', 'Karvuvu', 'Toropotaa', 'Toi',
    'Totoima', 'Tpereaki', 'Matavenga', 'Tuli', 'Tumatauenga', 'Tumuitearetoka', 'Tunaroa', 'Turi', 'Turukawa',
    'Uenuku', 'Ulupoka', 'Unihi', 'Pili', 'Vari', 'Varima', 'Takere', 'Vehine', 'Hae', 'Wahini', 'Waiora', 'Whaitiri',
    'Whiro', 'Yalafath']
|
import cv2
import mediapipe as mp
from mediapipe.framework.formats import landmark_pb2
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
class FaceMeshPreview:
    """Debug viewer that draws MediaPipe face-mesh results onto a frame.

    show_image() optionally overlays the mesh tesselation, selected feature
    landmarks, orientation vectors and yaw/pitch/roll + pivot text, then
    displays the (mirrored) frame in an OpenCV window.
    """
    def __init__(self):
        # Blue for feature points, red for orientation vectors, the default
        # MediaPipe style for the mesh tesselation.
        self.feature_drawing_spec = mp_drawing.DrawingSpec(color=mp_drawing.BLUE_COLOR)
        self.rotation_drawing_spec = mp_drawing.DrawingSpec(color=mp_drawing.RED_COLOR)
        self.mesh_drawing_spec = mp_drawing_styles.get_default_face_mesh_tesselation_style()

    def show_image(self, base_image, selected_face = None, face_features = None, orientation=None, rotation=None, pivot=None):
        """Draw the requested annotations and show the frame.

        base_image is assumed to be an RGB frame (converted to BGR here) —
        TODO confirm against the caller. Returns True when ESC was pressed
        (caller should stop), otherwise False.
        """
        # Draw the face mesh annotations on the image.
        base_image.flags.writeable = True
        base_image = cv2.cvtColor(base_image, cv2.COLOR_RGB2BGR)
        if selected_face is not None:
            # draw mesh
            mp_drawing.draw_landmarks(
                image=base_image,
                landmark_list=selected_face,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=self.mesh_drawing_spec)
        if face_features is not None:
            # draw selected face features
            # NOTE(review): z=l[1] duplicates the y coordinate — looks like a
            # typo for z=l[2]; confirm whether z matters for 2D drawing.
            feature_landmarks = landmark_pb2.NormalizedLandmarkList(
                landmark = [
                    landmark_pb2.NormalizedLandmark(x=l[0], y=l[1], z=l[1]) for l in [
                        *face_features.values(),
                    ]
                ]
            )
            mp_drawing.draw_landmarks(
                image=base_image,
                landmark_list=feature_landmarks,
                landmark_drawing_spec=self.feature_drawing_spec)
        if orientation is not None:
            # draw direction vectors anchored at the nose landmark
            rotation_landmarks = landmark_pb2.NormalizedLandmarkList(
                landmark = [
                    landmark_pb2.NormalizedLandmark(x=l[0], y=l[1], z=l[1]) for l in [
                        face_features["nose"] - orientation[0] * 0.25,
                        face_features["nose"] - orientation[1] * 0.25,
                        face_features["nose"] - orientation[2] * 0.25
                    ]
                ]
            )
            mp_drawing.draw_landmarks(
                image=base_image,
                landmark_list=rotation_landmarks,
                landmark_drawing_spec=self.rotation_drawing_spec)
        # Flip the image horizontally for a selfie-view display.
        base_image = cv2.flip(base_image, 1)
        if rotation is not None:
            # write directions as text
            for i, rot in enumerate(zip(["yaw", "pitch", "roll"], rotation)):
                cv2.putText(
                    base_image,
                    "{}: {}".format(*rot),
                    (1, 10+10*i), #position at which writing has to start
                    cv2.FONT_HERSHEY_SIMPLEX, #font family
                    0.4, #font size
                    (209, 80, 0, 255), #font color
                    1) #font stroke
        if pivot is not None:
            # write directions as text
            for i, rot in enumerate(zip("xyz", pivot)):
                cv2.putText(
                    base_image,
                    "{}: {}".format(*rot),
                    (1, 10+10*3+10+10*i), #position at which writing has to start
                    cv2.FONT_HERSHEY_SIMPLEX, #font family
                    0.4, #font size
                    (209, 80, 0, 255), #font color
                    1) #font stroke
        else:
            # NOTE(review): this second flip un-mirrors the frame whenever
            # pivot is None — confirm this asymmetry is intentional.
            base_image = cv2.flip(base_image, 1)
        cv2.imshow("MediaPipe Face Mesh", base_image)
        if cv2.waitKey(5) & 0xFF == 27:
            # escape is pressed signal to stop
            return True
        return False
|
from pathlib import Path
import os
import yaml
import command
class Config(object):
    """In-memory representation of an exec-helper settings file.

    Collects commands, patterns and plugin search paths and serializes them
    as YAML into a settings file inside the given directory.
    """

    def __init__(self, directory, filename='.exec-helper'):
        self._settings_file = Path(directory).joinpath(filename)
        self._directory = directory
        self._commands = dict()
        self._patterns = set()
        self._plugin_search_path = []
        # Refuse to clobber a settings file that already exists on disk.
        if os.path.exists(self._settings_file):
            raise AssertionError(
                "Temporary file '{file}' already exists!".format(file=self._settings_file))

    def __del__(self):
        # Automatic cleanup is intentionally disabled; call remove() explicitly.
        # self.remove()
        pass

    @property
    def directory(self):
        """Directory this configuration lives in."""
        return self._directory

    @property
    def file(self):
        """Full path of the settings file."""
        return self._settings_file

    @property
    def commands(self):
        """Mapping of command id -> command object."""
        return self._commands

    def create_command(self, command_id):
        """ Creates a command for the given command id using an implementation-specific plugin
        """
        self._commands[command_id] = command.Command(command_id, 'command-line-command', self._directory)

    def add_command(self, command):
        """ Adds the given command as a command associated with the command id to the configuration """
        self._commands[command.id] = command

    def set_environment(self, cmd, envs):
        """Set environment variables on the command registered under *cmd*."""
        self._commands[cmd].set_environment(envs)

    def add_pattern(self, pattern):
        """Register a pattern with this configuration."""
        self._patterns.add(pattern)

    def add_plugin_search_path(self, path):
        """Append an additional plugin search path (stored as str)."""
        self._plugin_search_path.append(str(path))

    def write(self):
        """Serialize the configuration to the settings file as YAML."""
        content = dict()
        # Make sure the config file is not empty
        content['blaat'] = []
        if self._plugin_search_path:
            content['additional-search-paths'] = self._plugin_search_path
        if self._patterns:
            content['patterns'] = {}
            for pattern in self._patterns:
                entry = {'default-values': pattern.default_values}
                if pattern.long_options:
                    entry['long-option'] = pattern.long_options
                content['patterns'][pattern.id] = entry
        if self._commands:
            content['commands'] = []
            for command_id, cmd in self._commands.items():
                content['commands'].append(command_id)
                content.update(cmd.to_dict())
                cmd.write_binary()
        with open(self._settings_file, 'w') as handle:
            yaml.dump(content, handle)

    def remove(self):
        """Delete the settings file if it exists."""
        if os.path.exists(self._settings_file):
            os.remove(self._settings_file)
|
import pickle, os
import thorpy
from mapobjects.objects import MapObject
def ask_save(me):
    """Ask the user whether to save the current map; save it if confirmed,
    then close the menu either way."""
    choice = thorpy.launch_binary_choice("Do you want to save this map ?")
    # Default filename: current map filename without its .map extension.
    default_fn = me.get_fn().replace(".map","")
    if choice:
        fn = thorpy.get_user_text("Filename", default_fn, size=(me.W//2,40))
        fn += ".map"
        to_file(me, fn)
    thorpy.functions.quit_menu_func()
def get_saved_files_button(root):
    """Build a drop-down launcher listing every .map file under *root*."""
    files = [fn for fn in os.listdir(root) if fn.endswith(".map")]
    ddl = thorpy.DropDownListLauncher.make("Choose a file to load", "", files)
    def unlaunch():
        # Close both the drop-down itself and the enclosing menu.
        ddl.default_unlaunch()
        thorpy.functions.quit_menu_func()
    ddl.unlaunch_func = unlaunch
    return ddl
def ask_load():
    # Placeholder: loading is handled elsewhere (see get_saved_files_button).
    pass
################################################################################
def obj_to_file(obj, f):
    """Pickle each saved attribute of *obj*, in declaration order, onto
    stream *f* (inverse of file_to_obj)."""
    for attr_name in obj.get_saved_attributes():
        pickle.dump(getattr(obj, attr_name), f)
def file_to_obj(f, obj):
    """Unpickle saved attributes from stream *f* onto *obj*, in the same
    order obj_to_file wrote them."""
    for attr_name in obj.get_saved_attributes():
        setattr(obj, attr_name, pickle.load(f))
def to_file(me, fn):
    """Serialize the map editor state to *fn*.

    On-disk order: map-initializer attributes, then count + (coord, name)
    for each modified cell, then count + (coord, attributes) for each
    dynamic object. Must stay in sync with from_file_base /
    from_file_cells / from_file_units.
    """
    print("Saving map to", fn)
    # Temporarily rename the map after the file (underscores -> spaces),
    # restoring the original name after saving.
    tmp_name = me.map_initializer.name
    me.map_initializer.name = fn.replace("_", " ")
    with open(fn, "wb") as f:
        obj_to_file(me.map_initializer, f) #store map properties
        #save modified cells
        print("dumping", len(me.modified_cells), "modified cells")
        pickle.dump(len(me.modified_cells), f) #len(modified cells)
        for x,y in me.modified_cells:
            cell = me.lm.cells[x][y]
            pickle.dump((x,y),f)
            pickle.dump(cell.name,f) #cell name
        #save modified objects
        print("dumping", len(me.dynamic_objects), "dynamic objects")
        pickle.dump(len(me.dynamic_objects), f) #len(dynamic_objects)
        for obj in me.dynamic_objects:
            pickle.dump(obj.get_cell_coord(), f) #coord
            obj_to_file(obj, f) #dyn obj
    me.map_initializer.name = tmp_name
def from_file_base(f):
    """Load map properties and re-generate the map"""
    # Imported lazily — presumably to avoid a circular import with the
    # editor package; TODO confirm.
    from editor.mapbuilding import MapInitializer
    print("Loading map")
    mi = MapInitializer("")
    file_to_obj(f, mi)
    me = mi.configure_map_editor()
    return me
def from_file_cells(f, me):
    """Load cells and their logical content (names, properties, etc.)"""
    print("Loading cells")
    n = pickle.load(f) #len(modified cells)
    for i in range(n):
        x,y = pickle.load(f) #coord
        name = pickle.load(f) #name
        #
        me.lm.cells[x][y].set_name(name)
def from_file_units(f, me):
    """Load units and their logical content (names, properties, etc.)"""
    print("Loading units")
    n = pickle.load(f) #len(dynamic_objects)
    for i in range(n):
        coord = pickle.load(f) #coord
        # Rebuild the attribute dict in the exact order obj_to_file wrote it.
        a = {}
        for attr_name in MapObject.get_saved_attributes():
            a[attr_name] = pickle.load(f)
        #
        print("*** Loading unit", a["name"])
        print(a)
        obj = MapObject(me, fns=a["fns"], name=a["name"], factor=a["factor"],
                        relpos=a["relpos"], build=a["build"], new_type=a["new_type"])
        obj_added = me.add_unit(coord, obj, a["quantity"])
|
from django import forms
from enrollment.models import Servicio
from enrollment.models import TipoServicio
from enrollment.models import Matricula
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.forms import ModelForm, Form
from utils.models import TiposNivel
from django.utils.translation import ugettext_lazy as _
from discounts.models import Descuento
from discounts.models import TipoDescuento
from utils.middleware import get_current_colegio, get_current_userID
##############################################################
# Solicitar Descuentos
##############################################################
class SolicitarDescuentoForm(ModelForm):
    """
    Form for the Descuento (discount request) model.
    Note:
        only the user-editable fields are exposed as form fields
    """
    class Meta:
        model = Descuento
        fields = [
            'matricula',
            'tipo_descuento',
            'numero_expediente',
            'comentario',
        ]
        labels = {
            'matricula':_('Solicitante'),
            'tipo_descuento':_('Descuento'),
            'numero_expediente':_('Nro. Expediente'),
            'comentario':_('Comentario'),
        }
    def ChoiceNiveles(self):
        # Static school-level choices; currently not wired into __init__.
        MY_CHOICES = (
            ('1', 'Inicial'),
            ('2', 'Primaria'),
            ('3', 'Secundaria'),
        )
        return MY_CHOICES
    def __init__(self, *args, **kwargs):
        """Apply Bootstrap styling and mark the enrollment widget non-editable."""
        super().__init__(*args, **kwargs)
        #self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
        #self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
        self.fields['matricula'].widget.attrs.update({'class': 'form-control'})
        self.fields['tipo_descuento'].widget.attrs.update({'class': 'form-control'})
        self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
        self.fields['comentario'].widget.attrs.update({'class': 'form-control'})
        self.fields['matricula'].widget.attrs['editable'] = False
class TipoDescuentForm(ModelForm):
    """
    Form for the TipoDescuento (discount type) model.
    Note:
        only the user-editable fields are exposed as form fields
    """
    # Only active services are selectable; narrowed further per school in __init__.
    servicio = forms.ModelChoiceField(queryset=Servicio.objects.filter(activo=True))
    class Meta:
        model = TipoDescuento
        fields = [
            'servicio',
            'descripcion',
            'porcentaje',
        ]
        labels = {
            'servicio': _('Servicio'),
            'descripcion': _('Descripción'),
            'porcentaje': _('Porcentaje'),
        }
    def __init__(self, *args, **kwargs):
        """Pop the optional 'colegio' kwarg, style the widgets, and restrict
        the service queryset to that school when given."""
        colegio = kwargs.pop('colegio', None)
        super(TipoDescuentForm, self).__init__(*args, **kwargs)
        # self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
        # self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
        self.fields['servicio'].widget.attrs.update({'class': 'form-control'})
        self.fields['descripcion'].widget.attrs.update({'class': 'form-control'})
        self.fields['porcentaje'].widget.attrs.update({'class': 'form-control'})
        if colegio:
            self.fields['servicio'].queryset = Servicio.objects.filter(activo=True,tipo_servicio__colegio__id_colegio=colegio)
##############################################################
# Aprobar Descuentos
##############################################################
class DetalleDescuentosForm(forms.Form):
    """
    Filter form for the discount-detail (income control) listing.
    Note:
        only the user-editable fields are exposed as form fields
    """
    alumno = forms.CharField(required=False)
    anio = forms.CharField()
    numero_expediente = forms.CharField(required=False)
    estado = forms.CharField()
    def ChoiceAnio(self):
        # Hard-coded selectable years.
        MY_CHOICES = (
            ('2017', '2017'),
            ('2016', '2016'),
        )
        return MY_CHOICES
    def ChoiceEstado(self):
        # Approval-state filter options.
        MY_CHOICES = (
            ('Todos', 'Todos'),
            ('Aprobado', 'Aprobado'),
            ('No_aprobado', 'No aprobado'),
            ('Pendiente', 'Pendiente'),
        )
        return MY_CHOICES
    def __init__(self, *args, **kwargs):
        """Replace anio/estado with choice fields and apply Bootstrap classes."""
        super().__init__(*args, **kwargs)
        self.fields['anio'] = forms.ChoiceField(choices=self.ChoiceAnio())
        self.fields['estado'] = forms.ChoiceField(choices=self.ChoiceEstado())
        self.fields['alumno'].widget.attrs.update({'class': 'form-control'})
        self.fields['anio'].widget.attrs.update({'class': 'form-control'})
        self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
        self.fields['estado'].widget.attrs.update({'class': 'form-control'})
|
import numpy as np
import cv2 as cv
# FLANN parameters for binary (ORB) descriptors; algorithm 6 selects LSH.
flann_params= dict(algorithm = 6,
                   table_number = 6, # 12
                   key_size = 12, # 20
                   multi_probe_level = 1) #2
def init_feature():
    """Initialize the ORB feature detector and its FLANN-based matcher.

    Returns a (detector, matcher) tuple.
    """
    orb = cv.ORB_create(3000)
    # Hamming norm is the natural metric for ORB's binary descriptors.
    norm = cv.NORM_HAMMING
    #matcher = cv.BFMatcher(norm)
    flann = cv.FlannBasedMatcher(flann_params, {})
    return orb, flann
def filter_matches(kp1, kp2, matches, ratio = 0.8):
    """Apply Lowe's ratio test and collect matched keypoint coordinates.

    Returns (p1, p2, kp_pairs): float32 point arrays for each image plus the
    list of surviving (kp1, kp2) keypoint pairs.
    """
    good1, good2 = [], []
    for candidates in matches:
        # Keep a match only when the best distance is clearly smaller than
        # the second-best distance (ratio test).
        if len(candidates) == 2 and candidates[0].distance < candidates[1].distance * ratio:
            best = candidates[0]
            good1.append(kp1[best.queryIdx])
            good2.append(kp2[best.trainIdx])
    p1 = np.float32([kp.pt for kp in good1])
    p2 = np.float32([kp.pt for kp in good2])
    return p1, p2, list(zip(good1, good2))
c = []
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    """Draw two images side by side with their matches and show the result.

    win: OpenCV window name. kp_pairs: (kp1, kp2) pairs from filter_matches.
    status: per-pair inlier mask (all-inliers when None). H: homography from
    img1 to img2, or None.

    Returns the float32 corners of img1 projected into the composite image
    (offset right by img1's width), or None when H is None.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1 + w2, :3] = img2
    # BUGFIX: corners1 was unbound when H is None, raising NameError at the
    # final return. Initialize it so the no-homography case returns None.
    corners1 = None
    if H is not None:
        # Project img1's corner rectangle into img2's frame, shifted right
        # by w1 so it lands on the second image inside 'vis'.
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners1 = np.float32(cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        corners = np.int32(cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        cv.polylines(vis, [corners], True, (0, 0, 255))
    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))
    green = (0, 255, 0)
    red = (0, 0, 255)
    # First pass: green dots for inliers, red crosses for outliers.
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv.circle(vis, (x1, y1), 2, col, -1)
            cv.circle(vis, (x2, y2), 2, col, -1)
        else:
            col = red
            r = 2
            thickness = 3
            cv.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)
            cv.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)
            cv.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)
            cv.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)
    # Second pass: connect the inlier pairs (drawn after the markers).
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv.line(vis, (x1, y1), (x2, y2), green)
    cv.imshow(win, vis)
    return corners1
# Reference table image; its keypoints/descriptors are computed once at import.
scale_percent =25
img1 = cv.imread(cv.samples.findFile('table7A.jpg'))
# Scaled dimensions are computed but the resize is currently disabled.
width = int(img1.shape[1] * scale_percent / 100)
height = int(img1.shape[0] * scale_percent / 100)
#img1 = cv.resize(img1, (width,height))
detector, matcher = init_feature()
# apply orb on table image
kp1, desc1 = detector.detectAndCompute(img1, None)
def getCorners(frame):
    """Locate the reference table image (img1) inside *frame*.

    Uses the module-level ORB detector/matcher and img1's precomputed
    keypoints/descriptors. Returns the projected table corners as produced
    by explore_match (in composite-image coordinates, offset by img1's
    width). When fewer than 4 matches survive filtering, H is None and no
    corners can be projected — callers should handle that case.
    """
    # apply orb on frame
    kp2, desc2 = detector.detectAndCompute(frame, None)
    print('matching...')
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
    #filter matches and keep strong matches
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        # H: transformation matrix
        H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
        print('%d / %d inliers/matched' % (np.sum(status), len(status)))
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
    corners = explore_match('find_table', img1, frame, kp_pairs, status, H)
    return corners
def getTableFromFrame (corners, frame):
    """Warp the detected table region of *frame* to a fronto-parallel view.

    corners: four points in the side-by-side composite's coordinates (as
    returned by getCorners). Returns the warped image of size (w1, h1).
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = frame.shape[:2]
    # Rebuild the same side-by-side composite used during matching so the
    # corner coordinates (offset by w1) line up with the frame's pixels.
    vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1+w2, :3] = frame
    pts1 = corners
    pts2 = np.float32([[0,0],[w1,0],[w1,h1], [0,h1]])
    M = cv.getPerspectiveTransform(pts1,pts2)
    # print((w1, h1))
    dst = cv.warpPerspective(vis, M,(w1,h1))
    return dst
|
''' Register rule-based models or pre-trained models
'''
from rlcard.models.registration import register, load
|
# -*- coding: utf-8 -*-
from enum import IntFlag
from enum import unique
from utils.misc import escape_enum
from utils.misc import pymysql_encode
__all__ = ('ClientFlags',)
@unique
@pymysql_encode(escape_enum)
class ClientFlags(IntFlag):
    # NOTE: many of these flags are quite outdated and/or
    # broken and are even known to false positive quite often.
    # they can be helpful; just take them with a grain of salt.
    """osu! anticheat <= 2016 (unsure of age)"""
    Clean = 0 # no flags sent
    # flags for timing errors or desync.
    # NOTE(review): bit 0 is unused — flags start at 1 << 1; presumably
    # matching the client's own numbering.
    SpeedHackDetected = 1 << 1
    # flags when two internal values mismatch.
    # XXX: this false flags a lot so most code
    # written around the community just ignores
    # this bit; i'll investigate a bit i guess.
    IncorrectModValue = 1 << 2
    MultipleOsuClients = 1 << 3
    ChecksumFailure = 1 << 4
    FlashlightChecksumIncorrect = 1 << 5
    # these are only used on the osu!bancho official server.
    OsuExecutableChecksum = 1 << 6
    MissingProcessesInList = 1 << 7 # also deprecated as of 2018
    # flags for either:
    # 1. pixels that should be outside the visible radius
    # (and thus black) being brighter than they should be.
    # 2. from an internal alpha value being incorrect.
    FlashLightImageHack = 1 << 8
    SpinnerHack = 1 << 9
    TransparentWindow = 1 << 10
    # (mania) flags for consistently low press intervals.
    FastPress = 1 << 11
    # from my experience, pretty decent
    # for detecting autobotted scores.
    RawMouseDiscrepancy = 1 << 12
    RawKeyboardDiscrepancy = 1 << 13
    """osu! anticheat 2019"""
    # XXX: the aqn flags were fixed within hours of the osu!
    # update, and vanilla hq is not so widely used anymore.
    RunWithLdFlag = 1 << 14
    ConsoleOpen = 1 << 15
    ExtraThreads = 1 << 16
    HQAssembly = 1 << 17
    HQFile = 1 << 18
    RegistryEdits = 1 << 19
    SQL2Library = 1 << 20
    libeay32Library = 1 << 21
    aqnMenuSample = 1 << 22
|
import os
from django.contrib import messages
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from django.core.mail import EmailMessage, send_mail
from .forms import CreateUserForm
from .models import MyUser
from .utils import generate_token
def registerPage(request):
    """Render the signup form; on valid POST create an inactive user, send
    the activation email, and redirect to the login page."""
    if request.user.is_authenticated:
        messages.info(request, 'Already logged in')
        return redirect('landing')
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Account stays inactive until the emailed token is confirmed.
            user.is_active = False
            user.save()
            activate_msg(request, user)
            messages.success(request, 'Please confirm your email')
            return redirect('loginPage')
    context = {'form': form}
    return render(request, 'accounts/register.html', context)
def loginPage(request):
    """Render the login form and authenticate the user on POST."""
    if request.user.is_authenticated:
        messages.info(request, 'Already logged in')
        return redirect('landing')
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('landing')
        else:
            messages.error(request, 'Invalid credentials ')
    return render(request, 'accounts/login.html')
def logoutUser(request):
    """Log the current user out and redirect home.

    The prints (user before/after logout) look like debug leftovers.
    """
    print(request.user)
    logout(request)
    print(request.user)
    return redirect('home')
def activate(request, uidbase64, token):
    """Activate an account from an emailed confirmation link.

    uidbase64: urlsafe-base64-encoded user pk; token: value produced by
    generate_token. On success the user is activated and redirected to
    login; otherwise a 401 failure page is rendered.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidbase64))
        user = MyUser.objects.get(pk=uid)
    # Broad catch: any decode/lookup failure is treated as an invalid link.
    except Exception as e:
        user = None
    if user is not None and generate_token.check_token(user, token):
        user.is_active = True
        user.save()
        messages.success(request, 'account activated successfully')
        return redirect('loginPage')
    return render(request, 'accounts/activate_failed.html', status=401)
def activate_msg(request, user):
    """Render and send the account-activation email for *user*."""
    # Debug output left in place.
    print("###########################")
    print('This is the user', user)
    print('This is the request', request)
    print("###########################")
    current_site = get_current_site(request)
    email_subject = "Activate your Account"
    # The template receives the encoded pk and the one-time token that
    # activate() later verifies.
    message = render_to_string('accounts/activate.html',
                               {
                                   'user': user,
                                   'domain': current_site.domain,
                                   'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                                   'token': generate_token.make_token(user)
                               }
                               )
    # email = EmailMessage(
    #     email_subject,
    #     message,
    #     'HVC',
    #     [user.email],
    # )
    # email.send()
    send_mail(
        email_subject,
        message,
        'HVC',
        [user.email],
        fail_silently=False,
    )
|
# views.team.team
from flask import redirect, url_for, request, flash
from app.forms import CreateTeam as CreateTeamForm
from app.util import session as session_util, team as team_util
from .team import TeamView
class CreateTeamView(TeamView):
    """Create a team.

    GET redirects back to the team page; POST validates the creation form,
    creates the team for the logged-in account and assigns its division.
    """
    def get_form(self):
        """Return a fresh, unbound team-creation form."""
        return CreateTeamForm()

    def post(self):
        """Handle a team-creation form submission."""
        create_form = CreateTeamForm(request.form)
        if create_form.validate():
            try:
                account = session_util.get_account()
                team = team_util.create_team(
                    account,
                    create_form.team_name.data
                )
                team_util.set_division(
                    team,
                    create_form.division.data
                )
                return redirect(url_for('team'))
            except Exception:
                # BUGFIX: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt. Still intentionally broad:
                # any failure falls through to the flash + re-render below.
                flash('Error creating team.')
        return self.render_template(create_form=create_form)

    def get(self):
        """Team creation has no standalone page; bounce to the team view."""
        return redirect(url_for('team'))
|
from flask import Blueprint
sc = Blueprint('sc', __name__, url_prefix='/sc')
from . import veiw |
"""!
This module contains code to estimate effective spring constants
of particles in crystals.
\ingroup lammpstools
"""
import lammpstools, dumpreader
import sys, os, numpy as np
def get_einstein_approx( d, ids, T, dims, alpha = 0.9, silent = True ):
    """ ! Attempts to extract the chemical potential by determining
    effective spring constants for given ids and using the
    chemical potential of an Einstein crystal.

    d      -- dump reader providing successive blocks via getblock()
    ids    -- particle ids to track, or None to auto-select particles with
              more than 5 neighbors in the first block
    T      -- temperature
    dims   -- dimensionality used in the equipartition formula
    alpha  -- weight of the current position in the moving-average lattice site
    silent -- when False, print per-particle diagnostics every block
    Returns a list with one k_eff array per processed block.
    """
    # Prepare initial average:
    b = d.getblock()
    im = lammpstools.make_id_map(b.ids)
    if ids is None:
        ids = []
        # presumably 1.3 is a neighbor cutoff distance — TODO confirm units
        n = lammpstools.neighborize( b, 1.3, 3, method = 0 )
        for ni in n:
            if len(ni) > 5:
                ids.append( ni[0] )
    Ncheck = len(ids)
    xavg = np.zeros( [Ncheck, 3] )
    k_eff = np.zeros( Ncheck )
    dx2 = np.zeros( [Ncheck, 4] )
    k_vals = []
    for j in range(0,Ncheck):
        i = im[ids[j]]
        xavg[j] = b.x[i]*alpha + (1 - alpha)*xavg[j]
    b = d.getblock()
    # Now determine in time the relative fluctuations with respect to the
    # moving average lattice position:
    while not d.at_eof:
        im = lammpstools.make_id_map(b.ids)
        k_eff = np.zeros( Ncheck )
        for j in range(0,Ncheck):
            i = im[ids[j]]
            dx = b.x[i][0] - xavg[j][0]
            dy = b.x[i][1] - xavg[j][1]
            dz = b.x[i][2] - xavg[j][2]
            dx2[j][0] = dx*dx
            dx2[j][1] = dy*dy
            dx2[j][2] = dz*dz
            dx2[j][3] = dx2[j][0] + dx2[j][1] + dx2[j][2]
            # Equipartition estimate: k = dims * T / <|dx|^2>.
            k_eff[j] = dims*T / dx2[j][3]
            if not silent:
                print("%d %d %g %g" % (j, b.meta.t, dx2[j][3], k_eff[j]))
        k_vals.append(k_eff)
        # Update the moving-average lattice positions for the next block.
        for j in range(0,Ncheck):
            xavg[j] = b.x[ im[ids[j]] ]*alpha + (1 - alpha)*xavg[j]
        b = d.getblock()
    return k_vals
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AssetBomAttribute import AssetBomAttribute
from alipay.aop.api.domain.AssetBomItem import AssetBomItem
class AssetBom(object):
    def __init__(self):
        """Initialize all BOM fields to None; values are set via properties."""
        self._asset_sub_type = None
        self._attributes = None      # list of AssetBomAttribute
        self._bom_items = None       # list of AssetBomItem
        self._effect_img = None
        self._include_qrcode = None
        self._is_suite = None
        self._item_id = None
        self._item_name = None
        self._item_type = None
        self._request_id = None
@property
def asset_sub_type(self):
return self._asset_sub_type
@asset_sub_type.setter
def asset_sub_type(self, value):
self._asset_sub_type = value
@property
def attributes(self):
return self._attributes
@attributes.setter
def attributes(self, value):
if isinstance(value, list):
self._attributes = list()
for i in value:
if isinstance(i, AssetBomAttribute):
self._attributes.append(i)
else:
self._attributes.append(AssetBomAttribute.from_alipay_dict(i))
@property
def bom_items(self):
return self._bom_items
@bom_items.setter
def bom_items(self, value):
if isinstance(value, list):
self._bom_items = list()
for i in value:
if isinstance(i, AssetBomItem):
self._bom_items.append(i)
else:
self._bom_items.append(AssetBomItem.from_alipay_dict(i))
@property
def effect_img(self):
return self._effect_img
@effect_img.setter
def effect_img(self, value):
self._effect_img = value
@property
def include_qrcode(self):
return self._include_qrcode
@include_qrcode.setter
def include_qrcode(self, value):
self._include_qrcode = value
@property
def is_suite(self):
return self._is_suite
@is_suite.setter
def is_suite(self, value):
self._is_suite = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
@property
def item_name(self):
return self._item_name
@item_name.setter
def item_name(self, value):
self._item_name = value
@property
def item_type(self):
return self._item_type
@item_type.setter
def item_type(self, value):
self._item_type = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
def to_alipay_dict(self):
params = dict()
if self.asset_sub_type:
if hasattr(self.asset_sub_type, 'to_alipay_dict'):
params['asset_sub_type'] = self.asset_sub_type.to_alipay_dict()
else:
params['asset_sub_type'] = self.asset_sub_type
if self.attributes:
if isinstance(self.attributes, list):
for i in range(0, len(self.attributes)):
element = self.attributes[i]
if hasattr(element, 'to_alipay_dict'):
self.attributes[i] = element.to_alipay_dict()
if hasattr(self.attributes, 'to_alipay_dict'):
params['attributes'] = self.attributes.to_alipay_dict()
else:
params['attributes'] = self.attributes
if self.bom_items:
if isinstance(self.bom_items, list):
for i in range(0, len(self.bom_items)):
element = self.bom_items[i]
if hasattr(element, 'to_alipay_dict'):
self.bom_items[i] = element.to_alipay_dict()
if hasattr(self.bom_items, 'to_alipay_dict'):
params['bom_items'] = self.bom_items.to_alipay_dict()
else:
params['bom_items'] = self.bom_items
if self.effect_img:
if hasattr(self.effect_img, 'to_alipay_dict'):
params['effect_img'] = self.effect_img.to_alipay_dict()
else:
params['effect_img'] = self.effect_img
if self.include_qrcode:
if hasattr(self.include_qrcode, 'to_alipay_dict'):
params['include_qrcode'] = self.include_qrcode.to_alipay_dict()
else:
params['include_qrcode'] = self.include_qrcode
if self.is_suite:
if hasattr(self.is_suite, 'to_alipay_dict'):
params['is_suite'] = self.is_suite.to_alipay_dict()
else:
params['is_suite'] = self.is_suite
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
if self.item_name:
if hasattr(self.item_name, 'to_alipay_dict'):
params['item_name'] = self.item_name.to_alipay_dict()
else:
params['item_name'] = self.item_name
if self.item_type:
if hasattr(self.item_type, 'to_alipay_dict'):
params['item_type'] = self.item_type.to_alipay_dict()
else:
params['item_type'] = self.item_type
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AssetBom()
if 'asset_sub_type' in d:
o.asset_sub_type = d['asset_sub_type']
if 'attributes' in d:
o.attributes = d['attributes']
if 'bom_items' in d:
o.bom_items = d['bom_items']
if 'effect_img' in d:
o.effect_img = d['effect_img']
if 'include_qrcode' in d:
o.include_qrcode = d['include_qrcode']
if 'is_suite' in d:
o.is_suite = d['is_suite']
if 'item_id' in d:
o.item_id = d['item_id']
if 'item_name' in d:
o.item_name = d['item_name']
if 'item_type' in d:
o.item_type = d['item_type']
if 'request_id' in d:
o.request_id = d['request_id']
return o
|
# SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the STM32MP157C Development Kit 2."""
from adafruit_blinka.microcontroller.stm32.stm32mp157 import pin
# Arduino-style header pin numbers mapped to STM32MP157 GPIO port/pin objects.
D2 = pin.PA12
D3 = pin.PA11
D4 = pin.PA8
D5 = pin.PG2
D6 = pin.PH11
D7 = pin.PF3
D8 = pin.PF6
D9 = pin.PF8
D10 = pin.PF9
D11 = pin.PF7
D12 = pin.PD13
D13 = pin.PC7
D14 = pin.PB10
D15 = pin.PB12
D16 = pin.PB13
D17 = pin.PG8
D18 = pin.PI5
D19 = pin.PI7
D20 = pin.PI6
D21 = pin.PF11
D22 = pin.PG15
D23 = pin.PF1
D24 = pin.PF0
D25 = pin.PF4
D26 = pin.PF5
D27 = pin.PD7
# I2C signal aliases (primary bus on the D2/D3 header positions,
# secondary bus on dedicated pins).
SDA = D2
SCL = D3
SDA1 = pin.PF15
SCL1 = pin.PD12
# SPI signal aliases.
SCLK = D11
MOSI = D10
MISO = D9
CE0 = D8
CE1 = D7
CS = CE0
SCK = SCLK
# UART signal aliases.
UART_TX = D14
UART_RX = D15
|
## this file is generated from settings in build.vel
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils on environments without setuptools installed.
    from distutils.core import setup
# from options["setup"] in build.vel
# NOTE: '%(setup)s' is a %-format placeholder substituted by the build.vel
# generator; this file is a template and is not runnable Python until the
# placeholder has been filled in with the setup() keyword dict.
config = %(setup)s
setup(**config)
|
# wrong version https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/her/README.md
# right version https://github.com/DLR-RM/stable-baselines3/blob/c41368f2ead24c0cea218164c19e58d48a47422c/docs/modules/her.rst
# her sb3 config https://stable-baselines3.readthedocs.io/en/master/modules/her.html?highlight=HerReplayBuffer#example
# gym robotic https://gym.openai.com/envs/#robotics
import time
import os
import numpy as np
import gym
from stable_baselines3 import HerReplayBuffer, DDPG, SAC
from sb3_contrib.tqc.tqc import TQC
from sb3_contrib.common.wrappers import TimeFeatureWrapper
# Timestamped output directories so repeated runs don't overwrite each other.
models_dir = f"models/{int(time.time())}/"
logdir = f"logs/{int(time.time())}/"
if not os.path.exists(models_dir):
    os.makedirs(models_dir)
if not os.path.exists(logdir):
    os.makedirs(logdir)
# Humanoid-v2 is a plain (non-goal) environment; the Fetch/HER variants
# are kept below as commented-out alternatives.
#env = gym.make("FetchReach-v1")
env = gym.make("Humanoid-v2")
#env = TimeFeatureWrapper(env)
env.reset()
# The two bare triple-quoted strings below are inert reference notes (no-op
# expressions, not executed configuration): first the rl-baselines3-zoo HER
# hyperparameters, then a previous HER+SAC model construction.
"""
# config refferences https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/her.yml
FetchReach-v1:
  n_timesteps: !!float 20000
  policy: 'MlpPolicy'
  model_class: 'sac'
  n_sampled_goal: 4
  goal_selection_strategy: 'future'
  buffer_size: 1000000
  ent_coef: 'auto'
  batch_size: 256
  gamma: 0.95
  learning_rate: 0.001
  learning_starts: 1000
  online_sampling: True
  normalize: True
FetchPickAndPlace-v1:
  env_wrapper:
    - sb3_contrib.common.wrappers.TimeFeatureWrapper
    # - utils.wrappers.DoneOnSuccessWrapper:
    #     reward_offset: 0
    #     n_successes: 4
    # - stable_baselines3.common.monitor.Monitor
  n_timesteps: !!float 1e6
  policy: 'MlpPolicy'
  model_class: 'tqc'
  n_sampled_goal: 4
  goal_selection_strategy: 'future'
  buffer_size: 1000000
  batch_size: 1024
  gamma: 0.95
  learning_rate: !!float 1e-3
  tau: 0.05
  policy_kwargs: "dict(n_critics=2, net_arch=[512, 512, 512])"
  online_sampling: True
"""
"""
model = SAC('MultiInputPolicy', env,
            replay_buffer_class=HerReplayBuffer,
            # Parameters for HER
            replay_buffer_kwargs=dict(
                n_sampled_goal=4,
                goal_selection_strategy='future',
                #buffer_size=int(1e6),
                #learning_rate=1e-3,
                #gamma=0.95,
                #batch_size=256,
                online_sampling=True,
                #learning_starts=1000,
                #normalize=True
            ),
            policy_kwargs=dict(n_critics=2, net_arch=[512, 512, 512]),
            batch_size=1024,
            gamma=0.95,
            tau=0.05,
            verbose=1,
            tensorboard_log=logdir
            )
"""
# Train SAC on the plain (non-goal) env; HER pieces stay disabled above.
model = SAC('MlpPolicy', env,
            policy_kwargs=dict(n_critics=2, net_arch=[512, 512, 512]),
            batch_size=1024,
            gamma=0.95,
            tau=0.05,
            verbose=1,
            tensorboard_log=logdir
            )
#ckpt = "./models/1644478002/400000"
#model= PPO.load(ckpt, verbose=1, tensorboard_log=logdir)
#model.set_env(env)
# Fix: use an int step count. SB3's learn() expects an integer
# total_timesteps, and the old float 1e6 also leaked a trailing ".0"
# into the checkpoint filename ("1000000.0").
TIMESTEPS = 1_000_000
model.learn(total_timesteps=TIMESTEPS, reset_num_timesteps=False, tb_log_name="human-sac")
# os.path.join avoids the double slash from models_dir's trailing '/'.
model.save(os.path.join(models_dir, str(TIMESTEPS)))
import os
import sys

base = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
sys.path.append(base)

import fnmatch
import glob
import subprocess
from collections import OrderedDict

import click

from external.dada.flag_holder import FlagHolder
from external.dada.logger import Logger
from scripts.plot import plot
# options
@click.command()
# target
@click.option('-t', '--target_dir', type=str, required=True)  # root directory searched recursively for CSVs
@click.option('-x', type=str, required=True)  # forwarded to plot.py as -x
@click.option('-y', type=str, default='')  # forwarded to plot.py as -y when non-empty
@click.option('-a', '--plot_all', is_flag=True, default=False, help='plot all in single image')
def main(**kwargs):
    """CLI entry point: forward all parsed options to plot_multi()."""
    plot_multi(**kwargs)
def plot_multi(**kwargs):
    """Render a plot for every CSV found under ``target_dir``.

    Recursively globs ``target_dir`` for ``*.csv`` files (skipping result
    files matching ``test*.csv``), then shells out to ``plot.py`` once per
    file, writing ``<csv dir>/plot/<csv name>.png``.
    """
    FLAGS = FlagHolder()
    FLAGS.initialize(**kwargs)
    FLAGS.summary()

    run_dir = '../scripts'
    target_path = os.path.join(FLAGS.target_dir, '**/*.csv')
    weight_paths = sorted(glob.glob(target_path, recursive=True), key=lambda x: os.path.basename(x))

    for weight_path in weight_paths:
        # skip 'test*.csv'
        # Fix: the old code compared the basename to the literal string
        # 'test*.csv', which never matches a real file; use a glob match.
        if fnmatch.fnmatch(os.path.basename(weight_path), 'test*.csv'): continue

        log_dir = os.path.join(os.path.dirname(weight_path), 'plot')
        os.makedirs(log_dir, exist_ok=True)

        basename = os.path.basename(weight_path)
        basename, _ = os.path.splitext(basename)
        log_path = os.path.join(log_dir, basename)+'.png'

        cmd = 'python plot.py \
            -t {target_dir} \
            -x {x} \
            -s \
            -l {log_path}'.format(
                target_dir=weight_path,
                x=FLAGS.x,
                log_path=log_path)

        # add y
        if FLAGS.y != '':
            cmd += ' -y {y}'.format(y=FLAGS.y)
        # add flag command
        if FLAGS.plot_all:
            cmd += ' --plot_all'

        subprocess.run(cmd.split(), cwd=run_dir)
# click parses sys.argv via the decorated entry point.
if __name__ == '__main__':
    main()
import os
import git
import dvc
import dvc.repo
import numpy as np
import subprocess
import argparse
def create_fix_demo_stages(path):
    """Create a sample git/dvc project at *path* with a fixed stage graph
    (two chains that join in a final fan-in stage; some stages --no-exec)."""
    os.makedirs(path)
    os.chdir(path)
    gitrepo = git.Repo.init()
    dvcrepo = dvc.repo.Repo.init()
    # TODO: the first stage needs also a dependency !!!

    def run_stage(out, deps=(), no_exec=False):
        # Build the exact 'dvc run' argv: [--no-exec] [-d DEP]... -o OUT CMD
        argv = ['dvc', 'run']
        if no_exec:
            argv.append('--no-exec')
        for dep in deps:
            argv += ['-d', dep]
        argv += ['-o', out, 'echo text>%s' % out]
        subprocess.call(argv)

    run_stage('out1_1.txt')
    run_stage('out1_2.txt', deps=('out1_1.txt',))
    run_stage('out1_3.txt', deps=('out1_2.txt',), no_exec=True)
    run_stage('out2_1.txt')
    run_stage('out2_2.txt', deps=('out2_1.txt',))
    run_stage('out2_3.txt', deps=('out2_2.txt',))
    run_stage('out2_4_1.txt', deps=('out2_3.txt',), no_exec=True)
    run_stage('out2_4_2.txt', deps=('out2_3.txt',))
    run_stage('out2_4_2_1.txt', deps=('out2_4_2.txt',))
    run_stage('out2_4_2_2.txt', deps=('out2_4_2.txt',), no_exec=True)
    run_stage('out2_5.txt',
              deps=('out2_4_2_2.txt', 'out2_4_2_1.txt', 'out2_4_1.txt'),
              no_exec=True)
def main():
    """Parse command-line arguments and build the sample git/dvc project.

    NOTE(review): nothing visible in this module invokes main(); confirm it
    is called via an external entry point, or add a __main__ guard.
    """
    description = 'This argument allow you to create simple local git / dvc structure to test dvc-view on it.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        '-p', '--path',
        type=str,
        default='dvc-view-sampleproject',
        help='The path to create the project. If this parameter is not set, it will create in the current dir a "dvc-view-sampleproject" folder with the sample project.')
    create_fix_demo_stages(parser.parse_args().path)
from collidoscope import Collidoscope
from vharfbuzz import Vharfbuzz
from fontTools.ttLib import TTFont
import sys
from argparse import ArgumentParser
import warnings
import re
from termcolor import colored
from stringbrewer import StringBrewer
# Command-line interface: a font file plus a test-description file.
parser = ArgumentParser(description="Shaping regression tests")
parser.add_argument("font", help="Font file", metavar="FONT")
parser.add_argument("testfile", help="File containing tests", metavar="TESTFILE")
args = parser.parse_args()
# Shaper (for shaping checks) and collision detector (for overlap checks).
vhb = Vharfbuzz(args.font)
col = Collidoscope(
    args.font,
    {
        "cursive": True,
        "marks": True,
        "faraway": True,
        "adjacentmarks": False,
        "area": 0.01,
    },
)
# The test file has two sections split at the first blank line:
# test lines first, then StringBrewer "ingredient" definitions.
tests = []
ingredients = []
seen_blank = False
with open(args.testfile, "r") as testfile:
    for line in testfile.read().split("\n"):
        # Lines whose first non-space char is '#' are comments.
        if re.match(r"^\s*#", line):
            continue
        if line == "":
            seen_blank = True
        # NOTE(review): the blank separator line itself is also appended
        # to ingredients — confirm StringBrewer tolerates this.
        if seen_blank:
            ingredients.append(line)
        else:
            # Test-line format: STRING[:EXPECTED_BUFFER...]; a quoted
            # string is a literal test, otherwise a StringBrewer pattern.
            elements = line.split(":")
            if elements[0].startswith('"') and elements[0].endswith('"'):
                tests.append(
                    {
                        "type": "literal",
                        "string": elements[0][1:-1],
                        "options": elements[1:],
                    }
                )
            else:
                tests.append(
                    {"type": "pattern", "string": elements[0], "options": elements[1:]}
                )
tested = 0
passed = 0
def tfail(test, string, message):
    """Count one test as run and report it as failing (red cross)."""
    global tested
    tested = tested + 1
    mark = colored('🗴 ', 'red')
    print(f"{mark} '{string}' {message}")
def tpass(test, string, message):
    """Count one test as run and passed, and report it (green tick)."""
    global tested
    global passed
    tested = tested + 1
    passed = passed + 1
    mark = colored('🗸 ', 'green')
    print(f"{mark} '{string}' {message}")
def do_test(string, options):
    """Run the checks for a single string.

    Always runs the Collidoscope overlap test; if ``options`` is non-empty,
    its first element is treated as the expected HarfBuzz buffer
    serialization and a shaping comparison is run as well. Reporting goes
    through the module-global ``test``/``tested``/``passed`` state via
    tfail()/tpass().
    """
    glyphs = col.get_glyphs(string)
    collisions = col.has_collisions(glyphs)
    if collisions:
        tfail(test, string, "overlap test")
    else:
        tpass(test, string, "overlap test")
    # Fix: consult the ``options`` parameter instead of the global
    # ``test["options"]`` — the old code only worked because every caller
    # happened to pass the current global test's options.
    if len(options) > 0:
        buf = vhb.shape(string)
        expected = options[0]
        got = vhb.serialize_buf(buf)
        if expected == got:
            tpass(test, string, "shaping test")
        else:
            # Label typo fixed: "shaping text" -> "shaping test".
            tfail(test, string, "shaping test: expected %s got %s" % (expected, got))
# Run every parsed test; patterns are expanded via StringBrewer first.
for test in tests:
    if test["type"] == "pattern":
        # Combine the pattern with the shared ingredient definitions.
        b = StringBrewer(from_string=test["string"] + "\n" + "\n".join(ingredients))
        try:
            for s in b.generate_all():
                do_test(s, test["options"])
        except Exception as e:
            # generate_all() can fail (e.g. the pattern space is too large);
            # fall back to sampling 999 random strings from the pattern.
            for i in range(1, 1000):
                do_test(b.generate(), test["options"])
    if test["type"] == "literal":
        do_test(test["string"], test["options"])
print("\n%i tests, %i passed, %i failing" % (tested, passed, tested - passed))
|
from random_words import RandomWords, RandomNicknames
import requests
import socket
import hashlib
import json
import os
from .config import server_ip, server_port, listening_port
class IpExchange:
    """Client for the rendezvous server that pairs senders and receivers
    of a file transfer via a human-readable pass phrase.

    The server address comes from .config (server_ip / server_port);
    listening_port is advertised to the peer as the port this host accepts
    the actual transfer on. Only the MD5 digest of the pass phrase travels
    to the server.
    """

    def __init__(self):
        self.server_url = f'http://{server_ip}:{server_port}'
        # Word/nickname generators for building the pass phrase.
        self.rw = RandomWords()
        self.rn = RandomNicknames()

    def send_info(self, filename, _zip='no'):
        """Register an outgoing transfer with the server.

        :param filename: path of the file to send (may include directories;
            only the basename is sent to the server)
        :param _zip: passed through verbatim to the server ('yes'/'no')
        :return: (passphrase, payload) — the phrase to give the receiver
            and the dict that was POSTed
        :raises OSError: if *filename* does not exist
        :raises requests.HTTPError: if the server rejects the registration
        """
        # Fix: stat the caller-supplied path *before* stripping directories.
        # The old code called getsize() on the bare basename, which failed
        # whenever the file was not in the current working directory.
        size = os.path.getsize(filename)
        filename = os.path.split(filename)[-1]
        # Get connection info
        ip = self.get_local_ip()
        # Generate pass phrase
        passphrase = f"{self.rn.random_nick(gender='u').capitalize()}{self.rw.random_word().capitalize()}"
        # Hash it
        pass_hash = hashlib.md5(passphrase.encode()).hexdigest()
        # Send it off to the server
        payload = {"id": pass_hash,
                   "filename": filename,
                   "zip": _zip,
                   "size": size,
                   "ip": ip,
                   "port": listening_port}
        res = requests.post(f'{self.server_url}/transfer', data=payload)
        res.raise_for_status()
        return passphrase, payload

    def get_info(self, passphrase):
        """Look up a pending transfer by pass phrase.

        :return: the server's JSON payload decoded into a dict
        :raises requests.HTTPError: if the phrase is unknown/rejected
        """
        # Request pass phrase and hash it
        pass_hash = hashlib.md5(passphrase.encode()).hexdigest()
        # Request ip and port from server
        payload = {"id": pass_hash}
        res = requests.get(f'{self.server_url}/transfer', data=payload)
        res.raise_for_status()
        return json.loads(res.content.decode())

    def teardown(self, pass_hash):
        """Tell the server the transfer identified by *pass_hash* is finished."""
        payload = {'id': pass_hash}
        res = requests.post(f'{self.server_url}/teardown', data=payload)
        res.raise_for_status()

    @staticmethod
    def get_local_ip():
        """
        Get the local IP
        Thanks stackoverflow
        https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib/25850698#25850698
        :return:
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 1))  # connect() for UDP doesn't send packets
        return s.getsockname()[0]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from nninst import mode
from nninst.backend.tensorflow.dataset.config import (
CIFAR100_TRAIN,
IMAGENET_RAW_TRAIN,
IMAGENET_TRAIN,
)
from nninst.backend.tensorflow.model.config import RESNET_18_CIFAR100, RESNET_50
from nninst.backend.tensorflow.trace.common import (
class_trace,
class_trace_compact,
class_trace_growth,
full_trace,
save_class_traces,
save_class_traces_low_latency,
save_full_trace_growth,
self_similarity,
)
from nninst.utils.ray import ray_init
# Public surface of this trace-configuration module.
__all__ = [
    "resnet_18_cifar100_class_channel_trace",
    "resnet_18_cifar100_self_channel_similarity",
]
# Prefix used to name all trace artifacts produced below.
name = "resnet_18_cifar100"
# Per-class, per-channel trace of ResNet-18 on the CIFAR-100 training set.
resnet_18_cifar100_class_channel_trace = class_trace(
    name=name,
    model_config=RESNET_18_CIFAR100,
    data_config=CIFAR100_TRAIN,
    per_channel=True,
)
# Growth variant of the same per-class trace (presumably accumulates traces
# incrementally — confirm against class_trace_growth's definition).
resnet_18_cifar100_class_channel_trace_growth = class_trace_growth(
    name=name,
    model_config=RESNET_18_CIFAR100,
    data_config=CIFAR100_TRAIN,
    per_channel=True,
)
# Compacted form of the per-class trace.
resnet_18_cifar100_class_channel_trace_compact = class_trace_compact(
    resnet_18_cifar100_class_channel_trace,
    name=name,
    model_config=RESNET_18_CIFAR100,
    per_channel=True,
)
# Full-model trace built from the per-class traces.
resnet_18_cifar100_channel_trace = full_trace(
    name=name, class_trace_fn=resnet_18_cifar100_class_channel_trace, per_channel=True
)
save_resnet_18_cifar100_channel_trace_growth = save_full_trace_growth(
    name=name, class_trace_fn=resnet_18_cifar100_channel_trace
)
# Pairwise self-similarity across all 100 CIFAR-100 class traces.
resnet_18_cifar100_self_channel_similarity = self_similarity(
    name=name,
    trace_fn=resnet_18_cifar100_class_channel_trace,
    class_ids=range(0, 100),
    per_channel=True,
)
if __name__ == "__main__":
    # Alternative execution modes kept for reference; distributed is active.
    # mode.check(False)
    # mode.debug()
    # mode.local()
    mode.distributed()
    # ray_init("dell")
    # ray_init("gpu")
    # ray_init()
    ray_init()
    # Trace extraction threshold; alternatives kept for reference.
    threshold = 0.5
    # threshold = 1
    # threshold = 0.8
    label = None
    # label = "train_50"
    # label = "train_start"
    # label = "train_start_more"
    # Compute and persist the per-class traces for all 100 classes...
    save_class_traces(
        resnet_18_cifar100_class_channel_trace,
        range(0, 100),
        threshold=threshold,
        label=label,
        example_num=500,
        example_upperbound=500,
    )
    # ...then their compacted form...
    save_class_traces(
        resnet_18_cifar100_class_channel_trace_compact,
        range(0, 100),
        threshold=threshold,
        label=label,
    )
    # ...and finally the class-to-class similarity matrix.
    resnet_18_cifar100_self_channel_similarity(threshold=threshold, label=label).save()
|
import pyjion
import unittest
import gc
class RecursionTestCase(unittest.TestCase):
    """Check that a recursive Python function runs correctly and gets
    JIT-compiled under pyjion."""

    def setUp(self) -> None:
        # JIT-compile every frame executed during the test.
        pyjion.enable()

    def tearDown(self) -> None:
        pyjion.disable()
        gc.collect()

    def test_basic(self):
        def _f():
            def grow(letters):
                if len(letters) >= 5:
                    return letters
                letters.append('a')
                return grow(letters)
            return grow([])
        self.assertEqual(_f(), ['a', 'a', 'a', 'a', 'a'])
        info = pyjion.info(_f)
        self.assertTrue(info['compiled'])
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.