#!/usr/bin/env python3
# This is a slightly modified version of timm's training script
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
from src import *
import torch
import yaml
from timm.models import create_model, resume_checkpoint
from timm.utils import *
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
# try:
# if getattr(torch.cuda.amp, 'autocast') is not None:
# has_native_amp = True
# except AttributeError:
# pass
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = argparse.ArgumentParser(description='Training Config', add_help=False)
config_parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
checkpoint_dict_cifar10 = {
'riem': '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results'
'/cifar10/20220125-151023-manifold_vit_6_4_32-32/model_best.pth.tar',
'all' : '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results'
'/cifar10/20220125-222928-manifold_vit_6_4_32-32/model_best.pth.tar',
'gm' : '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results'
'/cifar10/20220201-110749-manifold_vit_6_4_32-32/model_best.pth.tar',
'e':'/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/euclidean/model_best.pth'}
checkpoint_dict_cifar100 = {
'riem': '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results/cifar100/20220125-224527-manifold_vit_6_4_32-32/model_best.pth.tar',
'gm' : '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results/cifar100/20220130-120606-manifold_vit_6_4_32-32/model_best.pth.tar',
'all' : '/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results/cifar100/20220131-100728-manifold_vit_6_4_32-32/model_best.pth.tar',
'e':'/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results/cifar100/euclidean/vitlite6-4_cifar100.pth'}
sample_images = ['./data/CIFAR-10-images-master/val/horse/0010.jpg',
'./data/CIFAR-10-images-master/val/ship/0045.jpg',
"./data/CIFAR-10-images-master/val/automobile/0000.jpg",
"./data/CIFAR-10-images-master/val/bird/0003.jpg",
'./data/CIFAR-10-images-master/val/dog/0233.jpg',
# './data/CIFAR-10-images-master/val/ship/0107.jpg'
]
sample_images_c100 = ['./data/cifar100/val/cup/0004.png',
]
# manifold_vit_6_4_32
# vit_6_4_32
def arguments():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('--resume', default=checkpoint_dict_cifar100['e'], type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--attention_type', default='gm', type=str, metavar='ATT',
help='Type of attention to use', choices=('self', 'gm', 'riem', 'all','spd'))
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--model', default='vit_6_4_32', type=str, metavar='MODEL',
help='Name of model to train (default: "vit_6_4_32")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if '
'empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--gpu', default='0', type=str)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
return parser
def _parse_args(config_parser, parser):
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
# print(remaining)
# print(args_config)
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# print(args)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def show_cam_on_image(img, mask):
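"""Blend an attention mask onto an image.
`mask` is expected in [0, 1]; it is inverted, colour-mapped with cv2.COLORMAP_JET
and blended with the image as 0.5 * heatmap + img. Returns (heatmap, cam): the
float JET heatmap in [0, 255] and the blended overlay normalised to [0, 1].
"""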
heatmap = cv2.applyColorMap(np.uint8(255 * (1 - mask)), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) # / 255
cam = 0.5 * heatmap + np.float32(img)
print(img.max(), heatmap.max())
cam = cam / np.max(cam)
return heatmap, cam
def main():
parser = arguments()
setup_default_logging()
args, args_text = _parse_args(config_parser, parser)
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
random_seed(args.seed, args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint, attention_type=args.attention_type, return_map=True)
print(model)
if args.resume:
resume_checkpoint(
model, args.resume,
optimizer=None,
loss_scaler=None,
log_info=args.local_rank == 0)
labels = dict(enumerate(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']))
model.eval()
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]),
])
img_path = sample_images_c100[0]
im = Image.open(img_path)
x = transform(im)
logits, att_mat = model(x.unsqueeze(0),return_attention=True)
att_mat = torch.stack([att_mat ]).squeeze(1)
# Average the attention weights across all heads.
att_mat = torch.mean(att_mat, dim=1)
# To account for residual connections, we add an identity matrix to the
# attention matrix and re-normalize the weights.
residual_att = torch.eye(att_mat.size(1))
aug_att_mat = att_mat + residual_att
aug_att_mat = aug_att_mat / aug_att_mat.sum(dim=-1).unsqueeze(-1)
# Recursively multiply the weight matrices
joint_attentions = torch.zeros(aug_att_mat.size())
joint_attentions[0] = aug_att_mat[0]
for n in range(1, aug_att_mat.size(0)):
joint_attentions[n] = torch.matmul(aug_att_mat[n], joint_attentions[n - 1])
# Attention from the output token to the input space.
v = joint_attentions[-1]
print(v.size())
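# Index 0 is assumed to be the class token: keep its attention to the remaining
# patch tokens and reshape it onto the patch grid.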
grid_size = int(np.sqrt(aug_att_mat.size(-1)))
mask = v[0, 1:].reshape(grid_size, grid_size).detach().numpy()
mask = cv2.resize(mask / mask.max(), im.size)[..., np.newaxis]
hm, cam = show_cam_on_image(np.array(im), mask)
mask2 = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
print(mask.shape, im.size)
result = (mask / 255.0 + im).astype("uint8")
#
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)
im = cv2.resize(np.array(im),(224,224))
cam = cv2.resize(np.array(cam),(224,224))
hm = cv2.resize(np.array(hm/255.0),(224,224))
ax1.set_title('Original')
ax2.set_title('Attention Map Overlay')
ax3.set_title('Attention Map ')
_ = ax1.imshow(im)
_ = ax2.imshow(cam)
_ = ax3.imshow(hm )
probs = torch.nn.Softmax(dim=-1)(logits)
top5 = torch.argsort(probs, dim=-1, descending=True)
print("Prediction Label and Attention Map!\n")
for idx in top5[0, :5]:
print(f'{probs[0, idx.item()] :.5f}\t : {idx.item()}\n', end='')
save_path = args.resume.rsplit('/',1)[0]
print( save_path)
overlay_img = f"/overlay{img_path.replace('/','_')}.jpg"
print(overlay_img)
cv2.imwrite(save_path +overlay_img, cv2.cvtColor(np.uint8(cam *255 ),
cv2.COLOR_RGB2BGR))
overlay_img = f"/map{img_path.replace('/', '_')}.jpg"
cv2.imwrite(save_path +overlay_img,
cv2.cvtColor(np.uint8(hm*255), cv2.COLOR_RGB2BGR))
# axs[0, 1].plot(x, y, 'tab:orange')
# axs[0, 1].set_title('Axis [0, 1]')
# axs[1, 0].plot(x, -y, 'tab:green')
# axs[1, 0].set_title('Axis [1, 0]')
# axs[1, 1].plot(x, -y, 'tab:red')
# axs[1, 1].set_title('Axis [1, 1]')
# plt.savefig(args.resume.replace('model_best.pth.tar','overlay.jpg'))
# print('MAX ',cam.max())#cv2.cvtColor(, cv2.COLOR_RGB2BGR)
# cv2.imwrite(args.resume.replace('model_best.pth.tar','overlay.jpg'),cv2.cvtColor(np.uint8(cam*255),
# cv2.COLOR_RGB2BGR))
# cv2.imwrite(args.resume.replace('model_best.pth.tar', 'map.jpg'),
# cv2.cvtColor(np.uint8(hm), cv2.COLOR_RGB2BGR))
# for i, v in enumerate(joint_attentions):
# # Attention from the output token to the input space.
# mask = v[0, 1:].reshape(grid_size, grid_size).detach().numpy()
# mask = cv2.resize(mask / mask.max(), im.size)[..., np.newaxis]
# result = (mask * im).astype("uint8")
#
# fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 16))
# ax1.set_title('Original')
# ax2.set_title('Attention Map_%d Layer' % (i + 1))
# _ = ax1.imshow(im)
# _ = ax2.imshow(hm/255.0)
#
plt.show()
def sublpot_image_patches():
im = Image.open(
sample_images[0])
im = np.array(im)
print(im.shape)
im = cv2.resize(np.array(im), (256, 256))
#patches = np.reshape(im,(16,64,64,3) )
patches = torch.from_numpy(im).view(-1,64,64,3).numpy()
print(patches.shape)
fig, axs = plt.subplots(1, 16 , sharex=True, sharey=True,squeeze=True,gridspec_kw={'wspace':0, 'hspace':0},)
fig.tight_layout()
#im = cv2.resize(np.array(im), (224, 224))
for i in range(4):
for j in range(4):
axs[j + 4*i].set_xticks([])
axs[j + 4*i].set_yticks([])
axs[ j + 4*i].imshow(im[i*64:(i+1)*64,j*64:(j+1)*64,:])
#axs[i, j].set_title('Image')
# axs[0, 0].set_xticks([])
# axs[0, 0].set_yticks([])
# axs[0, 0].imshow(im)
# axs[0, 0].set_title('Image')
plt.show()
def sublpot_attention():
"""
Plot attention scores on the image
"""
im = Image.open(
sample_images[0])
im_gm = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220201-110749-manifold_vit_6_4_32-32/overlay_horse.jpg")
im_all = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220125-222928-manifold_vit_6_4_32-32/overlay_horse.jpg")
im_spd = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/'
'paper_results/cifar10/20220125-151023-manifold_vit_6_4_32-32/overlay_horse.jpg')
im_e = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/euclidean/overlay_horse.jpg')
fig, axs = plt.subplots(3, 5, sharex=True, sharey=True,squeeze=True,gridspec_kw={'wspace':0, 'hspace':0},)
fig.tight_layout()
im = cv2.resize(np.array(im), (224, 224))
axs[0, 0].set_xticks([])
axs[0, 0].set_yticks([])
axs[0, 0].imshow(im)
#axs[0, 0].set_title('Image')
axs[0, 1].set_xticks([])
axs[0, 1].set_yticks([])
axs[0, 1].imshow(im_e)
#axs[0, 1].set_title('E')
axs[0, 2].set_xticks([])
axs[0, 2].set_yticks([])
axs[0, 2].imshow(im_spd)
#axs[0, 2].set_title('E_SPD')
axs[0, 3].set_xticks([])
axs[0, 3].set_yticks([])
axs[0, 3].imshow(im_gm)
#axs[0, 3].set_title('E_G')
axs[0, 4].set_xticks([])
axs[0, 4].set_yticks([])
axs[0, 4].imshow(im_all)
#axs[0, 4].set_title('E_SPD_G')
##################################################
im = Image.open(
sample_images[-1])
im_gm = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220201-110749-manifold_vit_6_4_32-32/overlay_airplane.jpg")
im_all = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220125-222928-manifold_vit_6_4_32-32/overlay_airplane.jpg")
im_spd = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/'
'paper_results/cifar10/20220125-151023-manifold_vit_6_4_32-32/overlay_airplane.jpg')
im_e = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/euclidean/overlay_airplane.jpg')
# fig, axs = plt.subplots(2, 4, figsize=(32, 32))
# fig.tight_layout()
im = cv2.resize(np.array(im), (224, 224))
axs[1, 0].set_xticks([])
axs[1, 0].set_yticks([])
axs[1, 0].imshow(im)
axs[1, 1].set_xticks([])
axs[1, 1].set_yticks([])
axs[1, 1].imshow(im_e)
axs[1, 2].set_xticks([])
axs[1, 2].set_yticks([])
axs[1, 2].imshow(im_spd)
axs[1, 3].set_xticks([])
axs[1, 3].set_yticks([])
axs[1, 3].imshow(im_gm)
axs[1, 4].set_xticks([])
axs[1, 4].set_yticks([])
axs[1, 4].imshow(im_all)
##################################################
im = Image.open(
sample_images[2])
im_gm = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220201-110749-manifold_vit_6_4_32-32/overlay_automobile.jpg")
im_all = Image.open(
"/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/paper_results"
"/cifar10/20220125-222928-manifold_vit_6_4_32-32/overlay_automobile.jpg")
im_spd = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/'
'paper_results/cifar10/20220125-151023-manifold_vit_6_4_32-32/overlay_automobile.jpg')
# fig, axs = plt.subplots(2, 4, figsize=(32, 32))
# fig.tight_layout()
im_e = Image.open('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/output/euclidean/overlay_auto.jpg')
im = cv2.resize(np.array(im), (224, 224))
axs[2, 0].set_xticks([])
axs[2, 0].set_yticks([])
axs[2, 0].imshow(im)
axs[2, 1].set_xticks([])
axs[2, 1].set_yticks([])
axs[2, 1].imshow(im_e)
axs[2, 2].set_xticks([])
axs[2, 2].set_yticks([])
axs[2, 2].imshow(im_spd)
axs[2, 3].set_xticks([])
axs[2, 3].set_yticks([])
axs[2, 3].imshow(im_gm)
axs[2, 4].set_xticks([])
axs[2, 4].set_yticks([])
axs[2, 4].imshow(im_all)
plt.show()
def sublpot_attention_cifar100():
"""
Plot attention scores on the image
"""
im = Image.open(
'data/cifar100/val/cup/0004.png')
im_all = Image.open(
"output/paper_results/cifar100/20220130-120606-manifold_vit_6_4_32-32/overlay._data_cifar100_val_cup_0004.png.jpg")
im_gm = Image.open(
"output/paper_results/cifar100/20220131-100728-manifold_vit_6_4_32-32/overlay._data_cifar100_val_cup_0004.png.jpg")
im_spd = Image.open('output/paper_results/cifar100/20220125-224527-manifold_vit_6_4_32-32/overlay._data_cifar100_val_cup_0004.png.jpg')
im_e = Image.open('output/paper_results/cifar100/euclidean/overlay._data_cifar100_val_cup_0004.png.jpg')
fig, axs = plt.subplots(3, 5, sharex=True, sharey=True,squeeze=True,gridspec_kw={'wspace':0, 'hspace':0},)
fig.tight_layout()
im = cv2.resize(np.array(im), (224, 224))
axs[0, 0].set_xticks([])
axs[0, 0].set_yticks([])
axs[0, 0].imshow(im)
#axs[0, 0].set_title('Image')
axs[0, 1].set_xticks([])
axs[0, 1].set_yticks([])
axs[0, 1].imshow(im_e)
#axs[0, 1].set_title('E')
axs[0, 2].set_xticks([])
axs[0, 2].set_yticks([])
axs[0, 2].imshow(im_spd)
#axs[0, 2].set_title('E_SPD')
axs[0, 3].set_xticks([])
axs[0, 3].set_yticks([])
axs[0, 3].imshow(im_gm)
#axs[0, 3].set_title('E_G')
axs[0, 4].set_xticks([])
axs[0, 4].set_yticks([])
axs[0, 4].imshow(im_all)
#axs[0, 4].set_title('E_SPD_G')
##################################################
im = Image.open(
'data/cifar100/val/boy/0001.png')
im_all = Image.open(
"output/paper_results/cifar100/20220130-120606-manifold_vit_6_4_32-32/overlay._data_cifar100_val_boy_0001.png.jpg")
im_gm = Image.open(
"output/paper_results/cifar100/20220131-100728-manifold_vit_6_4_32-32/overlay._data_cifar100_val_boy_0001.png.jpg")
im_spd = Image.open('output/paper_results/cifar100/20220125-224527-manifold_vit_6_4_32-32/overlay._data_cifar100_val_boy_0001.png.jpg')
im_e = Image.open('output/paper_results/cifar100/euclidean/overlay._data_cifar100_val_boy_0001.png.jpg')
im = cv2.resize(np.array(im), (224, 224))
axs[1, 0].set_xticks([])
axs[1, 0].set_yticks([])
axs[1, 0].imshow(im)
axs[1, 1].set_xticks([])
axs[1, 1].set_yticks([])
axs[1, 1].imshow(im_e)
axs[1, 2].set_xticks([])
axs[1, 2].set_yticks([])
axs[1, 2].imshow(im_spd)
axs[1, 3].set_xticks([])
axs[1, 3].set_yticks([])
axs[1, 3].imshow(im_gm)
axs[1, 4].set_xticks([])
axs[1, 4].set_yticks([])
axs[1, 4].imshow(im_all)
##################################################
im = Image.open(
'data/cifar100/val/bear/0001.png')
im_gm = Image.open(
"output/paper_results/cifar100/20220130-120606-manifold_vit_6_4_32-32/overlay._data_cifar100_val_bear_0001.png.jpg")
im_all = Image.open(
"output/paper_results/cifar100/20220131-100728-manifold_vit_6_4_32-32/overlay._data_cifar100_val_bear_0001.png.jpg")
im_spd = Image.open('output/paper_results/cifar100/20220125-224527-manifold_vit_6_4_32-32/overlay._data_cifar100_val_bear_0001.png.jpg')
im_e = Image.open('output/paper_results/cifar100/euclidean/overlay._data_cifar100_val_bear_0001.png.jpg')
im = cv2.resize(np.array(im), (224, 224))
axs[2, 0].set_xticks([])
axs[2, 0].set_yticks([])
axs[2, 0].imshow(im)
axs[2, 1].set_xticks([])
axs[2, 1].set_yticks([])
axs[2, 1].imshow(im_e)
axs[2, 2].set_xticks([])
axs[2, 2].set_yticks([])
axs[2, 2].imshow(im_spd)
axs[2, 3].set_xticks([])
axs[2, 3].set_yticks([])
axs[2, 3].imshow(im_gm)
axs[2, 4].set_xticks([])
axs[2, 4].set_yticks([])
axs[2, 4].imshow(im_all)
plt.show()
#sublpot_image_patches()
#sublpot_attention()
if __name__ == '__main__':
sublpot_attention_cifar100()
# import os
# import glob
#
# os.chdir('/home/iliask/Desktop/ilias/QCONPASS/Object_detection_research/Compact-Transformers/data/cifar100/val')
# print(glob.glob(os.getcwd()))
|
"""
Prepare training and testing datasets as CSV dictionaries
Created on 11/26/2018
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
# Get the IDs (directory entry names) of all images under root_dir
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(slide, root_dir, label):
ids = []
try:
for id in os.listdir(root_dir+'/pos'):
if '.png' in id:
ids.append([slide, root_dir+'/pos/'+id, label, 1])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir+'/pos')
try:
for id in os.listdir(root_dir+'/neg'):
if '.png' in id:
ids.append([slide, root_dir+'/neg/'+id, label, 0])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir+'/neg')
return ids
# Collect all svs images with their labels into one data frame
def big_image_sum(path='../tiles/', ref_file='../dummy_His_MUT_joined.csv'):
if not os.path.isdir(path):
os.mkdir(path)
import Cutter
Cutter.cut()
allimg = image_ids_in(path)
ref = pd.read_csv(ref_file, header=0)
big_images = []
negimg = intersection(ref.loc[ref['label'] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref['label'] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, path + "{}".format(i), 0])
for i in posimg:
big_images.append([i, path + "{}".format(i), 1])
datapd = pd.DataFrame(big_images, columns=['slide', 'path', 'label'])
return datapd
# Separate into training, validation, and testing sets; each label uses the same split ratio on whole-slide images.
# The train/validation/test CSV files contain tile paths.
def set_sep(alll, path, cut=0.3):
trlist = []
telist = []
valist = []
for i in range(2):
subset = alll.loc[alll['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles_list = []
train_tiles_list = []
validation_tiles_list = []
for idx, row in test.iterrows():
tile_ids = tile_ids_in(row['slide'], row['path'], row['label'])
test_tiles_list.extend(tile_ids)
for idx, row in train.iterrows():
tile_ids = tile_ids_in(row['slide'], row['path'], row['label'])
train_tiles_list.extend(tile_ids)
for idx, row in validation.iterrows():
tile_ids = tile_ids_in(row['slide'], row['path'], row['label'])
validation_tiles_list.extend(tile_ids)
test_tiles = pd.DataFrame(test_tiles_list, columns=['slide', 'path', 'slide_label', 'tile_label'])
train_tiles = pd.DataFrame(train_tiles_list, columns=['slide', 'path', 'slide_label', 'tile_label'])
validation_tiles = pd.DataFrame(validation_tiles_list, columns=['slide', 'path', 'slide_label', 'tile_label'])
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
test_tiles.to_csv(path+'/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path+'/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path+'/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
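# Minimal usage sketch, assuming the default '../tiles/' layout and reference CSV
# exist; the output directory below is only an example.
if __name__ == '__main__':
    all_slides = big_image_sum(path='../tiles/', ref_file='../dummy_His_MUT_joined.csv')
    train, test, validation = set_sep(all_slides, path='../split', cut=0.3)
    print(len(train), len(validation), len(test))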
|
import sys
import os
import re
from operator import attrgetter
class LibrarySource(object):
"""Holds info on all the library source code"""
class SourceFile(object):
"""Info for an individual source file"""
class SectionFinder(object):
"""Match a section within a source file"""
def __init__(self, source_file):
self.match = None
self.line_number = 0
self.lines = []
self.source_file = source_file
def check_for_end(self, line):
if self.end_re.search(line):
self.finish()
self.lines = []
return True
return False
def check_for_start(self, line_number, line):
match = self.start_re.search(line)
if match:
self.match = match
self.line_number = line_number
self.lines.append(line)
return True
return False
class LineFinder(object):
"""Match a line within a source file"""
def __init__(self, source_file):
self.source_file = source_file
def check_match(self, line, line_number):
match = self.line_re.search(line)
if match:
self.add(match, line_number)
class SubroutineFinder(SectionFinder):
start_re = re.compile(
r'^\s*(RECURSIVE\s+)?SUBROUTINE\s+([A-Z0-9_]+)\(',
re.IGNORECASE)
end_re = re.compile(r'^\s*END\s*SUBROUTINE', re.IGNORECASE)
def finish(self):
name = self.match.group(2)
self.source_file.subroutines[name] = Subroutine(
name, self.line_number, self.lines, self.source_file)
class InterfaceFinder(SectionFinder):
start_re = re.compile(
r'^\s*INTERFACE\s+([A-Z0-9_]+)',
re.IGNORECASE)
end_re = re.compile(r'^\s*END\s*INTERFACE', re.IGNORECASE)
def finish(self):
name = self.match.group(1)
self.source_file.interfaces[name] = Interface(
name, self.line_number, self.lines, self.source_file)
class TypeFinder(SectionFinder):
start_re = re.compile(r'^\s*TYPE\s+([A-Z0-9_]+)', re.IGNORECASE)
end_re = re.compile(r'^\s*END\s*TYPE', re.IGNORECASE)
def finish(self):
name = self.match.group(1)
self.source_file.types[name] = Type(
name, self.line_number, self.lines, self.source_file)
class PublicFinder(LineFinder):
line_re = re.compile(
r'^\s*PUBLIC\s*:*\s*([A-Z0-9_,\s]+)',
re.IGNORECASE)
def add(self, match, line_number):
for symbol in match.group(1).split(','):
self.source_file.public.add(symbol.strip())
class ConstantFinder(LineFinder):
line_re = re.compile(
r'^\s*INTEGER\([A-Z0-9\(\),_\s]+::\s*'
r'([A-Z0-9_]+)\s*=\s*([A-Z0-9_\-\.]+)[^!]*(!<.*$)?',
re.IGNORECASE)
def add(self, match, line_number):
name = match.group(1)
assignment = match.group(2)
if match.group(3) is None:
doxy = ''
else:
doxy = match.group(3)[2:].strip()
self.source_file.constants[name] = Constant(
name, line_number, assignment, doxy)
class DoxygenGroupingFinder(LineFinder):
# match at least one whitespace character before the ! to make sure
# we don't get stuff from the file header
line_re = re.compile(
r'^\s+!\s*>\s*(\\(addtogroup|brief|see)|@[\{\}])(.*$)',
re.IGNORECASE)
def add(self, match, line_number):
line = match.group(1)
if match.group(3) is not None:
line += match.group(3)
self.source_file.doxygen_groupings.append(
DoxygenGrouping(line_number, line))
def __init__(self, source_file, params_only=False):
"""Initialise SourceFile object
Arguments:
source_file -- Path to the source file
"""
self.file_path = source_file
self.public = IdentifierSet()
self.doxygen_groupings = []
self.interfaces = IdentifierDict()
self.subroutines = IdentifierDict()
self.constants = IdentifierDict()
self.types = IdentifierDict()
self.parse_file(params_only)
def parse_file(self, params_only=False):
"""Run through file once, getting everything we'll need"""
source_lines = _join_lines(
open(self.file_path, 'r').read()).splitlines()
if not params_only:
# only keep the source_lines if we need them
self.source_lines = source_lines
# Set the things we want to find
line_finders = []
section_finders = []
line_finders.append(self.ConstantFinder(self))
if not params_only:
line_finders.extend((
self.PublicFinder(self),
self.DoxygenGroupingFinder(self)))
section_finders.extend((
self.SubroutineFinder(self),
self.InterfaceFinder(self),
self.TypeFinder(self)))
# Find them
current_section = None
for (line_number, line) in enumerate(source_lines):
if current_section is not None:
current_section.lines.append(line)
if current_section.check_for_end(line):
current_section = None
else:
for line_finder in line_finders:
line_finder.check_match(line, line_number)
for section in section_finders:
if section.check_for_start(line_number, line):
current_section = section
break
def __init__(self, cm_path):
"""Load library information from source files
Arguments:
cm_path -- Path to OpenCMISS iron directory
"""
self.lib_source = self.SourceFile(
os.sep.join((cm_path, 'src', 'opencmiss_iron.F90')))
cm_source_path = cm_path + os.sep + 'src'
source_files = [
cm_source_path + os.sep + file_name
for file_name in os.listdir(cm_source_path)
if file_name.endswith('.F90') and file_name != 'opencmiss_iron.F90']
self.sources = [
self.SourceFile(source, params_only=True)
for source in source_files]
self.resolve_constants()
# Get all public types, constants and routines to include
# Store all objects to be output in a dictionary with the line number
# as the key
public_objects = {}
for t in self.lib_source.types.values():
if t.name in self.lib_source.public:
public_objects[t.line_number] = t
for const in self.lib_source.constants.values():
if const.name in self.lib_source.public:
public_objects[const.line_number] = const
self.public_subroutines = [
routine
for routine in self.lib_source.subroutines.values()
if routine.name in self.lib_source.public]
for interface in self.lib_source.interfaces.values():
if interface.name in self.lib_source.public:
self.public_subroutines += [
self.lib_source.subroutines[routine]
for routine in interface.get_subroutines()]
self.public_subroutines = sorted(
self.public_subroutines, key=attrgetter('name'))
# Remove cmfe...TypesCopy routines, as these are only used within the
# C bindings. Also remove cmfe_GeneratedMeshSurfaceGet for now as it
# takes an allocatable array but will be removed soon anyways.
self.public_subroutines = list(filter(
lambda r:
not (r.name.startswith('cmfe_GeneratedMesh_SurfaceGet') or
r.name.endswith('TypesCopy')),
self.public_subroutines))
self.unbound_routines = []
for routine in self.public_subroutines:
routine.get_parameters()
owner_class = routine.get_class()
if owner_class != None:
try:
type = self.lib_source.types[owner_class]
type.methods.append(routine)
except KeyError:
sys.stderr.write("Warning: Couldn't find matching class "
"for routine %s" % routine.name)
else:
self.unbound_routines.append(routine)
for routine in self.public_subroutines:
public_objects[routine.line_number] = routine
for doxygen_grouping in self.lib_source.doxygen_groupings:
public_objects[doxygen_grouping.line_number] = doxygen_grouping
self.ordered_objects = [public_objects[k]
for k in sorted(public_objects.keys())]
def resolve_constants(self):
"""Go through all public constants and work out their actual values"""
for pub in self.lib_source.constants:
if pub in self.lib_source.public:
self.get_constant_value(pub)
def get_constant_value(self, constant):
"""Get the actual value for a constant from the source files
Arguments:
constant -- Name of the constant to get the value for
"""
assignment = self.lib_source.constants[constant].assignment
exhausted = False
while ((not self.lib_source.constants[constant].resolved) and
(not exhausted)):
for (i, source) in enumerate(self.sources):
if assignment in source.constants:
if source.constants[assignment].resolved:
self.lib_source.constants[constant].value = (
source.constants[assignment].value)
self.lib_source.constants[constant].resolved = True
break
else:
assignment = source.constants[assignment].assignment
break
if i == (len(self.sources) - 1):
exhausted = True
if not self.lib_source.constants[constant].resolved:
sys.stderr.write("Warning: Couldn't resolve constant value: %s\n"
% constant)
def group_constants(self):
"""Returns a list of enums and ungrouped constants"""
enums = []
enum_dict = {}
ungrouped_constants = []
current_enum = None
for o in self.ordered_objects:
if isinstance(o, DoxygenGrouping):
if o.type == 'group':
# Strip CMISS/OpenCMISS prefix from some constant group names
if o.group.startswith('CMISS'):
group_name = o.group[5:]
elif o.group.startswith('OpenCMISS'):
group_name = o.group[9:]
else:
group_name = o.group
if group_name in enum_dict:
current_enum = enum_dict[group_name]
else:
current_enum = Enum(group_name)
enum_dict[group_name] = current_enum
elif o.type == 'brief':
current_enum.comment = o.brief
elif o.type == 'close':
if (current_enum is not None and
len(current_enum.constants) > 0):
enums.append(current_enum)
current_enum = None
elif isinstance(o, Constant):
if current_enum is not None:
current_enum.constants.append(o)
else:
ungrouped_constants.append(o)
if current_enum is not None:
sys.stderr.write("Error: Didn't match a closing group "
"for Doxygen groupings\n")
return (enums, ungrouped_constants)
class CodeObject(object):
"""Base class for any line or section of code"""
def __init__(self, name, line_number):
self.name = name
self.line_number = line_number
def _get_comments(self):
"""Sets the comment_lines property
This is a list of comment lines above this section or line of code
"""
self.comment_lines = []
line_num = self.line_number - 1
while self.source_file.source_lines[line_num].strip().startswith('!>'):
self.comment_lines.append(
self.source_file.source_lines[line_num].strip()[2:].strip())
line_num -= 1
self.comment_lines.reverse()
class Constant(CodeObject):
"""Information on a public constant"""
def __init__(self, name, line_number, assignment, comment):
"""Initialise Constant
Extra arguments:
assignment -- Value or another variable assigned to this variable
comment -- Contents of the doxygen comment describing
the constant
"""
super(Constant, self).__init__(name, line_number)
self.assignment = assignment
self.comment = comment
try:
self.value = int(self.assignment)
self.resolved = True
except ValueError:
try:
self.value = float(self.assignment)
self.resolved = True
except ValueError:
self.value = None
self.resolved = False
class Interface(CodeObject):
"""Information on an interface"""
def __init__(self, name, line_number, lines, source_file):
"""Initialise an interface
Arguments:
name -- Interface name
line_number -- Line number where the interface starts
lines -- Contents of interface as a list of lines
source_file -- Source file containing the interface
"""
super(Interface, self).__init__(name, line_number)
self.lines = lines
self.source = source_file
def get_subroutines(self):
"""Find the subroutines for an interface
Choose the one with the highest number if there are options. This
corresponds to the routine that takes array parameters
Returns a list of subroutines
"""
all_subroutines = []
routine_re = re.compile(
r'^\s*MODULE PROCEDURE ([A-Z0-9_]+)', re.IGNORECASE)
varying_string_re1 = re.compile(
r'VSC*(Obj|Number|)[0-9]*$', re.IGNORECASE)
varying_string_re2 = re.compile(
r'VSC*(Obj|Number|Region|Interface|)*$', re.IGNORECASE)
for line in self.lines:
match = routine_re.search(line)
if match:
routine_name = match.group(1)
if (varying_string_re1.search(routine_name) or
varying_string_re2.search(routine_name)):
# Don't include routines using varying_string parameters
pass
else:
all_subroutines.append(routine_name)
subroutines = self._get_array_routines(all_subroutines)
for routine in subroutines:
try:
self.source.subroutines[routine].interface = self
except KeyError:
raise KeyError("Couldn't find subroutine %s for interface %s" %
(routine, self.name))
return subroutines
def _get_array_routines(self, routine_list):
"""Return a list of the routines that take array parameters if there
is an option between passing an array or a scalar. All other routines
are also returned.
Arguments:
routine_list -- List of subroutine names
"""
routine_groups = {}
routines = []
# Group routines depending on their name, minus any number indicating
# whether they take a scalar or array
for routine in routine_list:
routine_group = re.sub(r'\d', '0', routine)
if routine_group in routine_groups:
routine_groups[routine_group].append(routine)
else:
routine_groups[routine_group] = [routine]
for group in routine_groups.keys():
max_number = -1
for routine in routine_groups[group]:
try:
number = int(''.join([c for c in routine if str.isdigit(c)]))
if number > max_number:
max_number = number
array_routine = routine
except ValueError:
# only one routine in group
array_routine = routine
routines.append(array_routine)
return routines
class Subroutine(CodeObject):
"""Store information for a subroutine"""
def __init__(self, name, line_number, lines, source_file):
super(Subroutine, self).__init__(name, line_number)
self.lines = lines
self.source_file = source_file
self.parameters = None
self.interface = None
self.self_idx = -1
self._get_comments()
def get_parameters(self):
"""Get details of the subroutine parameters
Sets the Subroutines parameters property as a list of all parameters,
excluding the Err parameter.
"""
def filter_match(string):
if string is None:
return ''
else:
return string.strip()
self.parameters = []
match = re.search(
r'^\s*(RECURSIVE\s+)?SUBROUTINE\s+'
r'([A-Z0-9_]+)\(([A-Z0-9_,\*\s]*)\)',
self.lines[0],
re.IGNORECASE)
if not match:
raise ValueError(
"Could not read subroutine line:\n %s" % self.lines[0])
parameters = [p.strip() for p in match.group(3).split(',')]
try:
parameters.remove('Err')
except ValueError:
try:
parameters.remove('err')
except ValueError:
sys.stderr.write("Warning: Routine doesn't take Err parameter:"
"%s\n" % self.name)
for param in parameters:
param_pattern = r"""
# parameter type at start of line, followed by possible type
# info, eg DP or SP in brackets
^\s*([A-Z_]+\s*(\(([A-Z_=,\*0-9]+)\))?)
# extra specifications such as intent or pointer
\s*([A-Z0-9\s_\(\):,\s]+)?\s*
::
# Allow for other parameters to be included on the same line
[A-Z_,\s\(\):]*
# Before parameter name
[,\s:]
# Parameter name
%s\b
# Array dimensions if present
(\(([0-9,:]+)\))?
# Doxygen comment
[^!]*(!<(.*)$)?
""" % param
param_re = re.compile(param_pattern, re.IGNORECASE | re.VERBOSE)
for line in self.lines:
match = param_re.search(line)
if match:
param_type = match.group(1)
(type_params, extra_stuff, array, comment) = (
filter_match(match.group(i)) for i in (3, 4, 6, 8))
self.parameters.append(
Parameter(param, self, param_type, type_params,
extra_stuff, array, comment))
break
if not match:
raise RuntimeError("Couldn't find parameter %s "
"for subroutine %s" % (param, self.name))
def get_class(self):
"""Work out if this routine is a method of a class
Sets the self_idx attribute
Uses the routine name, which will be in the form Object_Method
if this is a method of a class. The same naming is also used
for user number routines so we have to check the parameter
type is correct.
"""
if len(self.parameters) == 0:
return
if self.name.count('_') > 1:
# Type name eg. = cmfe_Basis
# Sometimes the type name has an extra bit at the end, eg cmfe_FieldMLIO,
# but routines are named cmfe_FieldML_OutputCreate, so we check if
# the parameter type name starts with the routine type name
routine_type_name = '_'.join(self.name.split('_')[0:2])
# Object parameter is either first or last, it is last if this
# is a Create or CreateStart routine, otherwise it is first
if self.parameters[0].var_type == Parameter.CUSTOM_TYPE:
param_type_name = self.parameters[0].type_name
if param_type_name.startswith(routine_type_name):
self.self_idx = 0
return param_type_name
# Some stuff like cmfe_FieldML_OutputCreate has the "self" object
# as the last parameter, check for these here:
if (self.parameters[-1].var_type == Parameter.CUSTOM_TYPE and
self.name.find('Create') > -1):
param_type_name = self.parameters[-1].type_name
if param_type_name.startswith(routine_type_name):
self.self_idx = len(self.parameters) - 1
return param_type_name
class Parameter(object):
"""Information on a subroutine parameter"""
# Parameter types enum:
(INTEGER,
FLOAT,
DOUBLE,
CHARACTER,
LOGICAL,
CUSTOM_TYPE) = range(6)
def __init__(self, name, routine, param_type, type_params, extra_stuff,
array, comment):
"""Initialise a parameter
Arguments:
name -- Parameter name
routine -- Pointer back to the subroutine this parameter belongs to
param_type -- String from the parameter declaration
type_params -- Any parameters for parameter type, eg "DP" for a real
extra_stuff -- Any extra parameter properties listed after the type,
including intent
array -- The array dimensions included after the parameter name if they
exist, otherwise an empty string
comment -- The doxygen comment after the parameter
"""
self.name = name
self.routine = routine
self.pointer = False
self.comment = comment
self.type_name = None
intent = None
if extra_stuff != '':
match = re.search(
r'INTENT\(([A-Z]+)\)?', extra_stuff, re.IGNORECASE)
if match is not None:
intent = match.group(1)
if extra_stuff.find('DIMENSION') > -1:
sys.stderr.write("Warning: Ignoring DIMENSION specification "
"on parameter %s of routine %s\n" %
(self.name, routine.name))
sys.stderr.write(" Using DIMENSION goes against "
"the OpenCMISS style guidelines.\n")
if extra_stuff.find('POINTER') > -1:
self.pointer = True
# Get parameter intent
if intent is None:
self.intent = 'INOUT'
sys.stderr.write("Warning: No intent for parameter %s of "
"routine %s\n" % (self.name, routine.name))
else:
self.intent = intent
# Get array dimensions and work out how many dimension sizes
# are variable
if array != '':
self.array_spec = [a.strip() for a in array.split(',')]
self.array_dims = len(self.array_spec)
self.required_sizes = self.array_spec.count(':')
else:
self.array_spec = []
self.array_dims = 0
self.required_sizes = 0
# Work out the type of parameter
param_type = param_type.upper()
if param_type.startswith('INTEGER'):
self.var_type = Parameter.INTEGER
elif param_type.startswith('REAL'):
precision = type_params
if precision == 'DP':
self.var_type = Parameter.DOUBLE
else:
self.var_type = Parameter.FLOAT
elif param_type.startswith('CHARACTER'):
self.var_type = Parameter.CHARACTER
# Add extra dimension, 1D array of strings in Fortran is a 2D
# array of chars in C
self.array_spec.append(':')
self.array_dims += 1
self.required_sizes += 1
elif param_type.startswith('LOGICAL'):
self.var_type = Parameter.LOGICAL
elif param_type.startswith('TYPE'):
self.var_type = Parameter.CUSTOM_TYPE
self.type_name = type_params
else:
sys.stderr.write("Error: Unknown type %s for routine %s\n" %
(param_type, routine.name))
self.var_type = None
self.type_name = param_type
class Type(CodeObject):
"""Information on a Fortran type"""
def __init__(self, name, line_number, lines, source_file):
"""Initialise type
Arguments:
name -- Type name
line_number -- Line number in source where this is defined
lines -- Contents of lines where this type is defined
"""
super(Type, self).__init__(name, line_number)
self.lines = lines
self.source_file = source_file
self.methods = []
self._get_comments()
class DoxygenGrouping(object):
"""Store a line used for grouping in Doxygen"""
def __init__(self, line_number, line):
self.line_number = line_number
self.line = line.strip()
if line.find(r'\see') > -1:
self.type = 'see'
elif line.find(r'\addtogroup') > -1:
self.type = 'group'
self.group = line[
line.find('OpenCMISS_') + len('OpenCMISS_'):].split()[0]
elif line.find(r'\brief') > -1:
self.type = 'brief'
self.brief = line[line.find(r'\brief') + len(r'\brief'):].strip()
elif line.find(r'@{') > -1:
self.type = 'open'
elif line.find(r'@}') > -1:
self.type = 'close'
else:
self.type = None
class Enum(object):
"""A group of constants"""
def __init__(self, name):
self.name = name
self.constants = []
self.comment = ''
class UnsupportedParameterError(Exception):
pass
def _join_lines(source):
"""Remove Fortran line continuations"""
return re.sub(r'[\t ]*&[\t ]*[\r\n]+[\t ]*&[\t ]*', ' ', source)
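# e.g. _join_lines('CALL Foo(a, &\n  & b)') returns 'CALL Foo(a, b)'.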
class IdentifierDict(dict):
"""Dictionary used to store Fortran identifiers, to allow
getting items with case insensitivity"""
def __getitem__(self, key):
try:
val = dict.__getitem__(self, key)
except KeyError:
for ikey in self:
if ikey.lower() == key.lower():
val = dict.__getitem__(self, ikey)
break
else:
raise
return val
class IdentifierSet(set):
"""Set used to store Fortran identifiers, to allow
checking for items with case insensitivity"""
def add(self, val):
set.add(self, val.lower())
def __contains__(self, val):
return set.__contains__(self, val.lower())
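# Sketch of typical use (the iron path below is hypothetical):
#
#     library = LibrarySource('/path/to/iron')
#     enums, ungrouped_constants = library.group_constants()
#     for routine in library.public_subroutines:
#         print(routine.name, [p.name for p in routine.parameters])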
|
"""Extract subject-question-answer triples from 20 Questions game HITs.
See ``python extractquestions.py --help`` for more information.
"""
import collections
import json
import logging
import click
from scripts import _utils
logger = logging.getLogger(__name__)
# main function
@click.command(
context_settings={
'help_option_names': ['-h', '--help']
})
@click.argument(
'xml_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'output_path',
type=click.Path(exists=False, file_okay=True, dir_okay=False))
def extractquestions(xml_dir, output_path):
"""Extract questions from XML_DIR and write to OUTPUT_PATH.
Extract all unique subject-question-answer triples from a batch of
20 Questions HITs. XML_DIR should be the XML directory of one of
the 20 Questions HIT batches, extracted with AMTI. OUTPUT_PATH is
the location to which the data will be written.
"""
# submissions : the form data submitted from the twentyquestions
# HITs as a list of dictionaries mapping the question identifiers
# to the free text, i.e.:
#
# [{'gameRoomJson': game_room_json_string}, ...]
#
submissions = _utils.extract_xml_dir(xml_dir)
# extract the rows from the game room jsons
row_strs = set()
for submission in submissions:
data = json.loads(submission['gameRoomJson'])
# generate all the subject-question-answer triples created
# during the game.
subject = data['game']['round']['subject']
for questionAndAnswer in data['game']['round']['questionAndAnswers']:
# use an OrderedDict so the keys appear in the right order
# in the JSON.
row = collections.OrderedDict([
('subject', subject),
('question', questionAndAnswer['question']['questionText']),
('answer', questionAndAnswer['answer']['answerValue'])
])
row_strs.add(json.dumps(row))
# write out the data
with click.open_file(output_path, 'w') as output_file:
output_file.write('\n'.join(sorted(row_strs)))
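# Each line of OUTPUT_PATH is one JSON object of the form
# {"subject": ..., "question": ..., "answer": ...}, de-duplicated and sorted.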
if __name__ == '__main__':
extractquestions()
|
import re
PATH_TO_STOPWORDS = 'alfred/resources/stopwords.txt'
def sterilize(text):
"""Sterilize input `text`. Remove proceeding and preeceding spaces, and replace spans of
multiple spaces with a single space.
Args:
text (str): text to sterilize.
Returns:
sterilized message `text`.
"""
return re.sub(r'\s+', r' ', text.strip())
def remove_stopwords(tokens):
"""
Returns a list of all words in tokens not found in `PATH_TO_STOPWORDS`.
Args:
tokens (list): tokens to remove stopwords from.
Returns:
`tokens` with stopwords in `PATH_TO_STOPWORDS` removed.
"""
filtered_list = []
with open(PATH_TO_STOPWORDS, 'r') as f:
stopwords_list = [x.strip() for x in f.readlines()]
# use a set, lookup is quicker
stopwords_set = set(stopwords_list)
for word in tokens:
if word not in stopwords_set:
filtered_list.append(word)
return filtered_list
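# Short usage sketch (assumes the stopword list at PATH_TO_STOPWORDS is available):
if __name__ == '__main__':
    text = sterilize('  turn   on the   lamp ')
    print(text)                            # "turn on the lamp"
    print(remove_stopwords(text.split()))  # e.g. ["turn", "lamp"], depending on the list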
|
import torch
import torch.nn as nn
class ExpWarpLoss(nn.Module):
def __init__(self, yaw_warp, pitch_warp):
super().__init__()
assert yaw_warp * pitch_warp == 0
assert yaw_warp + pitch_warp > 0
self._Pwarp = pitch_warp > 0
print(f'[{self.__class__.__name__}] PitchWarp:{self._Pwarp}')
def forward(self, warpless, lwarp, rwarp, target):
# For Yaw, lwarp > warpless > rwarp
# For Pitch, lwarp ~ upwarp, rwarp ~ downwarp.
warpless = warpless[:, int(self._Pwarp)]
lwarp = lwarp[:, int(self._Pwarp)]
rwarp = rwarp[:, int(self._Pwarp)]
loss_1 = torch.clamp(torch.exp(warpless - lwarp) - 1, min=0)
loss_2 = torch.clamp(torch.exp(rwarp - warpless) - 1, min=0)
return (torch.mean(loss_1) + torch.mean(loss_2)) / 2
class ExpWarp2Loss(ExpWarpLoss):
def forward(self, warpless, lwarp, rwarp, target):
# For Yaw, lwarp > warpless > rwarp
warpless_ord = warpless[:, int(self._Pwarp)]
lwarp_ord = lwarp[:, int(self._Pwarp)]
rwarp_ord = rwarp[:, int(self._Pwarp)]
loss_1 = torch.clamp(torch.exp(warpless_ord - lwarp_ord) - 1, min=0)
loss_2 = torch.clamp(torch.exp(rwarp_ord - warpless_ord) - 1, min=0)
# consistency loss
warpless_cons = warpless[:, 1 - self._Pwarp]
lwarp_cons = lwarp[:, 1 - self._Pwarp]
rwarp_cons = rwarp[:, 1 - self._Pwarp]
loss_3 = (torch.abs(warpless_cons - lwarp_cons) + torch.abs(warpless_cons - rwarp_cons)) / 2
return (torch.mean(loss_1) + torch.mean(loss_2) + torch.mean(loss_3)) / 3
class ExpWarp4Loss(ExpWarpLoss):
def forward(self, warpless, lwarp, rwarp, target):
# For Yaw, lwarp > warpless > rwarp
# For Pitch, lwarp ~ upwarp, rwarp ~ downwarp.
warpless_ord = warpless[:, int(self._Pwarp)]
lwarp_ord = lwarp[:, int(self._Pwarp)]
rwarp_ord = rwarp[:, int(self._Pwarp)]
loss_1 = torch.clamp(warpless_ord - lwarp_ord, min=0)
loss_2 = torch.clamp(rwarp_ord - warpless_ord, min=0)
# consistency loss
warpless_cons = warpless[:, 1 - self._Pwarp]
lwarp_cons = lwarp[:, 1 - self._Pwarp]
rwarp_cons = rwarp[:, 1 - self._Pwarp]
loss_3 = (torch.abs(warpless_cons - lwarp_cons) + torch.abs(warpless_cons - rwarp_cons)) / 2
return (torch.mean(loss_1) + torch.mean(loss_2) + torch.mean(loss_3)) / 3
class ExpWarp5Loss(ExpWarp2Loss):
def __init__(self, yaw_warp, pitch_warp, yaw_range, pitch_range):
super().__init__(yaw_warp, pitch_warp)
assert yaw_range is None or all([len(elem) == 2 for elem in yaw_range])
assert pitch_range is None or all([len(elem) == 2 for elem in pitch_range])
self._y_range = yaw_range
self._p_range = pitch_range
print(f'[{self.__class__.__name__}] Yrange:{self._y_range} Prange:{self._p_range}')
def in_range(self, for_pitch, target, angle_range):
filtr = None
assert for_pitch in [0, 1]
for low, high in angle_range:
temp_filtr = (target[:, for_pitch] >= low) * (target[:, for_pitch] <= high)
if filtr is None:
filtr = temp_filtr
else:
filtr = filtr + temp_filtr
return filtr > 0
def relevant_yaw(self, target):
if self._y_range is None:
return True
return self.in_range(0, target, self._y_range)
def relevant_pitch(self, target):
if self._p_range is None:
return True
return self.in_range(1, target, self._p_range)
def forward(self, warpless, lwarp, rwarp, target):
# find relevant entries
yaw_filtr = self.relevant_yaw(target)
pitch_filtr = self.relevant_pitch(target)
filtr = yaw_filtr * pitch_filtr
if filtr.int().max() == 0:
return torch.mean(torch.Tensor([0]).to(target.device))
filtr = filtr.bool()
warpless = warpless[filtr]
lwarp = lwarp[filtr]
rwarp = rwarp[filtr]
return super().forward(warpless, lwarp, rwarp, None)
class ExpWarp3Loss(ExpWarpLoss):
def forward(self, actual, lwarp, rwarp):
actual_P = actual[:, 1 - self._Pwarp]
lwarp_P = lwarp[:, 1 - self._Pwarp]
rwarp_P = rwarp[:, 1 - self._Pwarp]
loss = (torch.abs(actual_P - lwarp_P) + torch.abs(actual_P - rwarp_P)) / 2
return torch.mean(loss)
class ExpWarp6Loss(ExpWarpLoss):
def __init__(self, yaw_warp, pitch_warp, target_mean=3, target_std=1):
super().__init__(yaw_warp, pitch_warp)
self._mean = target_mean
self._std = target_std
print(f'[{self.__class__.__name__}] Mean:{self._mean} Std:{self._std}')
def forward(self, warpless, lwarp, rwarp, target):
# For Yaw, lwarp > warpless > rwarp
diff = torch.normal(self._mean, self._std, (len(target), )).to(target.device)
# warpless_ord = warpless[:, int(self._Pwarp)]
# TODO: Check with shifted prediction as well
lwarp_target = target[:, int(self._Pwarp)] + diff
rwarp_target = target[:, int(self._Pwarp)] - diff
lwarp_pred = lwarp[:, int(self._Pwarp)]
rwarp_pred = rwarp[:, int(self._Pwarp)]
loss_1 = torch.mean(torch.abs(lwarp_target - lwarp_pred))
loss_2 = torch.mean(torch.abs(rwarp_target - rwarp_pred))
# consistency loss
warpless_cons = warpless[:, 1 - self._Pwarp]
lwarp_cons = lwarp[:, 1 - self._Pwarp]
rwarp_cons = rwarp[:, 1 - self._Pwarp]
loss_3 = (torch.abs(warpless_cons - lwarp_cons) + torch.abs(warpless_cons - rwarp_cons)) / 2
return (torch.mean(loss_1) + torch.mean(loss_2) + torch.mean(loss_3)) / 3
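# Minimal sketch of how these losses are used, assuming 2-D outputs ordered as
# (yaw, pitch) and exactly one of yaw_warp/pitch_warp set:
if __name__ == '__main__':
    criterion = ExpWarp2Loss(yaw_warp=1, pitch_warp=0)
    warpless, lwarp, rwarp, target = (torch.randn(8, 2) for _ in range(4))
    print(criterion(warpless, lwarp, rwarp, target))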
|
import curve, defaults, glyphs, pathdata, plot, svg, trans
# Only bring into the namespace the functions and classes that the user will need
# This distinguishes user interface from internal functions
# (Though the user can still access them, it intentionally requires more typing)
# Internal class members are preceded by an underscore
from defaults import BBox
from svg import SVG, template, load, load_stream, rgb, randomid, shortcut
from glyphs import latex
from trans import clone, tonumber, transform, evaluate, Delay, Freeze, Pin, window, rotation, transformation_angle, transformation_jacobian
from pathdata import poly, bezier, velocity, foreback, smooth
from curve import Curve, format_number, unicode_number, ticks, logticks
from plot import Fig, Canvas
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, left, right, next):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
def add_children(queue, node):
if node.left != None:
queue.append(node.left)
if node.right != None:
queue.append(node.right)
if root == None:
return root
queue = [root]
while len(queue) != 0:
new_queue = list()
prev = queue.pop(0)
add_children(new_queue, prev)
while len(queue) != 0:
new_node = queue.pop(0)
prev.next = new_node
add_children(new_queue, new_node)
prev = new_node
queue = new_queue
return root
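# Small sanity check using a Node class along the lines of the commented
# definition above (arguments defaulted here for brevity):
if __name__ == '__main__':
    class Node:
        def __init__(self, val, left=None, right=None, next=None):
            self.val = val
            self.left = left
            self.right = right
            self.next = next
    leaves = [Node(4), Node(5), Node(6), Node(7)]
    root = Node(1, Node(2, leaves[0], leaves[1]), Node(3, leaves[2], leaves[3]))
    Solution().connect(root)
    print([leaf.next.val if leaf.next else None for leaf in leaves])  # [5, 6, 7, None]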
|
from abc import ABCMeta, abstractmethod, abstractproperty
import os
import sqlite3
import time
from .. import db
class Task(metaclass=ABCMeta):
@abstractproperty
def name(self):
"""Name for the task"""
pass
@abstractmethod
def should_run(self):
"""Check whether the function should run. Should be fast."""
pass
@abstractmethod
def run(self):
"""Run the task and return a list of strings."""
pass
class SQLiteTask(db.DBMixin, Task):
host = None
user = None
password = None
format_mark = "?"
def __init__(self, *args, **kwargs):
if not os.path.exists(self.database):
with self.get_connection() as connection:
self.create_table(connection)
super().__init__(*args, **kwargs)
def get_connection(self):
"""Get configured sqlite connection"""
return sqlite3.connect(self.database)
class BasicSQLiteTask(SQLiteTask):
columns = [
('timestamp', 'FLOAT'),
('message_number', 'INT'),
('message', 'TEXT')
]
@abstractproperty
def database(self):
pass
@abstractproperty
def table(self):
pass
def get_last_message_number_sent(self):
query = "SELECT MAX(message_number) FROM {table}".format(table=self.table)
with self.get_connection() as connection:
last_message, = self.query_single_row(connection, query)
if last_message is None:
return -1
else:
return last_message
def register_message(self, message):
message_number = self.get_last_message_number_sent()
with self.get_connection() as connection:
self.insert_row(connection, (time.time(), message_number + 1, message))
def time_since_last_message(self):
query = "SELECT MAX(timestamp) FROM {table}".format(table=self.table)
with self.get_connection() as connection:
last_timestamp, = self.query_single_row(connection, query)
if last_timestamp is None:
return time.time()
else:
return time.time() - last_timestamp
def is_message_new(self, message):
query = "SELECT COUNT(*) FROM {table} WHERE message = ?".format(table=self.table)
with self.get_connection() as connection:
count, = self.query_single_row(connection, query, (message,))
return count == 0
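# Hedged sketch (assumption, not part of the original module): a minimal
# concrete subclass showing how the abstract members of BasicSQLiteTask might
# be filled in. The database path, table name and 6-hour cadence are made up
# for illustration; create_table/query_single_row/insert_row are assumed to be
# provided by db.DBMixin, as used above.
class ExampleHeartbeatTask(BasicSQLiteTask):
    database = "/tmp/example_tasks.db"   # hypothetical path
    table = "heartbeat"                  # hypothetical table name

    @property
    def name(self):
        return "example-heartbeat"

    def should_run(self):
        # Illustrative policy: run at most once every 6 hours.
        return self.time_since_last_message() > 6 * 60 * 60

    def run(self):
        message = "heartbeat at {:.0f}".format(time.time())
        if self.is_message_new(message):
            self.register_message(message)
        return [message]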
|
# -*- coding: iso-8859-15 -*-
"""
Core module of the development environment
"""
from optparse import OptionParser
import sys
import os
import re
import shutil
endSkeletor = False
options = None
args = None
def readTemplateFile(tmpname):
fpath = sys.argv[0]
fpath = os.path.split(fpath)[0]
f = open(fpath+'/{0}'.format(tmpname))
b = f.read()
f.close()
return b
def makeFile(path,filename):
path = path.rstrip('/')
fPath = '{0}/{1}'.format(path,filename)
f = open(fPath,'w')
f.close()
def doClone(options,args):
SHEBANG5 = r"#!c:\Python25\python.exe"
SHEBANG6 = r"#!c:\Python26\python.exe"
SHEBANG7 = r"#!c:\Python27\python.exe"
SHEBANGUX= r'#!/usr/bin/python'
shebang = SHEBANG7
cwd = os.path.split(os.getcwd())[0]
appname = ''
    if len(args) < 2:
appname = raw_input("Anwendungsname (leer = exit): ")
appname = appname.strip(' ')
if appname == '': return
else:
appname = args[1]
current = os.getcwd()
goal = os.path.split(os.getcwd())[0]+'/'+appname
current = current.replace('\\','/')
goal = goal.replace('\\','/')
if os.path.exists(goal):
if options.force:
shutil.rmtree(goal)
else:
print ("Anwendung "+appname+" Bereits vorhanden. Verwenden -f um zu ueberschreiben.")
return
shutil.copytree(current, goal)
if shebang != '':
sF = open(goal+'/scripts/start.py','r')
buffer = sF.readlines()
sF.close()
        buffer[0] = shebang + '\n'
sF = open(goal+'/scripts/start.py','w')
sF.write(''.join(buffer))
sF.close()
rFile = open('{0}/WEB-INF/mvc/root/view.tpl'.format(goal),'w')
rFile.write ("""<!-- Viewer -->
<%
import time
%>
<h1>Willkommen bei {0}</h1>
Datum <%out(time.strftime('%d.%m.%Y'))%>
<pre style="font-family:courier">
__ _______
___ __ __/ |/ / __/______ ___ _ ___
/ _ \/ // / /|_/ / _// __/ _ `/ ' \/ -_)
/ .__/\_, /_/ /_/_/ /_/ \_,_/_/_/_/\__/
/_/ /___/
</pre>""".format(appname))
rFile.close()
rFile = open('{0}/WEB-INF/templates/login.tpl'.format(goal),'r')
buffer = rFile.read()
rFile.close()
welcome=r' <h1 style="font-size:smaller;text-align:center;margin:auto;width:90%;">Willkommen bei pyMFrame</h1>'
newWelcome = ' <h1 style="font-size:smaller;text-align:center;margin:auto;width:90%;">Willkommen bei {0}</h1>'.format(appname)
buffer = buffer.replace(welcome,newWelcome)
rFile = open('{0}/WEB-INF/templates/login.tpl'.format(goal),'w')
rFile.write(buffer)
rFile.close()
rFile = open('{0}/WEB-INF/templates/default.tpl'.format(goal),'r')
buffer = rFile.read()
rFile.close()
buffer = buffer.replace(welcome,newWelcome)
rFile = open('{0}/WEB-INF/templates/default.tpl'.format(goal),'w')
rFile.write(buffer)
rFile.close()
print """
Die Datenbank befindet sich in ./WEB-INF/datastore/database/database.db3
"""
def doHelp(options,args):
try:
what = args[1]
except:
print 'Hilfe fuer was?'
print 'help [quit|exit|create|clone]'
return
if what in ['create','make']:
print 'Erzeugen [controller|domain]'
try:
what = args[2]
except:
print 'help make|create [controller|domain]'
return
if what=='controller':
print "\nusage: make|creat controller --path=<pfad> --domain=<Domainname>"
print "\nIst der Controler bereits vorhanden, wird die Funktion abgebrochen.\n"
print "\nWird die option -f (force) notiert, wird keine Ueberpruefung vorgenommen ob der Controller bereits vorhanden ist."
print "HINT: Das Programm kann keine Ueberpruefungen durchfuehren,\nob die Domain oder der Eintrag im Menue vorhanden ist!"
print "Der Pfad muss im ./conf/menu.py selbstaendig eingetragen werden!\n"
if what=='domain':
print "\nusage: make|create domain <domainname> --key=<primarikey-name> --table=<domain-name>"
print "\nIst die Domain bereits vorhanden, wird die Funktion abgebrochen.\n"
elif what in ['quit','exit']:
print 'Beenden des Skeletors'
elif what == 'clone':
        print 'clone erzeugt eine 100% Kopie der aktuellen Anwendung.\n'+\
            'Ist an sich nur in der Anwendung pymframe brauchbar. Wenn man weiss was man tut auch bei anderen moeglich.\n'+\
            'clone <neuer-app-name>\n'+\
            'Wenn die Anwendung bereits vorhanden ist wird das Programm abgebrochen.\n'+\
            'Die option -f LOESCHT! die Anwendung und legt diese neue an.'
def createMenu(path,controller,text):
cwd = os.getcwd()
mFileName = cwd+'/WEB-INF/conf/menu.py'
mF = open(mFileName,'r')
regEndEntry = re.compile('}\s*,\s*\]',re.M)
src = mF.read()
mF.close()
if re.search(path,src):
print " [warning] Menueeintrag {0} bereits vorhanden, nicht angelegt".format(path)
return
e = "},\n {\n 'path':'%(path)s',\n 'controller':'%(controller)sController',\n 'text':'%(text)s',\n 'display':True,\n 'rights':None,\n },\n ]\n" % {
'path':path,
'controller':controller,
'text':text
}
print " +Controller erzeugt"
if re.search(regEndEntry,src):
src = re.sub(regEndEntry,e,src)
mF = open(mFileName,'w')
mF.write(src)
mF.close()
print " +Controller in Menu eingetragen\n"
else:
print "** Kann in ./conf/menu.py' das Menuendekennzeuchen '},]' nicht finden.\n"
print " Menueintrag:\n {0}".format(e)
def makeController(options, args):
if len(args) < 3:
print 'Es fehlt der Controllername'
return
controllername = args[2]
path = options.path
if path == '': print 'Es fehlt die --path option'; return
domain = options.domain
if domain == None: print "--domain nicht angegeben"; return
path = path.rstrip('/')
cwd = os.getcwd()
goalDir = "./WEB-INF/mvc{0}".format(path)
goalDir = goalDir.replace('\\','/')
if not options.force:
if os.path.exists(goalDir):
print "*** Controller bereits vorhanden"
return
try:
os.makedirs(goalDir)
makeFile(goalDir,'__init__.py')
except: pass
controllername = os.path.split(goalDir)[1]
controllername = controllername.title()
f = open('{0}/{1}Controller.py'.format(goalDir,controllername),'w')
buffer = readTemplateFile('controller.tpl')
buffer = buffer.replace('{0}',controllername)
buffer = buffer.replace('{1}',domain)
buffer = buffer.replace('{2}',domain.lower())
print>>f,buffer
f.close()
f = open('{0}/grid.tpl'.format(goalDir),'w')
buffer = readTemplateFile('viewer.tpl')
buffer = buffer.replace('{0}',domain.replace('Domain',''))
print>>f,buffer
f.close()
createMenu(path,controllername,controllername)
def createDomain(options,args):
sDomain = args[2]
print sDomain
sPrimarykey = options.primarykey
if sPrimarykey == None:
print "Kein Primary Key (--key) angegeben"
return
sTable = options.table
if sTable == None :
print "Kein Tabellenname (--table) angegeben"
return
filename = "./WEB-INF/mvc/domain/{0}.py".format(sDomain.lower())
print "Erzeuge:{0}\n Tabelle: {1}\n PK: {2}\n in {3}".format(sDomain,sTable,sPrimarykey,filename)
if os.path.exists(filename):
if not options.force:
print "Domain {0} existier bereits".format(sDomain)
return
tpl = readTemplateFile('domain.tpl')
buffer = tpl % {'domain':sDomain, 'key':sPrimarykey, 'table':sTable}
f = open(filename,'w')
print >>f,buffer
f.close()
def doMake(options,args):
subcom = {
'controller':makeController,
'domain':createDomain,
'menu':None
}
    if len(args) < 2 or args[1] not in subcom or subcom[args[1]] is None:
        print "Ungueltiges Subkommando in make"
        return
    subcom[args[1]](options, args)
parser = OptionParser('python skeletor.py COMMAND [options...]')
parser.add_option('-p','--path',dest='path')
parser.add_option('-d','--domain',dest='domain')
parser.add_option('-k','--key',dest='primarykey')
parser.add_option('-t','--table',dest='table')
parser.add_option('-e','--exclude',dest='exclude')
parser.add_option('-f','--force',dest='force',action='store_true')
(options, args) = parser.parse_args()
commands = {
'help' :doHelp,
'make' :doMake,
'create' :doMake,
'clone' :doClone
}
def checkCommand(options,args):
if len(args) < 1: return
if args[0] in commands:
commands[args[0]](options,args)
else:
print "ungueltiges Kommando"
if len(args) < 1:
    # Interactive mode
print '''
__ _______
___ __ __/ |/ / __/______ ___ _ ___
/ _ \/ // / /|_/ / _// __/ _ `/ ' \/ -_)
/ .__/\_, /_/ /_/_/ /_/ \_,_/_/_/_/\__/
/_/ /___/
Einrichten einer pyMframe Anwendung
W. Nagy
quit|exit beendet den interaktiven Modus
help bietet Hilfe
'''
while not endSkeletor:
cli = raw_input('skeletor>')
cli = cli.strip()
if cli in ['quit','exit']:
endSkeletor = True
continue
if cli == '': continue
save0=sys.argv[0]
sys.argv = [save0]+cli.split(' ')
(options, args) = parser.parse_args()
checkCommand(options,args)
else:
checkCommand(options,args)
|
# -*- coding: utf-8 -*-
"""
Created with IntelliJ IDEA.
User: jinhuichen
Date: 3/20/2018 11:12 AM
Description:
"""
from fetchman.pipeline.console_pipeline import ConsolePipeline
from fetchman.pipeline.pic_pipeline import PicPipeline
from fetchman.pipeline.test_pipeline import TestPipeline
POSTGRESQL_PIPELINE = 'postgresql'
MONGODB_PIPELINE = 'mongodb'
MYSQL_PIPELINE = 'mysql'
SQL_SERVER_PIPELINE = 'sql_server'
SQLITE_PIPELINE = 'sqlite'
CONSOLE_PIPELINE = 'console'
PIPEINE_MAP = {
CONSOLE_PIPELINE: ConsolePipeline,
MONGODB_PIPELINE: None,
MYSQL_PIPELINE: None,
SQL_SERVER_PIPELINE: None,
SQLITE_PIPELINE: None,
POSTGRESQL_PIPELINE: None
}
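# Hedged usage sketch (assumption, not part of the original module): pipelines
# are looked up by name in PIPEINE_MAP; entries mapped to None are treated here
# as not yet implemented.
def get_pipeline_class(name):
    pipeline_cls = PIPEINE_MAP.get(name)
    if pipeline_cls is None:
        raise NotImplementedError("pipeline '{}' is not available".format(name))
    return pipeline_cls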
|
"""
Base Class for HTTP Client we use to interact with APIs
"""
import json
import requests
import requests.packages.urllib3 # pylint: disable=E0401
import logging
from random import choice
from string import ascii_uppercase
requests.packages.urllib3.disable_warnings() # pylint: disable=E1101
line_separator = '\n' + 80 * '_'
def request_to_curl(req, linesep='\\\n '):
parts = ['curl', ]
add = parts.append
add('-X')
add(req.method)
add(linesep)
for hdr, val in list(req.headers.items()):
if hdr != 'Accept-Encoding':
add('-H')
add('"{}: {}"'.format(hdr, val))
add(linesep)
if req.body:
add('-d')
add("'{}'".format(req.body))
add('"{}"'.format(req.url))
return ' '.join(parts)
def response_to_curl(resp, linesep='\n'):
parts = []
add = parts.append
parts.append(request_to_curl(resp.request))
parts.append('HTTP {} {}'.format(resp.status_code, resp.reason))
for hdr, val in list(resp.headers.items()):
add('{}: {}'.format(hdr, val))
add('{!r}'.format(resp.content))
return linesep.join(parts)
class HttpClientBase(object):
"""
:param auth_token: a 32 char uppercase hex string representing API key
:type auth_token: str
:param base_url: the base url to connect to
:type base_url: str
:param http_timeout: the amount of time to timeout
:type http_timeout: int
"""
def __init__(self,
base_url,
logger,
auth_token=None,
http_timeout=(6.05, 30),
content_type='application/json',
max_retries=0,
                 extra_headers=None):
self.base_url = base_url
# http headers
self.headers = {}
if content_type:
self.headers['Content-Type'] = content_type
if auth_token is not None:
self.headers['Authorization'] = auth_token
if extra_headers is not None:
self.headers.update(extra_headers)
self.http_timeout = http_timeout
self.session = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
https_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
self.session.mount('http://', http_adapter)
self.session.mount('https://', https_adapter)
self.logger = logger
def _get(self, url, params=None):
"""HTTP GET with params"""
url = self.base_url + '/' + url
self.logger.info(line_separator)
resp = self.session.get(url, headers=self.headers, verify=False, timeout=self.http_timeout,
params=params)
self.logger.debug(response_to_curl(resp))
self.logger.info("GET URL = %s" % resp.request.url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _patch(self, url, data):
"""HTTP PATCH with params"""
url = self.base_url + '/' + url
self.logger.info(line_separator)
resp = self.session.patch(url, headers=self.headers, data=json.dumps(data), verify=False,
timeout=self.http_timeout)
self.logger.debug(response_to_curl(resp))
self.logger.info("PATCH URL = %s" % resp.request.url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _put(self, url, data=None, params=None, json_data_type=True):
"""HTTP PUT with params"""
url = self.base_url + '/' + url
self.logger.info(line_separator)
        if json_data_type:
resp = self.session.put(url, headers=self.headers, data=json.dumps(data), verify=False,
timeout=self.http_timeout)
else:
resp = self.session.put(url, headers=self.headers, data=data, verify=False,
timeout=self.http_timeout)
self.logger.debug(response_to_curl(resp))
self.logger.info("PUT URL = %s" % resp.request.url)
self.logger.info("STATUS CODE =", resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _post(self, url, data=None, params=None, files=None, payload_binary=False):
"""HTTP PUT with params"""
url = self.base_url + '/' + url
self.logger.info(line_separator)
if payload_binary:
resp = self.session.post(url,
headers=self.headers,
data=data,
verify=False,
timeout=self.http_timeout,
params=params,
files=files)
elif data is not None:
resp = self.session.post(url,
headers=self.headers,
data=json.dumps(data),
verify=False,
timeout=self.http_timeout,
params=params,
files=files)
else:
resp = self.session.post(url,
headers=self.headers,
verify=False,
timeout=self.http_timeout,
params=params,
files=files)
self.logger.debug(response_to_curl(resp))
self.logger.info("POST URL = %s" % resp.request.url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _delete(self, url, data=None):
url = self.base_url + '/' + url
self.logger.info(line_separator)
headers = self.headers
resp = self.session.delete(url, headers=headers, verify=False, data='' if data is None else json.dumps(data),
timeout=self.http_timeout)
self.logger.debug(response_to_curl(resp))
self.logger.info("DELETE URL = %s" % url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("RESPONSE: %s" % resp.text)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _get_diagnostics(self, servicename="service", adminurl=None):
"""HTTP PUT with params"""
        if adminurl is not None:
if "v3" in adminurl:
adminurl = adminurl[:adminurl.index("/v3")]
url = adminurl + "/" + "v3/" + servicename + '/_/' + 'diagnostics'
else:
if "v3" in self.base_url:
self.base_url = self.base_url[:self.base_url.index("/v3")]
url = self.base_url + "/" + "v3/" + servicename + '/_/diagnostics'
self.logger.info(line_separator)
resp = self.session.get(url,
headers=self.headers,
verify=False,
timeout=self.http_timeout)
self.logger.debug(response_to_curl(resp))
self.logger.info("GET URL = %s" % resp.request.url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _get_health_status(self, status="health", servicename="service", adminurl=None):
"""HTTP GET health status
Args:
status: "health" OR "metrics"
admin_url : if admin_end points are different than url
"""
        if adminurl is not None:
if "v3" in adminurl:
adminurl = adminurl[:adminurl.index("/v3")]
url = adminurl + "/" + "v3/" + servicename + '/_/' + status
else:
if "v3" in self.base_url:
self.base_url = self.base_url[:self.base_url.index("/v3")]
url = self.base_url + "/v3/" + servicename + '/_/' + status
self.logger.info(line_separator)
resp = self.session.get(url, headers=self.headers, verify=False, timeout=self.http_timeout)
self.logger.debug(response_to_curl(resp))
self.logger.info(response_to_curl(resp).split("{")[1])
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
def _post_queries(self, query_url, data):
if "v3" in self.base_url:
self.base_url = self.base_url[:self.base_url.index("/v3")]
url = self.base_url + '/' + query_url
else:
url = self.base_url + '/' + query_url
self.logger.info(line_separator)
        if data is not None and data != {}:
resp = self.session.post(url, headers=self.headers,
data=json.dumps(data),
verify=False,
timeout=self.http_timeout)
else:
resp = self.session.post(url, headers=self.headers,
verify=False,
timeout=self.http_timeout)
self.logger.info(response_to_curl(resp))
self.logger.info("GET URL = %s" % resp.request.url)
self.logger.info("STATUS CODE = %s" % resp.status_code)
self.logger.info("TIME ELAPSED = %s" % resp.elapsed.total_seconds())
return resp
@staticmethod
def _extended_text(rng):
return ''.join(choice(ascii_uppercase) for i in range(rng))
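# Hedged usage sketch (not part of the original module): constructing the client
# against a public echo service and issuing a GET. The URL is a placeholder and
# only methods defined above are used.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    client = HttpClientBase(base_url="https://httpbin.org",
                            logger=logging.getLogger("http-client-demo"),
                            auth_token=None)
    response = client._get("get", params={"q": "demo"})
    print(response.status_code, response.json())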
|
import os
import zipfile
import pandas as pd
from ..datasets import core
BASE_URL = "https://ti.arc.nasa.gov/"
def load_turbofan_engine():
"""
Load Turbofan Engine Degradation Simulation Dataset
from https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/
:return:
"""
save_dir = core.SAVE_DIR + "/NASA Turbofan/"
if not os.path.isdir(save_dir):
url = BASE_URL + "m/project/prognostic-repository/CMAPSSData.zip"
print("Downloading NASA Turbofan Engine Degradation Simulation Dataset ...")
core.fetch_dataset(url, extract=False)
with zipfile.ZipFile("CMAPSSData.zip") as f:
f.extractall("NASA Turbofan")
os.remove("CMAPSSData.zip")
modes = ["train", "test"]
def load_per_type(mode):
files = ["_FD001", "_FD002", "_FD003", "_FD004"]
loaded = []
for file in files:
tmp = pd.read_csv("NASA Turbofan/" + mode + file + ".txt", header=None, delim_whitespace=True).values
loaded.append(tmp)
return loaded
train_data = load_per_type(modes[0])
test_data = load_per_type(modes[1])
return train_data, test_data
def load_phm08(load_all=False):
"""
Load PHM08 Challenge Data Set
from https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/
:return:
"""
save_dir = core.SAVE_DIR + "/PHM08/"
if not os.path.isdir(save_dir):
url = BASE_URL + "m/project/prognostic-repository/Challenge_Data.zip"
print("Downloading NASA PHM08 Dataset ...")
core.fetch_dataset(url, extract=False)
with zipfile.ZipFile("Challenge_Data.zip") as f:
f.extractall("PHM08")
os.remove("Challenge_Data.zip")
    train = pd.read_csv("PHM08/train.txt", header=None, delim_whitespace=True).values
    test = pd.read_csv("PHM08/test.txt", header=None, delim_whitespace=True).values
    if load_all:
        final_test = pd.read_csv("PHM08/final_test.txt", header=None, delim_whitespace=True).values
        return train, test, final_test
    return train, test
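# Hedged usage sketch (assumption, not part of the original module): the loaders
# return plain numpy arrays, so shapes can be inspected directly. Downloading the
# archives requires network access and a writable core.SAVE_DIR.
if __name__ == "__main__":
    train_sets, test_sets = load_turbofan_engine()
    for name, arr in zip(["FD001", "FD002", "FD003", "FD004"], train_sets):
        print(name, arr.shape)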
|
from datetime import datetime
from django.test import SimpleTestCase
from data_schema.models import FieldSchemaType
from data_schema.convert_value import convert_value
class ConvertValueExceptionTest(SimpleTestCase):
def test_get_value_exception(self):
"""
Tests that when we fail to parse a value, we get a ValueError with additional information attached.
"""
bad_value = '-'
with self.assertRaises(ValueError) as ctx:
convert_value(FieldSchemaType.INT, bad_value)
        self.assertEqual(bad_value, ctx.exception.bad_value)
        self.assertEqual(FieldSchemaType.INT, ctx.exception.expected_type)
class BooleanConverterTest(SimpleTestCase):
def test_convert_value_true(self):
"""
Verifies true string values are True
"""
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 't'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'T'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'true'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'True'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'TRUE'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, True))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 1))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, '1'))
def test_convert_value_false(self):
"""
Verifies false string values are False
"""
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'f'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'F'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'false'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'False'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'FALSE'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, False))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 0))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, '0'))
def test_convert_value_empty(self):
"""
Verifies that any other value returns None
"""
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, None))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, ''))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 'string'))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 5))
def test_convert_datetime(self):
"""
Verifies that datetime field type attempts to coerce to timestamp before
attempting to parse the string as a date string
"""
        # numeric timestamps (ints, floats, or their string forms) are coerced to datetime
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, 1447251508), datetime)
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, 1447251508.1234), datetime)
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, 1.447251508e9), datetime)
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, '1447251508'), datetime)
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, '1447251508.1234'), datetime)
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, '1.447251508e9'), datetime)
# parses date strings
self.assertIsInstance(convert_value(FieldSchemaType.DATETIME, '2015-11-09 15:30:00'), datetime)
def test_convert_value_default(self):
"""
Verifies that the default value will be used if the passed value is null
"""
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, None, default_value=True))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 'invalid', default_value=True))
|
# -*- coding: utf-8 -*-
"""Dummy test case that invokes all of the C++ unit tests."""
import testify as T
import moe.build.GPP as C_GP
class CppUnitTestWrapperTest(T.TestCase):
"""Calls a C++ function that runs all C++ unit tests.
TODO(GH-115): Remove/fix this once C++ gets a proper unit testing framework.
"""
def test_run_cpp_unit_tests(self):
"""Call C++ function that runs all C++ unit tests and assert 0 errors."""
number_of_cpp_test_errors = C_GP.run_cpp_tests()
T.assert_equal(number_of_cpp_test_errors, 0)
if __name__ == "__main__":
T.run()
|
from .ichart import iChart
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import DetailView, ListView
from dfirtrack_main.forms import EntryForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Entry
class Entrys(LoginRequiredMixin, ListView):
login_url = '/login'
model = Entry
template_name = 'dfirtrack_main/entry/entrys_list.html'
def get_queryset(self):
debug_logger(str(self.request.user), " ENTRY_ENTERED")
return Entry.objects.order_by('entry_id')
class EntrysDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Entry
template_name = 'dfirtrack_main/entry/entrys_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
entry = self.object
entry.logger(str(self.request.user), " ENTRYDETAIL_ENTERED")
return context
@login_required(login_url="/login")
def entrys_add(request):
if request.method == 'POST':
form = EntryForm(request.POST)
if form.is_valid():
entry = form.save(commit=False)
entry.entry_created_by_user_id = request.user
entry.entry_modified_by_user_id = request.user
entry.save()
entry.logger(str(request.user), " ENTRY_ADD_EXECUTED")
messages.success(request, 'Entry added')
return redirect('/systems/' + str(entry.system.system_id))
else:
if request.method == 'GET' and 'system' in request.GET:
system = request.GET['system']
form = EntryForm(initial={
'system': system,
})
else:
form = EntryForm()
debug_logger(str(request.user), " ENTRY_ADD_ENTERED")
return render(request, 'dfirtrack_main/entry/entrys_add.html', {'form': form})
@login_required(login_url="/login")
def entrys_edit(request, pk):
entry = get_object_or_404(Entry, pk=pk)
if request.method == 'POST':
form = EntryForm(request.POST, instance=entry)
if form.is_valid():
entry = form.save(commit=False)
entry.entry_modified_by_user_id = request.user
entry.save()
entry.logger(str(request.user), " ENTRY_EDIT_EXECUTED")
messages.success(request, 'Entry edited')
return redirect('/systems/' + str(entry.system.system_id))
else:
form = EntryForm(instance=entry)
entry.logger(str(request.user), " ENTRY_EDIT_ENTERED")
return render(request, 'dfirtrack_main/entry/entrys_edit.html', {'form': form})
|
"""
Models for configuration of the feature flags
controlling persistent grades.
"""
from config_models.models import ConfigurationModel
from django.conf import settings
from django.db.models import BooleanField, IntegerField, TextField
from opaque_keys.edx.django.models import CourseKeyField
from openedx.core.lib.cache_utils import request_cached
class PersistentGradesEnabledFlag(ConfigurationModel):
"""
Enables persistent grades across the platform.
When this feature flag is set to true, individual courses
must also have persistent grades enabled for the
feature to take effect.
.. no_pii:
.. toggle_name: PersistentGradesEnabledFlag.enabled
.. toggle_implementation: ConfigurationModel
.. toggle_default: False
.. toggle_description: When enabled, grades are persisted. This means that PersistentCourseGrade objects are
created for student grades. In order for this to take effect, CoursePersistentGradesFlag objects must also be
created individually for each course. Alternatively, the PersistentGradesEnabledFlag.enabled_for_all_courses
waffle flag or the PERSISTENT_GRADES_ENABLED_FOR_ALL_TESTS feature flag can be set to True to enable this
feature for all courses.
.. toggle_use_cases: temporary
.. toggle_creation_date: 2016-08-26
.. toggle_target_removal_date: None
.. toggle_tickets: https://github.com/edx/edx-platform/pull/13329
"""
# this field overrides course-specific settings to enable the feature for all courses
enabled_for_all_courses = BooleanField(default=False)
@classmethod
@request_cached()
def feature_enabled(cls, course_id=None):
"""
Looks at the currently active configuration model to determine whether
the persistent grades feature is available.
If the flag is not enabled, the feature is not available.
        If the flag is enabled and the provided course_id is for a course
with persistent grades enabled, the feature is available.
If the flag is enabled and no course ID is given,
we return True since the global setting is enabled.
"""
if settings.FEATURES.get('PERSISTENT_GRADES_ENABLED_FOR_ALL_TESTS'):
return True
if not PersistentGradesEnabledFlag.is_enabled():
return False
elif not PersistentGradesEnabledFlag.current().enabled_for_all_courses and course_id:
effective = CoursePersistentGradesFlag.objects.filter(course_id=course_id).order_by('-change_date').first()
return effective.enabled if effective is not None else False
return True
class Meta:
app_label = "grades"
def __str__(self):
current_model = PersistentGradesEnabledFlag.current()
return "PersistentGradesEnabledFlag: enabled {}".format(
current_model.is_enabled()
)
class CoursePersistentGradesFlag(ConfigurationModel):
"""
Enables persistent grades for a specific
course. Only has an effect if the general
flag above is set to True.
.. no_pii:
"""
KEY_FIELDS = ('course_id',)
class Meta:
app_label = "grades"
# The course that these features are attached to.
course_id = CourseKeyField(max_length=255, db_index=True)
def __str__(self):
not_en = "Not "
if self.enabled:
not_en = ""
return f"Course '{str(self.course_id)}': Persistent Grades {not_en}Enabled"
class ComputeGradesSetting(ConfigurationModel):
"""
.. no_pii:
"""
class Meta:
app_label = "grades"
batch_size = IntegerField(default=100)
course_ids = TextField(
blank=False,
help_text="Whitespace-separated list of course keys for which to compute grades."
)
|
from collections import OrderedDict
from .states import DefaultState
from lewis.devices import StateMachineDevice
class SampleHolderMaterials(object):
ALUMINIUM = 0
GLASSY_CARBON = 1
GRAPHITE = 2
QUARTZ = 3
SINGLE_CRYSTAL_SAPPHIRE = 4
STEEL = 5
VANADIUM = 6
class SimulatedIndfurn(StateMachineDevice):
def _initialize_data(self):
"""
Initialize all of the device's attributes.
"""
self.setpoint = 20
self.pipe_temperature = 25.1
self.capacitor_bank_temperature = 30.3
self.fet_temperature = 35.8
self.p, self.i, self.d = 0, 0, 0
self.sample_time = 100
self.direction_heating = True
self.pid_lower_limit, self.pid_upper_limit = 0, 0
self.pid_mode_automatic = True
self.running = True
self.psu_voltage, self.psu_current, self.output = 0, 0, 0
self.remote_mode = True
self.power_supply_on = True
self.sample_area_led_on = True
self.hf_on = False
self.psu_overtemp, self.psu_overvolt = False, False
self.cooling_water_flow = 100
self.sample_holder_material = SampleHolderMaterials.ALUMINIUM
self.thermocouple_1_fault, self.thermocouple_2_fault = 0, 0
def _get_state_handlers(self):
return {'default': DefaultState()}
def _get_initial_state(self):
return 'default'
def _get_transition_handlers(self):
return OrderedDict([])
def is_cooling_water_flow_ok(self):
return self.cooling_water_flow >= 100
|
__author__ = 'Meemaw'
vhodnaTabela = []
indeks = 0
with open("odpri.txt") as file:
for line in file:
indeks+=1
line = line.split()
        vhodnaTabela.append(list(map(int, line)))
vhodnaTabela.append([0]*(indeks+1))
for i in range(indeks-1,-1,-1):
for x in range(len(vhodnaTabela[i])):
vhodnaTabela[i][x] += max(vhodnaTabela[i+1][x],vhodnaTabela[i+1][x+1])
print(vhodnaTabela[0])
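# Hedged illustration (not part of the original script): the same bottom-up
# recurrence on a small hard-coded triangle. Each cell accumulates the larger of
# the two values directly below it, so after the sweep the apex holds the maximum
# top-to-bottom path sum.
def _max_path_sum_demo():
    triangle = [[3], [7, 4], [2, 4, 6]]
    triangle.append([0] * (len(triangle) + 1))  # sentinel row of zeros
    for i in range(len(triangle) - 2, -1, -1):
        for x in range(len(triangle[i])):
            triangle[i][x] += max(triangle[i + 1][x], triangle[i + 1][x + 1])
    return triangle[0][0]

assert _max_path_sum_demo() == 14  # best path is 3 -> 7 -> 4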
|
import pandas as pd
# df = pd.read_csv('Data/main_dataset.csv')
df = pd.read_csv('Data/personality_questions_answers.csv')
# personality_features = ['Openness', 'Conscientiousness', 'Extraversion', 'Agreeableness', 'Emotional range']
personality_features = ['openness', 'conscientiousness', 'extraversion', 'agreeableness', 'emotional range']
arr = []
for x in personality_features:
arr.append([x, df[x].mean()])
tf = pd.DataFrame(arr, columns=['Big5', 'mean'])
# tf.to_csv('Data/twitter_big5_mean.csv', index=False)
tf.to_csv('Data/stackoverflow_big5_mean.csv', index=False)
|
from common import * # NOQA
from cattle import ApiError
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/certs')
def test_create_cert_basic(client):
cert = _read_cert("san_domain_com.crt")
key = _read_cert("san_domain_com.key")
cert1 = client. \
create_certificate(name=random_str(),
cert=cert,
key=key)
cert1 = client.wait_success(cert1)
assert cert1.state == 'active'
assert cert1.cert == cert
assert cert1.certFingerprint is not None
assert cert1.expiresAt is not None
assert cert1.CN is not None
assert cert1.issuer is not None
assert cert1.issuedAt is not None
assert cert1.algorithm is not None
assert cert1.version is not None
assert cert1.serialNumber is not None
assert cert1.keySize == 2048
assert cert1.subjectAlternativeNames is not None
def test_dup_names(super_client, client):
cert_input = _read_cert("san_domain_com.crt")
key = _read_cert("san_domain_com.key")
name = random_str()
cert1 = super_client. \
create_certificate(name=name,
cert=cert_input,
key=key)
super_client.wait_success(cert1)
assert cert1.name == name
cert2 = client. \
create_certificate(name=name,
cert=cert_input,
key=key)
cert2 = super_client.wait_success(cert2)
assert cert2.name == name
assert cert2.accountId != cert1.accountId
with pytest.raises(ApiError) as e:
super_client. \
create_certificate(name=name,
cert=cert_input,
key=key)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
def test_create_cert_invalid_cert(client):
cert = _read_cert("cert_invalid.pem")
key = _read_cert("key.pem")
with pytest.raises(ApiError) as e:
client. \
create_certificate(name=random_str(),
cert=cert,
key=key)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidFormat'
def test_create_cert_chain(client):
cert = _read_cert("enduser-example.com.crt")
key = _read_cert("enduser-example.com.key")
chain = _read_cert("enduser-example.com.chain")
cert1 = client. \
create_certificate(name=random_str(),
cert=cert,
key=key,
certChain=chain)
cert1 = client.wait_success(cert1)
assert cert1.state == 'active'
assert cert1.cert == cert
return cert1
def _read_cert(name):
with open(os.path.join(RESOURCE_DIR, name)) as f:
return f.read()
def test_update_cert(client):
cert1 = _read_cert("enduser-example.com.crt")
key1 = _read_cert("enduser-example.com.key")
c1 = client. \
create_certificate(name=random_str(),
cert=cert1,
key=key1)
c1 = client.wait_success(c1)
cert2 = _read_cert("san_domain_com.crt")
key2 = _read_cert("san_domain_com.key")
c2 = client.update(c1, cert=cert2, key=key2)
c2 = client.wait_success(c2, 120)
assert c2.certFingerprint is not None
assert c2.expiresAt is not None
assert c2.CN is not None
assert c2.issuer is not None
assert c2.issuedAt is not None
assert c2.algorithm is not None
assert c2.version is not None
assert c2.serialNumber is not None
assert c2.keySize == 2048
assert c2.subjectAlternativeNames is not None
assert c2.cert == cert2
assert c2.certFingerprint != c1.certFingerprint
assert c2.expiresAt != c1.expiresAt
assert c2.CN != c1.CN
assert c2.issuer != c1.issuer
assert c2.issuedAt != c1.issuedAt
assert c2.serialNumber != c1.serialNumber
|
{%extends 'setup.py.jj2'%}
{%block platform_block%}
{%endblock%}
{%block morefiles%} 'CONTRIBUTORS.rst',{%endblock%}
|
from graphene_django import DjangoObjectType
import graphene
from dashboard.models import Course
from dashboard.graphql.objects import CourseType
from dashboard.rules import is_admin_or_enrolled_in_course, is_admin
from graphql import GraphQLError
import logging
logger = logging.getLogger(__name__)
class Query(graphene.ObjectType):
course = graphene.Field(CourseType, course_id=graphene.ID(), canvas_id=graphene.ID())
courses = graphene.List(CourseType)
@staticmethod
def resolve_course(parent, info, course_id=None, canvas_id=None):
user = info.context.user
if not user.is_authenticated:
raise GraphQLError('You must be logged in to access this resource!')
course = None
if canvas_id:
course = Course.objects.get(canvas_id=canvas_id)
elif course_id:
course = Course.objects.get(id=course_id)
if not course or not is_admin_or_enrolled_in_course.test(user, course):
raise GraphQLError('You do not have permission to access this resource!')
return course
@staticmethod
def resolve_courses(parent, info):
user = info.context.user
if not user.is_authenticated:
raise GraphQLError('You must be logged in to access these resource!')
if is_admin.test(user):
return Course.objects.all()
else:
            courses = Course.objects.raw(
                """
                SELECT course.*
                FROM course JOIN user ON course.id = user.course_id
                WHERE user.sis_name = %s
                """,
                [user.username],
            )
return courses
|
"""Tests for avro2py/avro_types.py"""
import avro2py.avro_types as avro_types
def test_parsing_permits_metadata_attributes():
"""
https://avro.apache.org/docs/1.10.2/spec.html#schemas states: "Attributes
not defined in this document are permitted as metadata, but must not
affect the format of serialized data". Ensure that metadata keys are
permitted during avro2py schema parsing, and do not affect output (modulo
original_schema field).
"""
def schema(embedded_type):
return dict(
type="record",
name="ExampleRecord",
namespace="messages.example",
doc="Example record",
fields=[
dict(
name="foo",
type=[
"null",
embedded_type
],
doc="Foo field",
default=None,
),
]
)
metadata_schema = schema(
embedded_type={
"type": "map",
"values": {
"type": "string",
"avro.java.string": "String"
},
"avro.java.string": "String"
}
)
non_metadata_schema = schema(
embedded_type={
"type": "map",
"values": {
"type": "string",
}
}
)
parsed_metadata_schema = avro_types.parse_into_types(metadata_schema)._replace(original_schema=None)
parsed_non_metadata_schema = avro_types.parse_into_types(non_metadata_schema)._replace(original_schema=None)
assert parsed_metadata_schema == parsed_non_metadata_schema
|
import pydub,os,PIL,ffmpeg,shutil,time,textwrap
from pathlib import Path
from gtts import gTTS
from PIL import Image, ImageDraw, ImageFont
from pydub import AudioSegment
from moviepy.editor import *
import snatch
try:
shutil.rmtree("out")
except FileNotFoundError:
pass
os.mkdir("out")
def insert_newlines(string, every=100):
#https://stackoverflow.com/questions/2657693/insert-a-newline-character-every-64-characters-using-python
return textwrap.fill(string, width=every)
def tts(speak, n, post):
tts = gTTS(speak, lang="en", tld="co.uk")
tts.save(f'out/{n}.mp3')
image = Image.new("RGB", (1920, 1080), "black")
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("casual.ttf", size=42)
text = insert_newlines(f"u/{post['User']}\n{post['Content']}").replace(f"u/{post['User']} ", f"u/{post['User']}\n")
print(text)
draw.text((10, 25), text, font=font)
image.save(f'out/{n}.jpg')
posts = snatch.getRedditPosts('askreddit')
print(posts)
n=0
tts(posts[0] + ".; ", n, {'User': "", 'Content': posts[0]})
for i in posts[1:]:
n+=1
tts("u/slash/{}; {}.".format(i["User"], i["Content"]), n, i)
print("Generated:" + str(n))
audios = []
n=0
#old PyDub based code
'''for file in os.listdir("out"):
print(file)
if not file == "0.mp3" and file.endswith(".mp3"):
audios += [AudioSegment.from_mp3("out/" + file), AudioSegment.from_mp3("beep.mp3")]
out = AudioSegment.from_mp3("out/0.mp3") + AudioSegment.from_mp3("beep.mp3")
for i in audios[:-1]:
out = out + i
out.export("out.mp3", format="mp3")
'''
files = []
#n=0
pic = ffmpeg.input("beep.jpg")
vid = ffmpeg.input("beep.mp3")
#print(n, file)
(
ffmpeg
.concat(pic, vid, v=1, a=1)
.output(f"beep.mp4")
.run(overwrite_output=True)
)
for file in os.listdir("out"):
if file.endswith(".mp3"):
n = file.replace(".mp3", "")
pic = ffmpeg.input(f"out/{n}.jpg")
vid = ffmpeg.input(f"out/{file}")
#print(n, file)
(
ffmpeg
.concat(pic, vid, v=1, a=1)
.output(f"out/{n}-fixed.mp4")
.run(overwrite_output=True)
)
files.append(f"out/{n}-fixed.mp4")
#n+=1
files = sorted(files, key=lambda p: int(Path(p).stem.split("-")[0]))  # numeric order, not lexicographic
newfiles = []
for i in files:
newfiles.append(VideoFileClip(i))
newfiles.append(VideoFileClip("beep.mp4"))
files = newfiles
video = concatenate_videoclips(files)
video.write_videofile("out.mp4")
|
from models.mobilenet import *
def get_mobilenet(config):
"""
    Get a MobileNet backbone based on the config.
"""
model = None
if config.backbone == 'mobilenetv2':
model = mobilenet_v2()
return model
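# Hedged usage sketch (assumption, not part of the original module): config
# objects in this project appear to expose a `backbone` attribute, so a simple
# namespace stands in for config.py here.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_config = SimpleNamespace(backbone="mobilenetv2")  # hypothetical config
    model = get_mobilenet(demo_config)
    print(type(model).__name__ if model is not None else "unsupported backbone")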
|
from os.path import dirname, abspath
from imdbmovie.utilities.settings.site_config import SiteConfig
site_config = SiteConfig(config_file='development.yaml').get_config()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(abspath(__file__)))
# JWT Authentication Configuration
SECRET_KEY = site_config.get('secrets', {}).get('SECRET_KEY', '')
JWT_CONFIG = site_config.get('jwt_config', {})
PROPAGATE_EXCEPTIONS = site_config.get('exception', {}).get('PROPAGATE_EXCEPTIONS', True)
# imdb url
IMDBURL = site_config.get('imdb_url', {}).get('IMDB_URL', 'https://www.imdb.com/')
DEBUG = site_config.get('debug', {}).get('DEBUG', False)
SERVICE_HOST = site_config.get('running_host', {}).get('host', '0.0.0.0')
SERVICE_PORT = site_config.get('running_host', {}).get('port', 8080)
# Database configurations
DATABASE = site_config.get('database', {}).get('NAME', '')
ENGINE = site_config.get('database', {}).get('ENGINE', '')
USERNAME = site_config.get('database', {}).get('USER', '')
PASSWORD = site_config.get('database', {}).get('PASSWORD', '')
HOST = site_config.get('database', {}).get('HOST', '0.0.0.0')
PORT = site_config.get('database', {}).get('PORT', 3306)
# SQLAlchemy Configurations
SQLALCHEMY_DATABASE_URI = f'{ENGINE}://{USERNAME}:{PASSWORD}@{HOST}/{DATABASE}'
SQLALCHEMY_TRACK_MODIFICATIONS = site_config.get('database', {}).get('SQLALCHEMY_TRACK_MODIFICATIONS', False)
# Application definition
INSTALLED_APPS = site_config.get('installed_apps', [])
THIS_URL = str(SERVICE_HOST) + ":" + str(SERVICE_PORT)
|
PROJECT_ROOT = ".."
import os
import sys
sys.path.insert(0, os.path.abspath(PROJECT_ROOT))
project = "mulberry"
copyright = "2020, Hunter Damron"
author = "Hunter Damron"
with open(os.path.join(PROJECT_ROOT, "VERSION"), "r") as fh:
release = fh.read().strip()
version = ".".join(release.split(".")[:2]) # Get major version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "alabaster"
html_static_path = ["_static"]
|
from math import fabs
a = float(input("Enter a number: "))
b = float(input("Enter another number: "))
D = fabs(a - b)
if D <= 0.001:
print("Close.")
else:
print("Not Close.")
|
from django.forms import ModelForm
from .models import *
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class KundeForm(ModelForm):
"""
Eine Klasse zur Repräsentation einer Kundenform
...
Classes
----------
Meta:
Felderdefinitionen der Form
"""
class Meta:
"""
Eine Klasse zur Repräsentation der Metadaten
...
Attributes
----------
model : Kunde
Kundenobjekt
fields : string
Felder des Kunden
widgets : dictionary
Die unsichtbaren Felder
"""
model = Kunde
fields = '__all__'
widgets = {'user': forms.HiddenInput(),
'template': forms.HiddenInput()}
class MitarbeiterForm(ModelForm):
"""
Eine Klasse zur Repräsentation einer Mitarbeiterform
...
Classes
----------
Meta:
Felderdefinitionen der Form
"""
class Meta:
"""
Eine Klasse zur Repräsentation der Metadaten
...
Attributes
----------
model : Kunde
Mitarbeiterobjekt
fields : string
Felder des Mitarbeiters
"""
model = Mitarbeiter
fields = '__all__'
class AuftragForm(ModelForm):
"""
Eine Klasse zur Repräsentation einer Auftragform
...
Classes
----------
Meta:
Felderdefinitionen der Form
"""
class Meta:
"""
Eine Klasse zur Repräsentation der Metadaten
...
Attributes
----------
model : Auftrag
Kundenobjekt
fields : string
Felder des Auftrags
"""
model = Auftrag
fields = "__all__"
class RechnungForm(ModelForm):
"""
Eine Klasse zur Repräsentation einer Rechnungform
...
Classes
----------
Meta:
Felderdefinitionen der Form
"""
class Meta:
"""
Eine Klasse zur Repräsentation der Metadaten
...
Attributes
----------
model : Rechnung
Kundenobjekt
fields : string
Felder der Rechnung
"""
model = Rechnung
fields = "__all__"
class CreateUserForm(UserCreationForm):
"""
Eine Klasse zur Repräsentation einer Django-Userform
...
Attributes
----------
first_name : charfield
Vorname des Users
last_name : charfield
Nachname des Users
email : emailfield
Email-Adresse des Users
Classes
----------
Meta:
Felderdefinitionen der Form
"""
first_name = forms.CharField(required=True, label='Vorname')
last_name = forms.CharField(required=True, label='Nachname')
email = forms.EmailField(required=True, label='E-Mail')
class Meta:
"""
Eine Klasse zur Repräsentation der Metadaten
...
Attributes
----------
model : User
Kundenobjekt
fields : string
Felder des Users
"""
model = User
fields = ["username", "first_name", "last_name", "email", "password1", "password2"]
|
import contextlib
import logging
import os
import subprocess
import time
import torch
import torch.distributed as dist
import seq2seq.data.config as config
from seq2seq.inference.beam_search import SequenceGenerator
from seq2seq.utils import AverageMeter
from seq2seq.utils import barrier
from seq2seq.utils import get_rank
from seq2seq.utils import get_world_size
import seq2seq.inference.bleu
def gather_predictions(preds):
world_size = get_world_size()
if world_size > 1:
all_preds = preds.new(world_size * preds.size(0), preds.size(1))
all_preds_list = all_preds.chunk(world_size, dim=0)
dist.all_gather(all_preds_list, preds)
preds = all_preds
return preds
class Translator:
"""
Translator runs validation on test dataset, executes inference, optionally
computes BLEU score using sacrebleu.
"""
def __init__(self,
model,
tokenizer,
loader,
beam_size=5,
len_norm_factor=0.6,
len_norm_const=5.0,
cov_penalty_factor=0.1,
max_seq_len=50,
cuda=False,
print_freq=1,
dataset_dir=None,
save_path=None,
target_bleu=None):
self.model = model
self.tokenizer = tokenizer
self.loader = loader
self.insert_target_start = [config.BOS]
self.insert_src_start = [config.BOS]
self.insert_src_end = [config.EOS]
self.batch_first = model.batch_first
self.cuda = cuda
self.beam_size = beam_size
self.print_freq = print_freq
self.dataset_dir = dataset_dir
self.target_bleu = target_bleu
self.save_path = save_path
self.generator = SequenceGenerator(
model=self.model,
beam_size=beam_size,
max_seq_len=max_seq_len,
cuda=cuda,
len_norm_factor=len_norm_factor,
len_norm_const=len_norm_const,
cov_penalty_factor=cov_penalty_factor)
def run(self, calc_bleu=True, epoch=None, iteration=None, summary=False,
reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
"""
test_bleu = 0.
break_training = False
logging.info(f'Running evaluation on test set')
self.model.eval()
output = self.evaluate(epoch, iteration, summary)
# detokenize (BPE)
detok_output = []
for idx, pred in output:
pred = pred.tolist()
detok = self.tokenizer.detokenize(pred)
detok_output.append((idx, detok + '\n'))
if calc_bleu:
if detok_output:
indices, output = zip(*detok_output)
else:
indices, output = [], []
output = self.run_detokenizer(output)
reference_path = os.path.join(self.dataset_dir,
config.TGT_TEST_TARGET_FNAME)
bleu = seq2seq.inference.bleu.compute_bleu(output, indices,
reference_path)
logging.info(bleu)
test_bleu = round(bleu.score, 2)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu:.2f}')
if self.target_bleu and test_bleu >= self.target_bleu:
logging.info(f'Target accuracy reached')
break_training = True
logging.info(f'Finished evaluation on test set')
return test_bleu, break_training
def evaluate(self, epoch, iteration, summary):
"""
Runs evaluation on test dataset.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param summary: if True prints summary
"""
batch_time = AverageMeter(False)
tot_tok_per_sec = AverageMeter(False)
iterations = AverageMeter(False)
enc_seq_len = AverageMeter(False)
dec_seq_len = AverageMeter(False)
stats = {}
output = []
for i, (src, indices) in enumerate(self.loader):
translate_timer = time.time()
src, src_length = src
if self.batch_first:
batch_size = src.shape[0]
else:
batch_size = src.shape[1]
global_batch_size = batch_size * get_world_size()
beam_size = self.beam_size
bos = [self.insert_target_start] * (batch_size * beam_size)
bos = torch.LongTensor(bos)
if self.batch_first:
bos = bos.view(-1, 1)
else:
bos = bos.view(1, -1)
src_length = torch.LongTensor(src_length)
stats['total_enc_len'] = int(src_length.sum())
if self.cuda:
src = src.cuda()
src_length = src_length.cuda()
bos = bos.cuda()
with torch.no_grad():
context = self.model.encode(src, src_length)
context = [context, src_length, None]
if beam_size == 1:
generator = self.generator.greedy_search
else:
generator = self.generator.beam_search
preds, lengths, counter = generator(batch_size, bos, context)
stats['total_dec_len'] = lengths.sum().item()
stats['iters'] = counter
for idx, pred in zip(indices, preds):
output.append((idx, pred))
elapsed = time.time() - translate_timer
batch_time.update(elapsed, batch_size)
total_tokens = stats['total_dec_len'] + stats['total_enc_len']
ttps = total_tokens / elapsed
tot_tok_per_sec.update(ttps, batch_size)
iterations.update(stats['iters'])
enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)
dec_seq_len.update(stats['total_dec_len'] / batch_size, batch_size)
if i % self.print_freq == 0:
log = []
log += f'TEST '
if epoch is not None:
log += f'[{epoch}]'
if iteration is not None:
log += f'[{iteration}]'
log += f'[{i}/{len(self.loader)}]\t'
log += f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
log += f'Decoder iters {iterations.val:.1f} ({iterations.avg:.1f})\t'
log += f'Tok/s {tot_tok_per_sec.val:.0f} ({tot_tok_per_sec.avg:.0f})'
log = ''.join(log)
logging.info(log)
tot_tok_per_sec.reduce('sum')
enc_seq_len.reduce('mean')
dec_seq_len.reduce('mean')
batch_time.reduce('mean')
iterations.reduce('sum')
if summary and get_rank() == 0:
time_per_sentence = (batch_time.avg / global_batch_size)
log = []
log += f'TEST SUMMARY:\n'
log += f'Lines translated: {len(self.loader.dataset)}\t'
log += f'Avg total tokens/s: {tot_tok_per_sec.avg:.0f}\n'
log += f'Avg time per batch: {batch_time.avg:.3f} s\t'
log += f'Avg time per sentence: {1000*time_per_sentence:.3f} ms\n'
log += f'Avg encoder seq len: {enc_seq_len.avg:.2f}\t'
log += f'Avg decoder seq len: {dec_seq_len.avg:.2f}\t'
log += f'Total decoder iterations: {int(iterations.sum)}'
log = ''.join(log)
logging.info(log)
return output
def run_detokenizer(self, data):
"""
Executes moses detokenizer.
:param data: list of sentences to detokenize
"""
data = ''.join(data)
detok_path = os.path.join(self.dataset_dir, config.DETOKENIZER)
cmd = f'perl {detok_path}'
logging.info('Running moses detokenizer')
z = subprocess.run(cmd, shell=True, input=data.encode(),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
output = z.stdout.decode().splitlines()
return output
|
'''
You are given a positive integer. Your function should calculate the
product of the digits excluding any zeroes.
For example: The number given is 123405. The result will be
1*2*3*4*5=120 (don't forget to exclude zeroes).
Input: A positive integer.
Output: The product of the digits as an integer.
Example:
checkio(123405) == 120
checkio(999) == 729
checkio(1000) == 1
checkio(1111) == 1
How it is used: This task can teach you how to solve a problem with
simple data type conversion.
Precondition: 0 < number < 10**6
'''
from functools import reduce
def checkio(number):
return reduce(lambda x,y: x*y, [int(i) for i in str(number) if int(i)>0])
inputs = [
123405,
999,
1000,
1111,
1
]
outputs = map(checkio, inputs)
print(list(outputs))
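# Hedged alternative (not part of the original solution): on Python 3.8+ the
# same product can be written with math.prod; behaviour matches checkio for
# inputs that contain at least one non-zero digit.
from math import prod

def checkio_prod(number):
    return prod(int(d) for d in str(number) if d != '0')

assert all(checkio_prod(n) == checkio(n) for n in inputs)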
|
from ClickablePopup import *
from MarginPopup import *
from otp.otpbase import OTPLocalizer
import NametagGlobals
class WhisperPopup(ClickablePopup, MarginPopup):
WTNormal = 0
WTQuickTalker = 1
WTSystem = 2
WTBattleSOS = 3
WTEmote = 4
WTToontownBoardingGroup = 5
WTMagicWord = 6
WTGlobal = 7
def __init__(self, text, font, type):
ClickablePopup.__init__(self)
MarginPopup.__init__(self)
self.m_text = text
self.m_font = font
self.m_type = type
self.m_np_balloon = None
self.m_avname = ''
self.m_region = None
self.m_mouse_watcher = None
self.m_manager = None
self.cbNode = CallbackNode(self.getName() + '-cbNode')
self.cbNode.setCullCallback(PythonCallbackObject(self.cullCallback))
self.addChild(self.cbNode)
self.m_time = 0
self.m_culled = False
self.m_clickable = False
self.m_avid = 0
self.m_is_player = False
self.m_is_player_id = None
self.m_state = 3
self.m_objcode = 0
return
def setClickable(self, avatar_name, avatar_id, is_player_id=False):
self.m_clickable = True
self.m_avname = avatar_name
self.m_avid = avatar_id
self.m_is_player_id = is_player_id
self.m_state = 0
def click(self):
messenger.send('clickedWhisper', [self.m_avid, self.m_is_player])
def considerVisible(self):
if self.m_clickable and self.m_visible and self.m_mouse_watcher != NametagGlobals._mouse_watcher:
return False
if self.m_seq != NametagGlobals._margin_prop_seq:
self.m_seq = NametagGlobals._margin_prop_seq
self.updateContents()
return True
def manage(self, manager):
self.m_manager = manager
manager.managePopup(self)
if self.getType() in NametagGlobals._whisper_colors:
cc = self.getType()
else:
cc = self.WTSystem
fgColor, bgColor = NametagGlobals._whisper_colors[cc][self.m_state]
newBgColor = list(bgColor)
newBgColor[3] = 1.0
if self.m_type not in (self.WTNormal, self.WTGlobal):
messenger.send('addChatHistory', [None, None, None, newBgColor, self.m_text, self.m_type])
return
def unmanage(self, manager):
manager.unmanagePopup(self)
del self.m_manager
def cullCallback(self, *args):
if not self.m_culled:
self.m_culled = True
self.m_time = globalClock.getFrameTime()
def setVisible(self, value):
MarginPopup.setVisible(self, value)
self.updateContents()
if self.m_clickable:
if self.m_region:
if self.m_visible:
self.m_mouse_watcher = NametagGlobals._mouse_watcher
self.m_mouse_watcher.addRegion(self.m_region)
elif self.m_mouse_watcher:
self.m_mouse_watcher.removeRegion(self.m_region)
self.m_mouse_watcher = None
return
def setRegion(self, frame, sort):
if self.m_region:
self.m_region.setFrame(frame)
else:
self.m_region = self._createRegion(frame)
self.m_region.setSort(sort)
def updateContents(self):
if self.m_np_balloon:
self.m_np_balloon.removeNode()
self.m_np_balloon = None
if self.m_visible:
self.generateText(NametagGlobals._speech_balloon_2d, self.m_text, self.m_font)
return
def generateText(self, balloon, text, font):
text_color = Vec4(NametagGlobals.getWhisperFg(self.m_type, self.m_state))
balloon_color = Vec4(NametagGlobals.getWhisperBg(self.m_type, self.m_state))
balloon_color[3] = max(balloon_color[3], NametagGlobals._min_2d_alpha)
balloon_color[3] = min(balloon_color[3], NametagGlobals._max_2d_alpha)
balloon_result = balloon.generate(text, font, 8.0, text_color, balloon_color, False, False, 0, None, False, False, None)
self.m_np_balloon = self.m_np.attachNewNode(balloon_result)
v34 = self.m_cell_width * 0.22222222
v35 = balloon.m_text_height * balloon.m_hscale * 0.5
v57 = -balloon.m_hscale * 5.5
v16 = -(NametagGlobals._balloon_text_origin[2] + v35)
v64 = Mat4(v34, 0, 0, 0, 0, v34, 0, 0, 0, 0, v34, 0, v57 * v34, 0, v16 * v34, 1.0)
self.m_np_balloon.setMat(v64)
reducer = SceneGraphReducer()
reducer.applyAttribs(self.m_np_balloon.node())
if self.m_clickable:
v22 = self.m_np.getNetTransform().getMat()
v39, _, v41 = v22.xformPoint(Point3(v57 * v34, 0.0, v16 * v34))
v27, _, v28 = v22.xformPoint(Point3(-v57 * v34, 0.0, -v16 * v34))
self.setRegion(Vec4(v39, v27, v41, v28), 0)
return
def setObjectCode(self, objcode):
self.m_objcode = objcode
def getObjectCode(self):
return self.m_objcode
def getScore(self):
result = 2000
if self.m_culled:
elapsed = globalClock.getFrameTime() - self.m_time
result -= elapsed * 200
if elapsed > 15.0:
self.unmanage(self.m_manager)
return result
|
import sys
from math import sqrt
EPS = 1e-9
class sp:
    def __init__(self):
        self.x, self.r = 0, 0
        self.x_l, self.x_r = 0.0, 0.0
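# The loop below implements the classic greedy interval cover: a sprinkler with
# radius r and centre x waters the strip over the interval
# [x - sqrt(r^2 - (w/2)^2), x + sqrt(r^2 - (w/2)^2)] (degenerate if 2r < w).
# Sprinklers are sorted by left endpoint; among all intervals that start at or
# before the currently covered prefix, the one reaching farthest right is
# chosen, until the whole length l is covered or no progress is possible.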
if __name__ == '__main__':
for line in sys.stdin:
n, l, w = map(int, line.strip('\n').split())
sprinkler = [sp() for _ in range(n)]
for i in range(n):
sprinkler[i].x, sprinkler[i].r = map(int, sys.stdin.readline().strip('\n').split())
if 2*sprinkler[i].r >= w:
d_x = sqrt(sprinkler[i].r*sprinkler[i].r - (w/2.0)*(w/2.0))
sprinkler[i].x_l = sprinkler[i].x-d_x
sprinkler[i].x_r = sprinkler[i].x+d_x
else:
sprinkler[i].x_l = sprinkler[i].x_r = sprinkler[i].x
def cmp(a: sp):
return a.x_l, -a.x_r
sprinkler = sorted(sprinkler, key=cmp)
possible = True
covered = 0.0
ans = 0
skip = 0
for i in range(n):
if not possible:
break
if covered > l:
break
if i < skip:
continue
if sprinkler[i].x_r < covered+EPS:
continue
if sprinkler[i].x_l < covered+EPS:
max_r = -1.0
skip = i
for j in range(i, n):
if not (sprinkler[j].x_l < covered+EPS):
break
if sprinkler[j].x_r > max_r:
max_r = sprinkler[j].x_r
skip = j
ans += 1
covered = max_r
else:
possible = False
if not possible or covered < l:
print(-1)
else:
print(ans)
|
from updater_test import DBManager
import numpy as np
df = DBManager('final_datasets/all_rirs_final.csv', reg_time=True, time=True, step='second')
df.first_policy()
df = DBManager('final_datasets/all_standard_policy.csv', reg_time=True, time=True, step='second')
df.apply_afrinic_policy()
df = DBManager('final_datasets/administrative_lifetimes.csv').df
cut_df = df[df.startdate <= '2021-03-01'].copy()
cut_df['enddate'] = np.where((cut_df.enddate > '2021-03-01'), '2021-03-01', cut_df.enddate)
cut_df.to_csv('final_datasets/administrative_lifetimes.csv', index=False)
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl.testing import absltest
import core
import rewards
import test_util
from environments import attention_allocation
from environments import lending
from metrics import value_tracking_metrics
from gym import spaces
import numpy as np
def _modifier_fn(selected_variable, group_id, env):
del env # Unused argument
if group_id == 0:
return -0.2 * (1 + selected_variable)
else:
return 0.3 * (1 + selected_variable)
def _selection_fn(history_step):
return history_step.state.x
def _stratify_fn(history_step):
return [1 - x for x in history_step.state.x]
def _setup_test_simulation(dim=1, calc_mean=False, modifier_fn=_modifier_fn):
env = test_util.DeterministicDummyEnv(test_util.DummyParams(dim=dim))
env.set_scalar_reward(rewards.NullReward())
metric = value_tracking_metrics.AggregatorMetric(
env=env,
modifier_fn=modifier_fn,
selection_fn=_selection_fn,
stratify_fn=_stratify_fn,
calc_mean=calc_mean)
return env, metric
class AggregatingMetricsTest(absltest.TestCase):
"""Tests for aggregating metrics.
These tests use the DeterministicDummyEnv, which alternates between 1 and 0
or a list of 0's and 1's with length given by dim.
  The values for the tests are calculated for each group as the
  sum/mean over the values passed by the modifier_fn. The modifier_fn receives
  values from the selection function.
For example:
For getting the sum from a list with dim=10 and over 10 steps, with
modifier function as defined, we would expect values to be:
group 0 = sum([-0.2 * (1 + 0)] * 10 for 5 steps) = -20
group 1 = sum([0.3 * (1 + 1)] * 10 for 5 steps) = 15
Similarly, without modifier function for a list it would be:
group 0 = sum([0] * 10 for 5 steps) = 0
group 1 = sum([1] * 10 for 5 steps) = 50
"""
def test_aggregate_metric_give_correct_sum_value_for_list(self):
"""Test aggregate metric with sum for a list.
Expected values:
group 0 = sum([-0.2 * (1 + 0)] * 10 for 5 steps) = -20
group 1 = sum([0.3 * (1 + 1)] * 10 for 5 steps) = 15
"""
env, metric = _setup_test_simulation(dim=10, calc_mean=False)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [-20, 15], delta=1e-4)
def test_aggregate_metric_give_correct_sum_value_for_atomic_value(self):
"""Test aggregate metric with sum for a atomic values.
Expected values:
group 0 = sum([-0.2 * (1 + 0)] * 1 for 5 steps) = -2
group 1 = sum([0.3 * (1 + 1)] * 1 for 5 steps) = 1.5
"""
env, metric = _setup_test_simulation(dim=1, calc_mean=False)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [-2, 1.5], delta=1e-4)
def test_aggregate_metric_give_correct_mean_value_for_list(self):
"""Test aggregate metric with mean for a list.
Expected values:
group 0 = mean([-0.2 * (1 + 0)] * 10 for 5 steps) = -0.4
group 1 = mean([0.3 * (1 + 1)] * 10 for 5 steps) = 0.3
"""
env, metric = _setup_test_simulation(dim=10, calc_mean=True)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [-0.4, 0.3], delta=1e-4)
def test_aggregate_metric_give_correct_mean_value_for_atomic_value(self):
"""Test aggregate metric with mean for a atomic values.
Expected values:
group 0 = mean([-0.2 * (1 + 0)] * 10 for 5 steps) = -0.4
group 1 = mean([0.3 * (1 + 1)] * 10 for 5 steps) = 0.3
"""
env, metric = _setup_test_simulation(dim=1, calc_mean=True)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [-0.4, 0.3], delta=1e-4)
def test_aggregate_metric_give_correct_result_for_list_no_modifier(self):
"""Test aggregate metric with mean for a list with no modifier function.
Expected values:
group 0 = sum([0] * 10 for 5 steps) = 0
group 1 = sum([1] * 10 for 5 steps) = 50
"""
env, metric = _setup_test_simulation(
dim=10, calc_mean=False, modifier_fn=None)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [0, 50], delta=1e-4)
def test_aggregate_metric_give_correct_result_for_atomic_value_no_modifier(
self):
"""Test aggregate metric with sum for a atomic values with no modifier fn.
Expected values:
group 0 = sum([0] * 1 for 5 steps) = 0
group 1 = sum([1] * 1 for 5 steps) = 5
"""
env, metric = _setup_test_simulation(
dim=1, calc_mean=False, modifier_fn=None)
measurement = test_util.run_test_simulation(
env=env, agent=None, metric=metric, num_steps=10)
logging.info('Measurement result: %s.', measurement)
self.assertSequenceAlmostEqual(
sorted(measurement.values()), [0, 5], delta=1e-4)
class SummingMetricsTest(absltest.TestCase):
def test_summing_metric_give_correct_sum_dummy_env(self):
env = test_util.DeterministicDummyEnv(test_util.DummyParams(dim=1))
env.set_scalar_reward(rewards.NullReward())
metric = value_tracking_metrics.SummingMetric(
env=env, selection_fn=_selection_fn)
measurement = test_util.run_test_simulation(
env, agent=None, metric=metric, seed=0)
self.assertTrue(np.all(np.equal(measurement, [5])))
def test_summing_metric_give_correct_sum_alloc_env(self):
env = attention_allocation.LocationAllocationEnv()
def _attn_alloc_selection_fn(step):
state, _ = step
return state.incidents_seen
metric = value_tracking_metrics.SummingMetric(
env=env, selection_fn=_attn_alloc_selection_fn)
measurement = test_util.run_test_simulation(
env, agent=None, metric=metric, seed=0)
self.assertTrue(np.all(np.equal(measurement, [4, 5])))
class _XState(core.State):
"""State with a single variable x."""
def __init__(self):
self.x = 0
class IncreasingEnv(core.FairnessEnv):
"""Environment with a single state variable that increase at each step."""
def __init__(self):
self.action_space = spaces.Discrete(1)
super(IncreasingEnv, self).__init__()
self.state = _XState()
def _step_impl(self, state, action):
"""Increase state.x by 1."""
del action
state.x += 1
return state
class ValueChangeTest(absltest.TestCase):
def test_value_change_measures_correctly_unnormalized(self):
env = IncreasingEnv()
metric = value_tracking_metrics.ValueChange(
env, 'x', normalize_by_steps=False)
# Running step 11 times records 10 steps in history because the 11th is
# stored in current state.
for _ in range(11):
env.step(env.action_space.sample())
self.assertEqual(metric.measure(env), 10)
def test_value_change_measures_correctly_normalized(self):
env = IncreasingEnv()
metric = value_tracking_metrics.ValueChange(
env, 'x', normalize_by_steps=True)
for _ in range(17):
env.step(action=env.action_space.sample())
self.assertAlmostEqual(metric.measure(env), 1.0)
def test_metric_can_interact_with_lending(self):
env = lending.DelayedImpactEnv()
metric = value_tracking_metrics.ValueChange(env, 'bank_cash')
test_util.run_test_simulation(env=env, metric=metric)
class FinalValueMetricTest(absltest.TestCase):
def test_returns_correct_final_value(self):
env = IncreasingEnv()
metric = value_tracking_metrics.FinalValueMetric(env, 'x')
for _ in range(5):
env.step(env.action_space.sample())
self.assertEqual(metric.measure(env), 4)
def test_returns_correct_final_value_with_realign(self):
env = IncreasingEnv()
metric = value_tracking_metrics.FinalValueMetric(
env, 'x', realign_fn=lambda h: h[1:] + [h[0]])
for _ in range(5):
env.step(env.action_space.sample())
self.assertEqual(metric.measure(env), 0)
if __name__ == '__main__':
absltest.main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:\git\softmech\nanoindentation\nano.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1680, 917)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../.designer/backup/ico.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.groupBox_7 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_7.sizePolicy().hasHeightForWidth())
self.groupBox_7.setSizePolicy(sizePolicy)
self.groupBox_7.setObjectName("groupBox_7")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.groupBox_7)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.consolle = QtWidgets.QPushButton(self.groupBox_7)
self.consolle.setObjectName("consolle")
self.horizontalLayout_10.addWidget(self.consolle)
self.b_protocol = QtWidgets.QPushButton(self.groupBox_7)
self.b_protocol.setObjectName("b_protocol")
self.horizontalLayout_10.addWidget(self.b_protocol)
self.b_load = QtWidgets.QPushButton(self.groupBox_7)
self.b_load.setObjectName("b_load")
self.horizontalLayout_10.addWidget(self.b_load)
self.horizontalLayout_9.addWidget(self.groupBox_7)
spacerItem = QtWidgets.QSpacerItem(1110, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem)
self.groupBox_8 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_8.setObjectName("groupBox_8")
self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.groupBox_8)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.b_saveprotocol = QtWidgets.QPushButton(self.groupBox_8)
self.b_saveprotocol.setObjectName("b_saveprotocol")
self.horizontalLayout_11.addWidget(self.b_saveprotocol)
self.b_saveEdata = QtWidgets.QPushButton(self.groupBox_8)
self.b_saveEdata.setObjectName("b_saveEdata")
self.horizontalLayout_11.addWidget(self.b_saveEdata)
self.b_saveFdata = QtWidgets.QPushButton(self.groupBox_8)
self.b_saveFdata.setObjectName("b_saveFdata")
self.horizontalLayout_11.addWidget(self.b_saveFdata)
self.b_saveexperiment = QtWidgets.QPushButton(self.groupBox_8)
self.b_saveexperiment.setObjectName("b_saveexperiment")
self.horizontalLayout_11.addWidget(self.b_saveexperiment)
self.horizontalLayout_9.addWidget(self.groupBox_8)
self.verticalLayout_9.addLayout(self.horizontalLayout_9)
spacerItem1 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
self.verticalLayout_9.addItem(spacerItem1)
self.splitter = QtWidgets.QSplitter(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.g_fz_all = PlotWidget(self.layoutWidget)
self.g_fz_all.setObjectName("g_fz_all")
self.verticalLayout.addWidget(self.g_fz_all)
self.g_fz_single = PlotWidget(self.layoutWidget)
self.g_fz_single.setObjectName("g_fz_single")
self.verticalLayout.addWidget(self.g_fz_single)
self.layoutWidget1 = QtWidgets.QWidget(self.splitter)
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.g_fizi_all = PlotWidget(self.layoutWidget1)
self.g_fizi_all.setObjectName("g_fizi_all")
self.verticalLayout_2.addWidget(self.g_fizi_all)
self.g_fizi_single = PlotWidget(self.layoutWidget1)
self.g_fizi_single.setEnabled(True)
self.g_fizi_single.setInteractive(True)
self.g_fizi_single.setObjectName("g_fizi_single")
self.verticalLayout_2.addWidget(self.g_fizi_single)
self.layoutWidget2 = QtWidgets.QWidget(self.splitter)
self.layoutWidget2.setObjectName("layoutWidget2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget2)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_13 = QtWidgets.QLabel(self.layoutWidget2)
self.label_13.setObjectName("label_13")
self.horizontalLayout_3.addWidget(self.label_13)
self.es_interpolate = QtWidgets.QCheckBox(self.layoutWidget2)
self.es_interpolate.setText("")
self.es_interpolate.setChecked(True)
self.es_interpolate.setObjectName("es_interpolate")
self.horizontalLayout_3.addWidget(self.es_interpolate)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_8 = QtWidgets.QLabel(self.layoutWidget2)
self.label_8.setObjectName("label_8")
self.horizontalLayout_2.addWidget(self.label_8)
self.es_win = QtWidgets.QSpinBox(self.layoutWidget2)
self.es_win.setMinimum(3)
self.es_win.setMaximum(9999)
self.es_win.setProperty("value", 21)
self.es_win.setObjectName("es_win")
self.horizontalLayout_2.addWidget(self.es_win)
self.horizontalLayout_4.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_21 = QtWidgets.QLabel(self.layoutWidget2)
self.label_21.setObjectName("label_21")
self.horizontalLayout.addWidget(self.label_21)
self.es_order = QtWidgets.QSpinBox(self.layoutWidget2)
self.es_order.setMinimum(1)
self.es_order.setMaximum(9)
self.es_order.setProperty("value", 3)
self.es_order.setObjectName("es_order")
self.horizontalLayout.addWidget(self.es_order)
self.horizontalLayout_4.addLayout(self.horizontalLayout)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.g_eze_all = PlotWidget(self.layoutWidget2)
self.g_eze_all.setEnabled(True)
font = QtGui.QFont()
font.setBold(True)
self.g_eze_all.setFont(font)
self.g_eze_all.setAcceptDrops(True)
self.g_eze_all.setInteractive(True)
self.g_eze_all.setObjectName("g_eze_all")
self.verticalLayout_3.addWidget(self.g_eze_all)
self.g_eze_single = PlotWidget(self.layoutWidget2)
self.g_eze_single.setEnabled(True)
self.g_eze_single.setObjectName("g_eze_single")
self.verticalLayout_3.addWidget(self.g_eze_single)
self.layoutWidget3 = QtWidgets.QWidget(self.splitter)
self.layoutWidget3.setObjectName("layoutWidget3")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.layoutWidget3)
self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.g_scatter1 = PlotWidget(self.layoutWidget3)
self.g_scatter1.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.g_scatter1.sizePolicy().hasHeightForWidth())
self.g_scatter1.setSizePolicy(sizePolicy)
self.g_scatter1.setLineWidth(0)
self.g_scatter1.setMidLineWidth(0)
self.g_scatter1.setAlignment(QtCore.Qt.AlignCenter)
self.g_scatter1.setObjectName("g_scatter1")
self.verticalLayout_8.addWidget(self.g_scatter1)
self.g_scatter2 = PlotWidget(self.layoutWidget3)
self.g_scatter2.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.g_scatter2.sizePolicy().hasHeightForWidth())
self.g_scatter2.setSizePolicy(sizePolicy)
self.g_scatter2.setLineWidth(0)
self.g_scatter2.setMidLineWidth(0)
self.g_scatter2.setAlignment(QtCore.Qt.AlignCenter)
self.g_scatter2.setObjectName("g_scatter2")
self.verticalLayout_8.addWidget(self.g_scatter2)
self.verticalLayout_9.addWidget(self.splitter)
self.verticalLayout_13.addLayout(self.verticalLayout_9)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.sel_filter = QtWidgets.QComboBox(self.groupBox_5)
self.sel_filter.setLayoutDirection(QtCore.Qt.LeftToRight)
self.sel_filter.setAutoFillBackground(False)
self.sel_filter.setFrame(True)
self.sel_filter.setObjectName("sel_filter")
self.sel_filter.addItem("")
self.verticalLayout_5.addWidget(self.sel_filter)
self.tabfilters = QtWidgets.QTabWidget(self.groupBox_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabfilters.sizePolicy().hasHeightForWidth())
self.tabfilters.setSizePolicy(sizePolicy)
self.tabfilters.setTabPosition(QtWidgets.QTabWidget.North)
self.tabfilters.setUsesScrollButtons(True)
self.tabfilters.setDocumentMode(False)
self.tabfilters.setTabsClosable(True)
self.tabfilters.setMovable(True)
self.tabfilters.setTabBarAutoHide(False)
self.tabfilters.setObjectName("tabfilters")
self.verticalLayout_5.addWidget(self.tabfilters)
self.horizontalLayout_5.addWidget(self.groupBox_5)
self.boxCP = QtWidgets.QGroupBox(self.centralwidget)
self.boxCP.setObjectName("boxCP")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.boxCP)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.sel_cp = QtWidgets.QComboBox(self.boxCP)
self.sel_cp.setObjectName("sel_cp")
self.sel_cp.addItem("")
self.verticalLayout_4.addWidget(self.sel_cp)
self.box_cp = QtWidgets.QGroupBox(self.boxCP)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box_cp.sizePolicy().hasHeightForWidth())
self.box_cp.setSizePolicy(sizePolicy)
self.box_cp.setObjectName("box_cp")
self.verticalLayout_4.addWidget(self.box_cp)
self.setZeroForce = QtWidgets.QCheckBox(self.boxCP)
self.setZeroForce.setChecked(True)
self.setZeroForce.setObjectName("setZeroForce")
self.verticalLayout_4.addWidget(self.setZeroForce)
self.horizontalLayout_5.addWidget(self.boxCP)
self.horizontalLayout_8.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.groupBox_10 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_10.setObjectName("groupBox_10")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.groupBox_10)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.sel_fmodel = QtWidgets.QComboBox(self.groupBox_10)
self.sel_fmodel.setObjectName("sel_fmodel")
self.sel_fmodel.addItem("")
self.verticalLayout_6.addWidget(self.sel_fmodel)
self.box_fmodel = QtWidgets.QGroupBox(self.groupBox_10)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box_fmodel.sizePolicy().hasHeightForWidth())
self.box_fmodel.setSizePolicy(sizePolicy)
self.box_fmodel.setObjectName("box_fmodel")
self.verticalLayout_6.addWidget(self.box_fmodel)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label_9 = QtWidgets.QLabel(self.groupBox_10)
self.label_9.setObjectName("label_9")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_9)
self.zi_min = QtWidgets.QSpinBox(self.groupBox_10)
self.zi_min.setMinimum(0)
self.zi_min.setMaximum(9999)
self.zi_min.setProperty("value", 0)
self.zi_min.setObjectName("zi_min")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.zi_min)
self.label_11 = QtWidgets.QLabel(self.groupBox_10)
self.label_11.setObjectName("label_11")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_11)
self.zi_max = QtWidgets.QSpinBox(self.groupBox_10)
self.zi_max.setMinimum(0)
self.zi_max.setMaximum(9999)
self.zi_max.setProperty("value", 800)
self.zi_max.setObjectName("zi_max")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.zi_max)
self.verticalLayout_6.addLayout(self.formLayout)
self.horizontalLayout_6.addWidget(self.groupBox_10)
self.groupBox_11 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_11.setObjectName("groupBox_11")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.groupBox_11)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.sel_emodel = QtWidgets.QComboBox(self.groupBox_11)
self.sel_emodel.setObjectName("sel_emodel")
self.sel_emodel.addItem("")
self.verticalLayout_7.addWidget(self.sel_emodel)
self.box_emodel = QtWidgets.QGroupBox(self.groupBox_11)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box_emodel.sizePolicy().hasHeightForWidth())
self.box_emodel.setSizePolicy(sizePolicy)
self.box_emodel.setObjectName("box_emodel")
self.verticalLayout_7.addWidget(self.box_emodel)
self.formLayout_3 = QtWidgets.QFormLayout()
self.formLayout_3.setObjectName("formLayout_3")
self.label_12 = QtWidgets.QLabel(self.groupBox_11)
self.label_12.setObjectName("label_12")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_12)
self.ze_min = QtWidgets.QSpinBox(self.groupBox_11)
self.ze_min.setMinimum(0)
self.ze_min.setMaximum(9999)
self.ze_min.setProperty("value", 0)
self.ze_min.setObjectName("ze_min")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.ze_min)
self.label_14 = QtWidgets.QLabel(self.groupBox_11)
self.label_14.setObjectName("label_14")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_14)
self.ze_max = QtWidgets.QSpinBox(self.groupBox_11)
self.ze_max.setMinimum(0)
self.ze_max.setMaximum(9999)
self.ze_max.setProperty("value", 800)
self.ze_max.setObjectName("ze_max")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.ze_max)
self.verticalLayout_7.addLayout(self.formLayout_3)
self.horizontalLayout_6.addWidget(self.groupBox_11)
self.horizontalLayout_8.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.f_params = QtWidgets.QGroupBox(self.centralwidget)
self.f_params.setObjectName("f_params")
self.horizontalLayout_7.addWidget(self.f_params)
self.e_params = QtWidgets.QGroupBox(self.centralwidget)
self.e_params.setObjectName("e_params")
self.horizontalLayout_7.addWidget(self.e_params)
self.horizontalLayout_8.addLayout(self.horizontalLayout_7)
self.groupBox_9 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_9.setObjectName("groupBox_9")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.groupBox_9)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.label = QtWidgets.QLabel(self.groupBox_9)
self.label.setObjectName("label")
self.verticalLayout_12.addWidget(self.label)
self.slid_cv = QtWidgets.QSlider(self.groupBox_9)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slid_cv.sizePolicy().hasHeightForWidth())
self.slid_cv.setSizePolicy(sizePolicy)
self.slid_cv.setOrientation(QtCore.Qt.Horizontal)
self.slid_cv.setObjectName("slid_cv")
self.verticalLayout_12.addWidget(self.slid_cv)
self.label_2 = QtWidgets.QLabel(self.groupBox_9)
self.label_2.setObjectName("label_2")
self.verticalLayout_12.addWidget(self.label_2)
self.slid_alpha = QtWidgets.QSlider(self.groupBox_9)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slid_alpha.sizePolicy().hasHeightForWidth())
self.slid_alpha.setSizePolicy(sizePolicy)
self.slid_alpha.setMaximum(255)
self.slid_alpha.setSingleStep(1)
self.slid_alpha.setProperty("value", 100)
self.slid_alpha.setOrientation(QtCore.Qt.Horizontal)
self.slid_alpha.setObjectName("slid_alpha")
self.verticalLayout_12.addWidget(self.slid_alpha)
self.horizontalLayout_8.addWidget(self.groupBox_9)
self.verticalLayout_13.addLayout(self.horizontalLayout_8)
self.verticalLayout_14.addLayout(self.verticalLayout_13)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabfilters.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "SoftMech2021"))
self.groupBox_7.setTitle(_translate("MainWindow", "Load"))
self.consolle.setText(_translate("MainWindow", "Consolle"))
self.b_protocol.setText(_translate("MainWindow", "Load protocol"))
self.b_load.setText(_translate("MainWindow", "Load experiment"))
self.groupBox_8.setTitle(_translate("MainWindow", "Save"))
self.b_saveprotocol.setText(_translate("MainWindow", "Save protocol"))
self.b_saveEdata.setText(_translate("MainWindow", "Save Elastography analysis"))
self.b_saveFdata.setText(_translate("MainWindow", "Save Indentation Analysis"))
self.b_saveexperiment.setText(_translate("MainWindow", "Save experiment"))
self.label_13.setText(_translate("MainWindow", "Interpolate"))
self.label_8.setText(_translate("MainWindow", "Window"))
self.label_21.setText(_translate("MainWindow", "Order"))
self.groupBox_5.setTitle(_translate("MainWindow", "Filters"))
self.sel_filter.setItemText(0, _translate("MainWindow", "-- add --"))
self.boxCP.setTitle(_translate("MainWindow", "Contact Point "))
self.sel_cp.setItemText(0, _translate("MainWindow", "-- none --"))
self.setZeroForce.setText(_translate("MainWindow", "Set CP force to 0"))
self.groupBox_10.setTitle(_translate("MainWindow", "Force-ind model"))
self.sel_fmodel.setItemText(0, _translate("MainWindow", "-- none --"))
self.label_9.setText(_translate("MainWindow", "Min ind [nm]"))
self.label_11.setText(_translate("MainWindow", "Max ind [nm]"))
self.groupBox_11.setTitle(_translate("MainWindow", "Elasticity Spectra model"))
self.sel_emodel.setItemText(0, _translate("MainWindow", "-- none --"))
self.label_12.setText(_translate("MainWindow", "Min ind [nm]"))
self.label_14.setText(_translate("MainWindow", "Max ind [nm]"))
self.f_params.setTitle(_translate("MainWindow", "Force-ind model params"))
self.e_params.setTitle(_translate("MainWindow", "Elasticity Spectra model params"))
self.groupBox_9.setTitle(_translate("MainWindow", "Sliders"))
self.label.setText(_translate("MainWindow", "Slide through curves"))
self.label_2.setText(_translate("MainWindow", "Trasnparency "))
from pyqtgraph import PlotWidget
|
from django.shortcuts import render
from blog.models import Author, Book
# Create your views here.
def show_author(request):
    authors = Author.objects.all()
    return render(request, 'show_author.html', {'authors': authors})
def show_book(request):
    books = Book.objects.all()
    return render(request, 'show_book.html', {'books': books})
|
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.stats import kendalltau
import torch
is_cuda = torch.cuda.is_available()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def sinkhorn_unrolled(c, a, b, num_sink, lambd_sink):
"""
An implementation of a Sinkhorn layer with Automatic Differentiation (AD).
The format of input parameters and outputs is equivalent to the 'Sinkhorn' module below.
"""
log_p = -c / lambd_sink
log_a = torch.log(a).unsqueeze(dim=-1)
log_b = torch.log(b).unsqueeze(dim=-2)
for _ in range(num_sink):
log_p = log_p - (torch.logsumexp(log_p, dim=-2, keepdim=True) - log_b)
log_p = log_p - (torch.logsumexp(log_p, dim=-1, keepdim=True) - log_a)
p = torch.exp(log_p)
return p
class Sinkhorn(torch.autograd.Function):
"""
An implementation of a Sinkhorn layer with our custom backward module, based on implicit differentiation
:param c: input cost matrix, size [*,m,n], where * are arbitrarily many batch dimensions
:param a: first input marginal, size [*,m]
:param b: second input marginal, size [*,n]
:param num_sink: number of Sinkhorn iterations
:param lambd_sink: entropy regularization weight
:return: optimized soft permutation matrix
"""
@staticmethod
def forward(ctx, c, a, b, num_sink, lambd_sink):
log_p = -c / lambd_sink
log_a = torch.log(a).unsqueeze(dim=-1)
log_b = torch.log(b).unsqueeze(dim=-2)
for _ in range(num_sink):
log_p -= (torch.logsumexp(log_p, dim=-2, keepdim=True) - log_b)
log_p -= (torch.logsumexp(log_p, dim=-1, keepdim=True) - log_a)
p = torch.exp(log_p)
ctx.save_for_backward(p, torch.sum(p, dim=-1), torch.sum(p, dim=-2))
ctx.lambd_sink = lambd_sink
return p
@staticmethod
def backward(ctx, grad_p):
p, a, b = ctx.saved_tensors
m, n = p.shape[-2:]
batch_shape = list(p.shape[:-2])
grad_p *= -1 / ctx.lambd_sink * p
K = torch.cat((torch.cat((torch.diag_embed(a), p), dim=-1),
torch.cat((p.transpose(-2, -1), torch.diag_embed(b)), dim=-1)), dim=-2)[..., :-1, :-1]
t = torch.cat((grad_p.sum(dim=-1), grad_p[..., :, :-1].sum(dim=-2)), dim=-1).unsqueeze(-1)
        grad_ab = torch.linalg.solve(K, t)  # equivalent to torch.solve(t, K) in older PyTorch
grad_a = grad_ab[..., :m, :]
grad_b = torch.cat((grad_ab[..., m:, :], torch.zeros(batch_shape + [1, 1], device=device, dtype=torch.float32)), dim=-2)
U = grad_a + grad_b.transpose(-2, -1)
grad_p -= p * U
grad_a = -ctx.lambd_sink * grad_a.squeeze(dim=-1)
grad_b = -ctx.lambd_sink * grad_b.squeeze(dim=-1)
return grad_p, grad_a, grad_b, None, None, None
def to_var(x):
if is_cuda:
x = x.cuda()
return x
def my_sample_gumbel(shape, eps=1e-20):
"""Samples arbitrary-shaped standard gumbel variables.
Args:
shape: list of integers
eps: float, for numerical stability
Returns:
A sample of standard Gumbel random variables
"""
#Sample from Gumbel(0, 1)
U = torch.rand(shape).float()
return -torch.log(eps - torch.log(U + eps))
def simple_sinkhorn(MatrixA, n_iter = 20):
#performing simple Sinkhorn iterations.
for i in range(n_iter):
MatrixA /= MatrixA.sum(dim=1, keepdim=True)
MatrixA /= MatrixA.sum(dim=2, keepdim=True)
return MatrixA
def sinkhorn(log_alpha, n_iters = 20):
# torch version
"""Performs incomplete Sinkhorn normalization to log_alpha.
By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via the successive row and column
normalization.
-To ensure positivity, the effective input to sinkhorn has to be
exp(log_alpha) (element wise).
-However, for stability, sinkhorn works in the log-space. It is only at
return time that entries are exponentiated.
[1] Sinkhorn, Richard and Knopp, Paul.
Concerning nonnegative matrices and doubly stochastic
matrices. Pacific Journal of Mathematics, 1967
Args:
log_alpha: a 2D tensor of shape [N, N]
    n_iters: number of sinkhorn iterations (in practice, as few as 20
      iterations are needed to achieve decent convergence for N~100)
Returns:
A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are
    converted to 3D tensors with batch_size equal to 1)
"""
n = log_alpha.size()[1]
log_alpha = log_alpha.view(-1, n, n)
for i in range(n_iters):
log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1)
log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n)
return torch.exp(log_alpha)
def gumbel_sinkhorn(log_alpha, temp=1.0, n_samples=1, noise_factor=1.0, n_iters=20, squeeze=True):
"""Random doubly-stochastic matrices via gumbel noise.
In the zero-temperature limit sinkhorn(log_alpha/temp) approaches
a permutation matrix. Therefore, for low temperatures this method can be
seen as an approximate sampling of permutation matrices, where the
distribution is parameterized by the matrix log_alpha
The deterministic case (noise_factor=0) is also interesting: it can be
shown that lim t->0 sinkhorn(log_alpha/t) = M, where M is a
permutation matrix, the solution of the
matching problem M=arg max_M sum_i,j log_alpha_i,j M_i,j.
Therefore, the deterministic limit case of gumbel_sinkhorn can be seen
as approximate solving of a matching problem, otherwise solved via the
Hungarian algorithm.
Warning: the convergence holds true in the limit case n_iters = infty.
Unfortunately, in practice n_iter is finite which can lead to numerical
    instabilities, mostly if temp is very low. Those manifest as
    pseudo-convergence, or some rows/columns converging to fractional entries
    (e.g. a row having two entries of 0.5 instead of a single 1.0).
    To minimize those effects, increase n_iters when decreasing temp.
    On the other hand, too low a temperature usually leads to high variance in
    the gradients, so it is better not to choose temperatures that are too low.
Args:
log_alpha: 2D tensor (a matrix of shape [N, N])
or 3D tensor (a batch of matrices of shape = [batch_size, N, N])
temp: temperature parameter, a float.
n_samples: number of samples
noise_factor: scaling factor for the gumbel samples. Mostly to explore
different degrees of randomness (and the absence of randomness, with
noise_factor=0)
n_iters: number of sinkhorn iterations. Should be chosen carefully, in
inverse correspondence with temp to avoid numerical instabilities.
    squeeze: a boolean, if True and there is a single sample, the output will
      remain a 3D tensor.
Returns:
sink: a 4D tensor of [batch_size, n_samples, N, N] i.e.
batch_size *n_samples doubly-stochastic matrices. If n_samples = 1 and
squeeze = True then the output is 3D.
log_alpha_w_noise: a 4D tensor of [batch_size, n_samples, N, N] of
      noisy samples of log_alpha, divided by the temperature parameter. If
n_samples = 1 then the output is 3D.
"""
n = log_alpha.size()[1]
log_alpha = log_alpha.view(-1, n, n)
batch_size = log_alpha.size()[0]
log_alpha_w_noise = log_alpha.repeat(n_samples, 1, 1)
if noise_factor == 0:
noise = 0.0
else:
noise = to_var(my_sample_gumbel([n_samples*batch_size, n, n])*noise_factor)
log_alpha_w_noise = log_alpha_w_noise + noise
log_alpha_w_noise = log_alpha_w_noise / temp
my_log_alpha_w_noise = log_alpha_w_noise.clone()
sink = sinkhorn(my_log_alpha_w_noise)
if n_samples > 1 or squeeze is False:
sink = sink.view(n_samples, batch_size, n, n)
sink = torch.transpose(sink, 1, 0)
log_alpha_w_noise = log_alpha_w_noise.view(n_samples, batch_size, n, n)
log_alpha_w_noise = torch.transpose(log_alpha_w_noise, 1, 0)
return sink, log_alpha_w_noise
def sample_uniform_and_order(n_lists, n_numbers, prob_inc):
"""Samples uniform random numbers, return sorted lists and the indices of their original values
Returns a 2-D tensor of n_lists lists of n_numbers sorted numbers in the [0,1]
interval, each of them having n_numbers elements.
Lists are increasing with probability prob_inc.
It does so by first sampling uniform random numbers, and then sorting them.
Therefore, sorted numbers follow the distribution of the order statistics of
a uniform distribution.
    It also returns the random numbers and the lists of permutations p such
    that p(sorted) = random.
    Notice that if one wants to build sorted numbers in different intervals, one
    might just want to re-scale this canonical form.
Args:
n_lists: An int,the number of lists to be sorted.
n_numbers: An int, the number of elements in the permutation.
prob_inc: A float, the probability that a list of numbers will be sorted in
increasing order.
Returns:
ordered: a 2-D float tensor with shape = [n_list, n_numbers] of sorted lists
of numbers.
random: a 2-D float tensor with shape = [n_list, n_numbers] of uniform random
numbers.
permutations: a 2-D int tensor with shape = [n_list, n_numbers], row i
        satisfies ordered[i, permutations[i]] = random[i, :].
"""
# sample n_lists samples from Bernoulli with probability of prob_inc
my_bern = torch.distributions.Bernoulli(torch.tensor([prob_inc])).sample([n_lists])
sign = -1*((my_bern * 2) -torch.ones([n_lists,1]))
sign = sign.type(torch.float32)
random =(torch.empty(n_lists, n_numbers).uniform_(0, 1))
random =random.type(torch.float32)
# my change
#random_with_sign = random * sign
#Finds sorted values and indices of the k largest entries for the last dimension.
#sorted – controls whether to return the elements in sorted order
#ordered, permutations = torch.topk(random_with_sign, k = n_numbers, sorted = True)
# my change
ordered, permutations = torch.sort(random, descending=True)
#my change
#ordered = ordered * sign
return (ordered, random, permutations)
def sample_permutations(n_permutations, n_objects):
"""Samples a batch permutations from the uniform distribution.
Returns a sample of n_permutations permutations of n_objects indices.
Permutations are assumed to be represented as lists of integers
(see 'listperm2matperm' and 'matperm2listperm' for conversion to alternative
matricial representation). It does so by sampling from a continuous
distribution and then ranking the elements. By symmetry, the resulting
distribution over permutations must be uniform.
Args:
n_permutations: An int, the number of permutations to sample.
n_objects: An int, the number of elements in the permutation.
Returns:
A 2D integer tensor with shape [n_permutations, n_objects], where each
row is a permutation of range(n_objects)
"""
random_pre_perm = torch.empty(n_permutations, n_objects).uniform_(0, 1)
_, permutations = torch.topk(random_pre_perm, k = n_objects)
return permutations
def permute_batch_split(batch_split, permutations):
"""Scrambles a batch of objects according to permutations.
It takes a 3D tensor [batch_size, n_objects, object_size]
and permutes items in axis=1 according to the 2D integer tensor
permutations, (with shape [batch_size, n_objects]) a list of permutations
expressed as lists. For many dimensional-objects (e.g. images), objects have
to be flattened so they will respect the 3D format, i.e. tf.reshape(
batch_split, [batch_size, n_objects, -1])
Args:
batch_split: 3D tensor with shape = [batch_size, n_objects, object_size] of
splitted objects
permutations: a 2D integer tensor with shape = [batch_size, n_objects] of
permutations, so that permutations[n] is a permutation of range(n_objects)
Returns:
A 3D tensor perm_batch_split with the same shape as batch_split,
so that perm_batch_split[n, j,:] = batch_split[n, perm[n,j],:]
"""
batch_size= permutations.size()[0]
n_objects = permutations.size()[1]
permutations = permutations.view(batch_size, n_objects, -1)
perm_batch_split = torch.gather(batch_split, 1, permutations)
return perm_batch_split
def listperm2matperm(listperm):
"""Converts a batch of permutations to its matricial form.
Args:
listperm: 2D tensor of permutations of shape [batch_size, n_objects] so that
listperm[n] is a permutation of range(n_objects).
Returns:
a 3D tensor of permutations matperm of
shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a
permutation of the identity matrix, with matperm[n, i, listperm[n,i]] = 1
"""
n_objects = listperm.size()[1]
eye = np.eye(n_objects)[listperm]
eye= torch.tensor(eye, dtype=torch.int32)
return eye
def matperm2listperm(matperm):
"""Converts a batch of permutations to its enumeration (list) form.
Args:
matperm: a 3D tensor of permutations of
shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a
permutation of the identity matrix. If the input is 2D, it is reshaped
to 3D with batch_size = 1.
dtype: output_type (int32, int64)
Returns:
A 2D tensor of permutations listperm, where listperm[n,i]
is the index of the only non-zero entry in matperm[n, i, :]
"""
batch_size = matperm.size()[0]
n_objects = matperm.size()[1]
matperm = matperm.view(-1, n_objects, n_objects)
#argmax is the index location of each maximum value found(argmax)
_, argmax = torch.max(matperm, dim=2, keepdim= True)
argmax = argmax.view(batch_size, n_objects)
return argmax
def invert_listperm(listperm):
"""Inverts a batch of permutations.
Args:
listperm: a 2D integer tensor of permutations listperm of
shape = [batch_size, n_objects] so that listperm[n] is a permutation of
range(n_objects)
Returns:
    A 2D integer tensor with the same shape as listperm, where each row is
    the inverse of the corresponding input permutation.
"""
return matperm2listperm(torch.transpose(listperm2matperm(listperm), 1, 2))
def matching(matrix_batch):
"""Solves a matching problem for a batch of matrices.
This is a wrapper for the scipy.optimize.linear_sum_assignment function. It
solves the optimization problem max_P sum_i,j M_i,j P_i,j with P a
    permutation matrix. Notice the negative sign; the reason is that the
    original function solves a minimization problem.
Args:
matrix_batch: A 3D tensor (a batch of matrices) with
shape = [batch_size, N, N]. If 2D, the input is reshaped to 3D with
batch_size = 1.
Returns:
listperms, a 2D integer tensor of permutations with shape [batch_size, N]
so that listperms[n, :] is the permutation of range(N) that solves the
problem max_P sum_i,j M_i,j P_i,j with M = matrix_batch[n, :, :].
"""
def hungarian(x):
if x.ndim == 2:
x = np.reshape(x, [1, x.shape[0], x.shape[1]])
sol = np.zeros((x.shape[0], x.shape[1]), dtype=np.int32)
for i in range(x.shape[0]):
sol[i, :] = linear_sum_assignment(-x[i, :])[1].astype(np.int32)
return sol
listperms = hungarian(matrix_batch.detach().cpu().numpy())
listperms = torch.from_numpy(listperms)
return listperms
def kendall_tau(batch_perm1, batch_perm2):
"""Wraps scipy.stats kendalltau function.
Args:
batch_perm1: A 2D tensor (a batch of matrices) with
shape = [batch_size, N]
batch_perm2: same as batch_perm1
Returns:
A list of Kendall distances between each of the elements of the batch.
"""
def kendalltau_batch(x, y):
if x.ndim == 1:
x = np.reshape(x, [1, x.shape[0]])
if y.ndim == 1:
y = np.reshape(y, [1, y.shape[0]])
kendall = np.zeros((x.shape[0], 1), dtype=np.float32)
for i in range(x.shape[0]):
kendall[i, :] = kendalltau(x[i, :], y[i, :])[0]
return kendall
listkendall = kendalltau_batch(batch_perm1.cpu().numpy(), batch_perm2.cpu().numpy())
listkendall = torch.from_numpy(listkendall)
return listkendall
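# A minimal end-to-end sketch (not part of the original module; values below are
# illustrative): relax a random score matrix into a near-doubly-stochastic matrix with
# sinkhorn, sample a noisy relaxation with gumbel_sinkhorn, then round it to a hard
# permutation with matching (Hungarian algorithm).
if __name__ == '__main__':
    torch.manual_seed(0)
    log_alpha = to_var(torch.randn(2, 4, 4))  # batch of 2 random 4x4 score matrices
    soft = sinkhorn(log_alpha, n_iters=30)  # rows and columns each sum to ~1
    print(soft.sum(dim=-1))
    sampled, _ = gumbel_sinkhorn(log_alpha, temp=0.5, n_samples=1, noise_factor=1.0)
    hard = matching(sampled.view(-1, 4, 4))  # one permutation of range(4) per matrix
    print(hard)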
|
from .ReportDaily import *
# Find all forks in organizations that have a parent in an organization.
# Organizational repositories should have only one source of truth on
# the entire instance.
#
# c.f. https://medium.com/@larsxschneider/talk-dont-fork-743a1253b8d5
class ReportForksToOrgs(ReportDaily):
def name(self):
return "forks-to-organizations"
def updateDailyData(self):
self.detailedHeader, self.detailedData = self.parseData(self.executeQuery(self.query()))
if len(self.data) == 0:
self.header = ["date", "forks to organizations"]
self.data.append([str(self.yesterday()), len(self.detailedData)])
self.truncateData(self.timeRangeTotal())
self.sortDataByDate()
# Collects the number of forks in organizations
def query(self):
query = '''
SELECT
CONCAT(orgs.login, "/", repos.name) AS fork,
CAST(repos.created_at AS date) AS "creation date"
FROM
users AS orgs,
repositories AS repos,
users AS parentOrgs,
repositories AS parentRepos
WHERE
orgs.type = "organization" AND
repos.owner_id = orgs.id AND
parentOrgs.type = "organization" AND
parentRepos.owner_id = parentOrgs.id AND
parentRepos.id = repos.parent_id
''' + self.andExcludedEntities("orgs.login") \
+ self.andExcludedEntities("repos.name") \
+ self.andExcludedEntities("parentOrgs.login") \
+ self.andExcludedEntities("parentRepos.name") + '''
ORDER BY
repos.created_at DESC
'''
return query
|
#! /usr/bin/env python3
import os
import time
codes = {
'power': '16658433',
'mode_plus': '16658437',
'mode_minus': '16658443',
'speed_plus': '16658441',
'speed_minus': '16658439',
'demo': '16658440',
'color_plus': '16658442',
'color_minus': '16658445',
'bright_plus': '16658444',
'bright_minus': '16658447',
'white': '16658446',
'red': '16658448',
'green': '16658449',
'purple': '16658450',
'yellow': '16658451',
'blue': '16658452',
'pink': '16658453'
}
def remote_code(code):
os.system('/usr/bin/codesend {}'.format(code))
time.sleep(.25)
while True:
remote_code(codes["power"])
remote_code(codes["red"])
remote_code(codes["yellow"])
remote_code(codes["green"])
remote_code(codes["blue"])
remote_code(codes["purple"])
remote_code(codes["pink"])
remote_code(codes["white"])
|
# Number of Recent Calls: https://leetcode.com/problems/number-of-recent-calls/
# You have a RecentCounter class which counts the number of recent requests within a certain time frame.
# Implement the RecentCounter class:
# RecentCounter() Initializes the counter with zero recent requests.
# int ping(int t) Adds a new request at time t, where t represents some time in milliseconds, and returns the number of requests that has happened in the past 3000 milliseconds (including the new request). Specifically, return the number of requests that have happened in the inclusive range [t - 3000, t].
# It is guaranteed that every call to ping uses a strictly larger value of t than the previous call.
# There is a simple way of solving this problem: with a double-ended queue you can simply push
# new values on and then pop off anything that is below t - 3000, then return the length.
from collections import deque
class RecentCounter:
def __init__(self):
self.q = deque()
return
def ping(self, t: int) -> int:
if t > 3000:
while len(self.q) > 0 and self.q[-1] < (t - 3000):
self.q.pop()
self.q.appendleft(t)
return len(self.q)
# Your RecentCounter object will be instantiated and called as such:
# obj = RecentCounter()
# param_1 = obj.ping(t)
# This problem is really just basic deque usage; it is essentially a sliding-window solution,
# only driven by ping calls instead of an array of nums. Each ping runs in amortized O(1) time,
# and the deque only ever holds the pings that fall inside the 3000 ms window.
# An alternative would be to keep an array and binary search through the timestamps, since the
# times are always sorted, but the array would retain every ping ever seen and keep growing
# (see the sketch below).
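# A minimal sketch of that array-plus-binary-search alternative (the class name and code
# below are illustrative, not part of the original solution): timestamps arrive strictly
# increasing, so bisect_left finds the first ping inside [t - 3000, t].
import bisect
class RecentCounterArray:
    def __init__(self):
        self.times = []
    def ping(self, t: int) -> int:
        self.times.append(t)
        i = bisect.bisect_left(self.times, t - 3000)  # first index with time >= t - 3000
        return len(self.times) - i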
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 10
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 5 5 5 5 = 5
|
"""
Author: Andreas Finkler
Created: 23.12.2020
Statistics for qualifying times.
"""
from operator import attrgetter
import numpy as np
from granturismo_stats.entities.ranking import Leaderboard
class QualifyingTimes:
"""
Statistics of the qualifying results for a given race.
"""
def __init__(self, leaderboard: Leaderboard):
self.leaderboard = leaderboard
@property
def scores(self):
return [entry.score for entry in self.leaderboard]
@property
def fastest(self):
return min(self.leaderboard, key=attrgetter("score"))
@property
def slowest(self):
return max(self.leaderboard, key=attrgetter("score"))
@property
def median(self):
return np.median(self.scores)
@property
def mean(self):
return np.mean(self.scores)
@property
def standard_deviation(self):
return np.std(self.scores)
def percentile(self, percent):
return np.percentile(self.scores, percent)
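# A minimal sketch (not part of the original module) exercising the statistics with a
# stand-in leaderboard: QualifyingTimes only iterates its entries and reads their
# .score attribute, so SimpleNamespace objects are enough for a quick check.
if __name__ == "__main__":
    from types import SimpleNamespace
    fake_board = [SimpleNamespace(score=t) for t in (92.1, 93.4, 95.0, 97.8)]
    stats = QualifyingTimes(fake_board)
    print(stats.fastest.score, stats.median, round(stats.standard_deviation, 3))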
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nwid.widget
~~~~~~~~~~~
This module contains nwid widget objects and data structures.
"""
from __future__ import absolute_import
from .base import BaseWidget
|
#required !pip install googleads -q
import pandas as pd
import numpy as np
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
from google.protobuf.json_format import MessageToDict
import _locale
"""
MANDATORY INPUT:
start_date, end_date as string "yyyy-mm-dd"
OPTIONAL INPUTS:
- yaml_file_path: the path of 'googleads.yaml'.
  Optional, but the file must sit in the same folder as the script if the default parameter is used.
"""
def get_google_ads(start_date, end_date, admin_account,
## optional
yaml_file_path = 'googleads.yaml'
):
_locale._getdefaultlocale = (lambda *args: ['en_US', 'UTF-8'])
    googleads_client = GoogleAdsClient.load_from_storage(yaml_file_path)
ga_service = googleads_client.get_service("GoogleAdsService")
query = f"""
SELECT
campaign_budget.amount_micros,
campaign.id,
customer.id,
customer.descriptive_name,
customer.currency_code,
metrics.cost_micros,
segments.month,
segments.date,
campaign.name,
metrics.impressions,
metrics.clicks,
metrics.ctr,
metrics.average_cpc,
metrics.conversions,
metrics.view_through_conversions,
metrics.cost_per_conversion,
campaign.status,
metrics.all_conversions_from_interactions_rate,
metrics.average_cpm
FROM campaign
WHERE
segments.date BETWEEN '{start_date}' AND '{end_date}' """
search_request = googleads_client.get_type("SearchGoogleAdsStreamRequest")
google_ads = pd.DataFrame(columns = ['Customer ID','Brand','Currency','Campaign state','Campaign','Campaign ID','Clicks','View-through conv.','Conversions','Cost','Cost / conv.','CTR','Conv. rate','Avg. CPC','Avg. CPM','Impressions','Budget','Day','Month'])
for key, acc_id in admin_account.items():
stream = ga_service.search(customer_id = acc_id, query=query)
st = next(stream.pages)
dictobj = MessageToDict(st)
#print(dictobj)
if 'results' in dictobj:
df = pd.json_normalize(dictobj,record_path=['results'])
df = df.drop(columns = ['customer.resourceName', 'campaign.resourceName', 'campaignBudget.resourceName'])
df = df.rename(columns = {'customer.id':'Customer ID',
'customer.descriptiveName':'Brand',
'customer.currencyCode':'Currency',
'campaign.status':'Campaign state',
'campaign.name':'Campaign',
'campaign.id':'Campaign ID',
'metrics.clicks':'Clicks',
'metrics.viewThroughConversions':'View-through conv.',
'metrics.conversions':'Conversions',
'metrics.costMicros':'Cost',
'metrics.costPerConversion':'Cost / conv.',
'metrics.ctr':'CTR',
'metrics.allConversionsFromInteractionsRate':'Conv. rate',
'metrics.averageCpc':'Avg. CPC',
'metrics.averageCpm':'Avg. CPM',
'metrics.impressions':'Impressions',
'campaignBudget.amountMicros':'Budget',
'segments.date':'Day',
'segments.month':'Month'} )
            #print(df.head())
            print(f'Extracted data for {key}')
            google_ads = pd.concat([google_ads, df])
        else:
            print(f'No data for {key}')
google_ads = google_ads[['Month', 'Day', 'Campaign ID', 'Customer ID', 'Campaign',
'Campaign state', 'Budget', 'Currency', 'Clicks', 'Impressions', 'CTR',
'Avg. CPC', 'Cost', 'Conversions', 'View-through conv.', 'Cost / conv.',
'Conv. rate', 'Avg. CPM', 'Brand']].reset_index(drop = True)
to_float = ['Clicks', 'Impressions', 'Cost', 'CTR', 'View-through conv.', 'Conversions', 'Cost / conv.', 'Conv. rate', 'Avg. CPC', 'Avg. CPM']
google_ads['Campaign ID'] = google_ads['Campaign ID'].astype(int)
for c in to_float:
google_ads[c] = google_ads[c].astype(float)
# micro amount 1000000
google_ads['Cost'] = google_ads['Cost']/1000000
google_ads['CTR'] = round(google_ads['CTR']*100, 2).astype(str) + '%'
google_ads['Conv. rate'] = round(google_ads['Conv. rate']*100, 2).astype(str) + '%'
google_ads['Campaign state'] = google_ads['Campaign state'].str.lower()
google_ads['Avg. CPC'] = round(google_ads['Avg. CPC'], 1)
google_ads['Avg. CPM'] = round(google_ads['Avg. CPM'], 1)
google_ads['Cost / conv.'] = round(google_ads['Cost / conv.'], 1)
return google_ads
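# A minimal usage sketch (not part of the original module). The account label and
# customer ID below are placeholders, and a valid 'googleads.yaml' with credentials
# for that account is required for the request to succeed.
if __name__ == '__main__':
    accounts = {'brand_a': '1234567890'}  # hypothetical customer ID
    report = get_google_ads('2022-01-01', '2022-01-31', accounts,
                            yaml_file_path='googleads.yaml')
    print(report.head())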
|
import json
def text_file_to_list(filename):
with open(filename, 'r') as file:
return file.read().splitlines()
def list_to_text_file(filename, string_list):
with open(filename, 'w') as text_file:
for line in string_list:
text_file.write(f'{line}\n')
def json_to_dict(filename):
with open(filename, 'r') as file:
return json.load(file)
def dict_to_json(filename, dictionary):
with open(filename, 'w') as json_file:
json.dump(dictionary, json_file)
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""Implement handler for the /uiconfig endpoint.
In a DC/OS cluster it is exposed via /dcos-metadata/ui-config.json. It is there
so that the UI can display a special message to visitors of the login page ("you
will be the superuser!") before the first regular user has been created in the
database.
"""
import logging
import json
from bouncer.app import config
from bouncer.app.models import User, UserType, dbsession
UI_CONFIG_FILEPATH = '/opt/mesosphere/etc/ui-config.json'
CLUSTER_ID_FILEPATH = '/var/lib/dcos/cluster-id'
def get_module_route_handlers():
return {
'/uiconfig': UIConfig,
}
def read_ui_config():
if config['TESTING']:
return {'dummy': 'yes'}
# Expect that this code is integration-tested. Not reached in unit tests.
with open(UI_CONFIG_FILEPATH, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
def read_cluster_id():
if config['TESTING']:
return 'a-dummy-cluster-id'
# Expect that this code is integration-tested. Not reached in unit tests.
with open(CLUSTER_ID_FILEPATH, 'rb') as f:
return f.read().decode('utf-8').strip()
class UIConfig:
def __init__(self):
self.log = logging.getLogger(
'bouncer.app.uiconfig.' + self.__class__.__name__)
def on_get(self, req, resp):
# The legacy code behavior (dcos-oauth) is to emit a 500 Internal Server
# Error when reading the file(s) or when decoding their contents fails,
# and also when the interaction with the database fails.
cluster_id = read_cluster_id()
ui_config = read_ui_config()
is_first_regular_user = dbsession.query(User).filter_by(
utype=UserType.regular).count() == 0
ui_config['clusterConfiguration'] = {
'firstUser': is_first_regular_user,
'id': cluster_id
}
req.context['odata'] = ui_config
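# For illustration only (not part of the original handler): the payload attached to
# req.context['odata'] ends up shaped roughly like
#   {<contents of ui-config.json>,
#    'clusterConfiguration': {'firstUser': <True if no regular user exists yet>,
#                             'id': '<cluster id>'}}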
|
"""
This is the main program for making the ATM forcing file.
Testing on my mac: need to use this day to find stored files:
run make_forcing_main.py -g cas6 -t v3 -d 2017.04.20
Note: we set rain to zero because its units are uncertain and
we don't currently use it in the simulations.
2021.09.16: I finally added a planB, to test it, first do a forecast:
run make_forcing_main.py -g cas6 -t v3 -d 2017.04.20 -r forecast
and then try to do a forecast for the next day - for which we don't have
WRF files:
run make_forcing_main.py -g cas6 -t v3 -d 2017.04.21 -r forecast
"""
import os; import sys
sys.path.append(os.path.abspath('../'))
import forcing_functions as ffun
Ldir, Lfun = ffun.intro()
# ****************** CASE-SPECIFIC CODE *****************
from datetime import datetime, timedelta
import time
import shutil
import netCDF4 as nc
import numpy as np
import seawater as sw
from scipy.interpolate import griddata
from scipy.spatial import cKDTree
import matplotlib.path as mpath
import zfun
import zrfun
import atm_fun as afun
from importlib import reload
reload(afun)
start_time = datetime.now()
# where are files located, and other situational choices
do_d3 = True; do_d4 = True
if Ldir['lo_env'] == 'pm_mac':
wrf_dir = Ldir['data'] + 'wrf/'
    Ldir['run_type'] = 'backfill'
testing = False
#do_d3 = True; do_d4 = False
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath('../../plotting'))
import pfun
else:
wrf_dir = '/data1/darr/wrf_crons/wrfout/'
testing = False
# create list of hours
if Ldir['run_type'] == 'backfill':
hr_vec = range(0,25)
elif Ldir['run_type'] == 'forecast':
hr_max = Ldir['forecast_days'] * 24
hr_vec = range(0, hr_max + 1)
# Create lists of input files. These will be the full lists
# regardless of whether or not the files exist.
d_str = Ldir['date_string'].replace('.','')
in_dir = wrf_dir + d_str + '00/'
d2_list = []
d3_list = []
d4_list = []
forecast_hour = []
for hr in hr_vec:
hr_str = ('0' + str(hr))[-2:]
d2_list.append(in_dir + 'wrfout.ocean_d2.' + d_str + '00.f' + hr_str + '.0000')
d3_list.append(in_dir + 'wrfout.ocean_d3.' + d_str + '00.f' + hr_str + '.0000')
d4_list.append(in_dir + 'wrfout.ocean_d4.' + d_str + '00.f' + hr_str + '.0000')
# Create dicts that relate a filename to a time index
d2i_dict = {}
for i, v in enumerate(d2_list):
d2i_dict[v] = i
# check for existence, and if any d2 are missing then exit
planB = False
for fn in d2_list:
if not os.path.isfile(fn):
print('** Missing file: ' + fn)
planB = True
break
#sys.exit() # this would be the place to invoke a Plan B
if planB == False:
# for d3 and d4 just make sure we have the first one,
# so that we can get the grid
for fn in [d3_list[0]]:
if not os.path.isfile(fn):
print('** Missing file: ' + fn)
do_d3 = False
for fn in [d4_list[0]]:
if not os.path.isfile(fn):
print('** Missing file: ' + fn)
do_d4 = False
# create vector of time, in model format
dt0 = datetime.strptime(Ldir['date_string'], '%Y.%m.%d')
mod_time_list = []
for hr in hr_vec:
dt = dt0 + timedelta(days=hr/24)
mod_time = Lfun.datetime_to_modtime(dt)
mod_time_list.append(mod_time)
mod_time_vec = np.array(mod_time_list)
# get model grid
gds = nc.Dataset(Ldir['grid'] + 'grid.nc')
lon = gds['lon_rho'][:]
lat = gds['lat_rho'][:]
gds.close()
# get WRF grid(s)
def get_wrf_grid(fn):
wds = nc.Dataset(fn)
lon = wds['XLONG'][:].squeeze()
lat = wds['XLAT'][:].squeeze()
if False:
print('\n** ' + fn.split('/')[-1])
vn_list = []
for vn in wds.variables:
vn_list.append(vn)
print(vn_list)
wds.close()
# grid size info
NR, NC = lon.shape
jj = int(NR/2); ii = int(NC/2)
dx_km, dd_deg = sw.dist(lat[jj,ii], [lon[jj,ii], lon[jj+1,ii+1]])
return lon, lat, dx_km
# Note: lat, lon are only in the first file of the day (hour zero)
lon2, lat2, dx2_km = get_wrf_grid(d2_list[0])
if do_d3:
try:
lon3, lat3, dx3_km = get_wrf_grid(d3_list[0])
except:
do_d3 = False
if do_d4:
try:
# sometimes there are empty files
lon4, lat4, dx4_km = get_wrf_grid(d4_list[0])
except:
do_d4 = False
# Limit varlist if testing
if testing == True:
outvar_list = ['rain']
#outvar_list = ['Pair','rain','swrad','lwrad_down','Tair','Qair']
else:
outvar_list = afun.outvar_list
# initialize NetCDF output files, one for each variable
NR, NC = lon.shape
NT = len(mod_time_list)
ncformat = 'NETCDF3_64BIT_OFFSET'
tt0 = time.time()
nc_out_dict = {}
for vn in outvar_list:
# name output file
out_fn = Ldir['LOogf_f'] + vn + '.nc'
nc_out_dict[vn] = out_fn
# get rid of the old version, if it exists
try:
os.remove(out_fn)
except OSError:
pass # assume error was because the file did not exist
foo = nc.Dataset(out_fn, 'w', format=ncformat)
# create dimensions
timename = afun.timename_dict[vn]
foo.createDimension(timename, NT) # could use None
foo.createDimension('eta_rho', NR)
foo.createDimension('xi_rho', NC)
# add time data
vv = foo.createVariable(timename, float, (timename,))
vv.units = 'seconds since 1970.01.01 UTC'
vv[:] = mod_time_vec
# add variable definition
vv = foo.createVariable(vn, float, (timename, 'eta_rho', 'xi_rho'))
vv.long_name = afun.longname_dict[vn]
vv.units = afun.units_dict[vn]
foo.close()
print('Initializing NetCDF files took %0.1f seconds' % (time.time() - tt0))
tt0 = time.time()
# find index to trim Eastern part of WRF fields
lon_max = lon[0,-1]
imax2 = zfun.find_nearest_ind(lon2[0,:], lon_max + .5)
lon2 = lon2[:,:imax2]; lat2 = lat2[:, :imax2]
if do_d3:
imax3 = zfun.find_nearest_ind(lon3[0,:], lon_max + .5)
lon3 = lon3[:,:imax3]; lat3 = lat3[:, :imax3]
if do_d4:
imax4 = zfun.find_nearest_ind(lon4[0,:], lon_max + .5)
lon4 = lon4[:,:imax4]; lat4 = lat4[:, :imax4]
# prepare coordinate arrays for interpolation
XY = np.array((lon.flatten(), lat.flatten())).T
XY2 = np.array((lon2.flatten(), lat2.flatten())).T
if do_d3:
XY3 = np.array((lon3.flatten(), lat3.flatten())).T
if do_d4:
XY4 = np.array((lon4.flatten(), lat4.flatten())).T
# find coordinate rotation matrices
def get_angle(lon, lat):
NR, NC = lon.shape
theta = np.nan * np.ones_like(lon)
for jj in range(NR-1):
junk, theta[jj,:-1] = sw.dist(lat[jj,:], lon[jj,:])
theta[:,-1] = theta[:,-2]
ca = np.cos(-np.pi*theta/180)
sa = np.sin(-np.pi*theta/180)
return ca, sa
ca2, sa2 = get_angle(lon2, lat2)
if do_d3:
ca3, sa3 = get_angle(lon3, lat3)
if do_d4:
ca4, sa4 = get_angle(lon4, lat4)
print('Manipulating grids took %0.1f seconds' % (time.time() - tt0))
# define regions for masking
def get_indices_in_polygon(plon_poly, plat_poly, lon, lat):
# get Boolean mask array "M" that is true for points
# in lon, lat that are in the polygon plon_poly, plat_poly
V = np.ones((len(plon_poly),2))
V[:,0] = plon_poly
V[:,1] = plat_poly
P = mpath.Path(V)
M, L = lon.shape
Rlon = lon.flatten()
Rlat = lat.flatten()
R = np.ones((len(Rlon),2))
R[:,0] = Rlon
R[:,1] = Rlat
M = P.contains_points(R) # boolean
M = M.reshape(lon.shape)
return M
tt0 = time.time()
if do_d3:
plon3_poly = np.concatenate((lon3[0,4:],lon3[:-5,-1],lon3[-5,4::-1],lon3[:-5:-1,4]))
plat3_poly = np.concatenate((lat3[0,4:],lat3[:-5,-1],lat3[-5,4::-1],lat3[:-5:-1,4]))
M3 = get_indices_in_polygon(plon3_poly, plat3_poly, lon, lat)
if do_d4:
plon4_poly = np.concatenate((lon4[0,4:],lon4[:-5,-1],lon4[-5,4::-1],lon4[:-5:-1,4]))
plat4_poly = np.concatenate((lat4[0,4:],lat4[:-5,-1],lat4[-5,4::-1],lat4[:-5:-1,4]))
M4 = get_indices_in_polygon(plon4_poly, plat4_poly, lon, lat)
print('Make grid masks took %0.1f seconds' % (time.time() - tt0))
# get interpolation matrices
IM2 = cKDTree(XY2).query(XY); IM2 = IM2[1]
if do_d3:
IM3 = cKDTree(XY3).query(XY); IM3 = IM3[1]
if do_d4:
IM4 = cKDTree(XY4).query(XY); IM4 = IM4[1]
def gather_and_process_fields(fn, imax, ca, sa):
# This is where we define any transformations to get from WRF to ROMS variables.
ds = nc.Dataset(fn)
iv_dict = dict()
for ivn in afun.invar_list:
# we trim fields to match the trimmed coordinate arrays
iv_dict[ivn] = ds[ivn][0,:,:imax].squeeze()
ds.close()
# then convert to ROMS units/properties, still on the WRF grid
# invar_list = ['Q2', 'T2', 'PSFC', 'U10', 'V10','RAINCV', 'RAINNCV', 'SWDOWN', 'GLW']
# outvar_list = ['Pair','rain','swrad','lwrad_down','Tair','Qair','Uwind','Vwind']
ov_dict = dict()
for ovn in outvar_list:
if ovn == 'Pair':
# convert Pa to mbar
ov_dict[ovn] = iv_dict['PSFC']/100
elif ovn == 'rain':
# set this to zero because (a) we don't really understand the units
            # and (b) it is not used in the simulations at this point 2019.05.22
ov_dict[ovn] = 0 * (iv_dict['RAINCV']+iv_dict['RAINNCV'])
elif ovn == 'Tair':
# convert K to C
ov_dict[ovn] = iv_dict['T2'] - 273.15
elif ovn == 'swrad':
# account for reflection
ov_dict[ovn] = iv_dict['SWDOWN'] * (1 - 0.1446)
elif ovn == 'lwrad_down':
            # downward longwave is passed through unchanged (no reflection factor applied)
ov_dict[ovn] = iv_dict['GLW']
elif ovn == 'Qair':
# calculate relative humidity [%]
ov_dict[ovn] = afun.Z_wmo_RH(ov_dict['Pair'], ov_dict['Tair'], iv_dict['Q2'])
elif ovn == 'Uwind':
# % rotate velocity to E-W and N-S
ov_dict[ovn] = ca*iv_dict['U10'] + sa*iv_dict['V10']
elif ovn == 'Vwind':
# % rotate velocity to E-W and N-S
ov_dict[ovn] = ca*iv_dict['V10'] - sa*iv_dict['U10']
return ov_dict
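# Hedged sanity-check helper (not called in the main flow): the (ca, sa) factors
# above implement a pure rotation, so rotating the wind should leave its speed
# unchanged. The angle and wind components below are synthetic, for illustration only.
def _check_wind_rotation(theta_deg=30.0, u10=5.0, v10=-3.0):
    ca_t = np.cos(-np.pi * theta_deg / 180)
    sa_t = np.sin(-np.pi * theta_deg / 180)
    uwind = ca_t * u10 + sa_t * v10
    vwind = ca_t * v10 - sa_t * u10
    return np.isclose(np.hypot(uwind, vwind), np.hypot(u10, v10))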
def interp_to_roms(ov_dict, outvar_list, XYn):
    # Interpolate to the ROMS grid using nearest neighbor (about twice as fast as linear?).
    # Linear interpolation would leave NaNs outside the convex hull of the data, while
    # nearest neighbor fills everything; the masks M3 and M4 created above are then used
    # to decide where the finer-grid data are added.
ovi_dict = dict()
for ovn in outvar_list:
v = ov_dict[ovn]
ovi_dict[ovn] = griddata(XYn, v.flatten(), XY, method='nearest').reshape((NR,NC))
return ovi_dict
def interp_to_roms_alt(ov_dict, outvar_list, IMn):
ovi_dict = dict()
for ovn in outvar_list:
v = ov_dict[ovn].flatten()
ovi_dict[ovn] = v[IMn].reshape((NR,NC))
return ovi_dict
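# Hedged illustration (unused helper): the precomputed cKDTree index lookup used by
# interp_to_roms_alt should agree with griddata(..., method='nearest'); the toy
# coordinates and values below are synthetic.
def _check_nn_equivalence():
    src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    dst = np.array([[0.1, 0.1], [0.9, 0.1]])
    vals = np.array([10.0, 20.0, 30.0])
    idx = cKDTree(src).query(dst)[1]       # nearest source index for each target point
    via_index = vals[idx]                  # what interp_to_roms_alt does
    via_griddata = griddata(src, vals, dst, method='nearest')  # what interp_to_roms does
    return np.allclose(via_index, via_griddata)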
# MAIN TASK: loop over all hours
if testing == True:
# 20 = about noon local time
d2_list = d2_list[20:21]
d3_list = d3_list[20:21]
d4_list = d4_list[20:21]
dall_list = zip(d2_list, d3_list, d4_list)
for fn2, fn3, fn4 in dall_list:
    print('Working on ' + fn2.split('/')[-1] + ' etc.')
# new flags to allow processing more files
do_this_d3 = True
do_this_d4 = True
# if we are missing a d3 or d4 file then we don't work on it
if not os.path.isfile(fn3):
print(' - missing ' + fn3)
do_this_d3 = False
if not os.path.isfile(fn4):
print(' - missing ' + fn4)
do_this_d4 = False
tt0 = time.time()
ov2_dict = gather_and_process_fields(fn2, imax2, ca2, sa2)
ovi2_dict = interp_to_roms_alt(ov2_dict, outvar_list, IM2)
print(' - d2: gather, process, and interp took %0.1f seconds' % (time.time() - tt0))
if do_this_d3:
try:
tt0 = time.time()
ov3_dict = gather_and_process_fields(fn3, imax3, ca3, sa3)
ovi3_dict = interp_to_roms_alt(ov3_dict, outvar_list, IM3)
print(' - d3: gather, process, and interp took %0.1f seconds' % (time.time() - tt0))
except:
print(' - could not process ' + fn3)
do_this_d3 = False
if do_this_d4:
try:
tt0 = time.time()
ov4_dict = gather_and_process_fields(fn4, imax4, ca4, sa4)
ovi4_dict = interp_to_roms_alt(ov4_dict, outvar_list, IM4)
print(' - d4: gather, process, and interp took %0.1f seconds' % (time.time() - tt0))
except:
print(' - could not process ' + fn4)
do_this_d4 = False
tt0 = time.time()
# combine the grids
ovc_dict = dict()
for ovn in outvar_list:
v2 = ovi2_dict[ovn]
v = v2.copy()
if do_this_d3:
v3 = ovi3_dict[ovn]
v[M3] = v3[M3]
if do_this_d4:
v4 = ovi4_dict[ovn]
v[M4] = v4[M4]
if np.sum(np.isnan(v)) > 0:
print('** WARNING Nans in combined output ' + ovn)
ovc_dict[ovn] = v
tt0 = time.time()
# save to NetCDF
tt = d2i_dict[fn2]
for vn in outvar_list:
fn = nc_out_dict[vn]
foo = nc.Dataset(fn, 'a')
foo[vn][tt,:,:] = ovc_dict[vn]
foo.close()
print(' - Write to NetCDF took %0.1f seconds' % (time.time() - tt0))
elif planB == True:
print('**** Using planB ****')
ds_today = Ldir['date_string']
dt_today = datetime.strptime(ds_today, '%Y.%m.%d')
dt_yesterday = dt_today - timedelta(days=1)
ds_yesterday = datetime.strftime(dt_yesterday, format='%Y.%m.%d')
LOogf_f_yesterday = (Ldir['LOog'] + 'f' + ds_yesterday + '/'
+ Ldir['frc'] + '/')
LOogf_f_today = Ldir['LOogf_f']
outvar_list = afun.outvar_list
nc_out_dict = dict()
for ovn in outvar_list:
fn_yesterday = LOogf_f_yesterday + ovn + '.nc'
fn_today = LOogf_f_today + ovn + '.nc'
nc_out_dict[ovn] = fn_today
shutil.copyfile(fn_yesterday, fn_today)
ds = nc.Dataset(fn_today, 'a')
# advance the time by one day
ot = ds[afun.timename_dict[ovn]][:]
ot += 86400
ds[afun.timename_dict[ovn]][:] = ot
# and copy data from the previous backfill/forecast
v = ds[ovn][:]
print('%s %s' % (ovn, v.shape))
NT, NR, NC = v.shape
vv = v.copy()
if NT == 25:
# if it is backfill we just repeat the previous day
pass
elif NT == 73:
# if it is forecast we use the last two days of the
# previous forecast, and then repeat the last of these
# as a best guess of day 3
vv[:49,:,:] = v[24:,:,:]
vv[49:,:,:] = v[49:,:,:]
ds[ovn][:] = vv
ds.close()
if Ldir['lo_env'] == 'pm_mac':
ds0 = nc.Dataset(fn_yesterday, 'r')
ds1 = nc.Dataset(fn_today, 'r')
ovn = 'Vwind'
v0 = ds0[ovn][:,10,10]
v1 = ds1[ovn][:,10,10]
t0 = ds0[afun.timename_dict[ovn]][:]
t1 = ds1[afun.timename_dict[ovn]][:]
T0 = (t0-t0[0])/86400
T1 = (t1-t0[0])/86400
import matplotlib.pyplot as plt
plt.close('all')
plt.plot(T0,v0,'*r', T1,v1,'-k')
plt.show()
ds0.close()
ds1.close()
if testing == True:
plt.close('all')
lim_dict = dict(zip(afun.outvar_list, afun.lim_list))
# plot some of the fields in the most recent ovi#_dicts
for ovn in outvar_list:
fig = plt.figure(figsize=(20,8))
aa = [lon[0,0], lon[0,-1], lat[0,0], lat[-1,0]]
ax = fig.add_subplot(131)
ax.set_title('d2 ' + ovn)
vmin, vmax = lim_dict[ovn]
cs = plt.pcolormesh(lon, lat, ovi2_dict[ovn], cmap='rainbow', vmin=vmin, vmax=vmax)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax = fig.add_subplot(132)
ax.set_title('Combined')
fld = ovc_dict[ovn]
cs = plt.pcolormesh(lon, lat, fld, cmap='rainbow', vmin=vmin, vmax=vmax)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax = fig.add_subplot(133)
ax.set_title('Combined - d2')
fld = ovc_dict[ovn] - ovi2_dict[ovn]
vmax = np.max(np.abs([np.nanmax(fld),np.nanmin(fld)]))
vmin = -vmax
cs = plt.pcolormesh(lon, lat, fld, cmap='bwr', vmin=vmin, vmax=vmax)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
plt.show()
# ===========================================================================
# prepare for finale
import collections
result_dict = collections.OrderedDict()
time_format = '%Y.%m.%d %H:%M:%S'
result_dict['start_time'] = start_time.strftime(time_format)
end_time = datetime.now()
result_dict['end_time'] = end_time.strftime(time_format)
dt_sec = (end_time - start_time).seconds
result_dict['total_seconds'] = str(dt_sec)
result_dict['result'] = 'success'
get_time = True
for vn in outvar_list:
fn = nc_out_dict[vn]
if os.path.isfile(fn):
if get_time == True:
ds = nc.Dataset(fn)
mt0 = ds[afun.timename_dict[vn]][0]
mt1 = ds[afun.timename_dict[vn]][-1]
ds.close()
dt0 = Lfun.modtime_to_datetime(float(mt0))
dt1 = Lfun.modtime_to_datetime(float(mt1))
result_dict['var_start_time'] = dt0.strftime(time_format)
result_dict['var_end_time'] = dt1.strftime(time_format)
            get_time = False
else:
result_dict['result'] = 'fail'
#%% ************** END CASE-SPECIFIC CODE *****************
ffun.finale(result_dict, Ldir, Lfun)
|
from django import forms
from overrides.widgets import CustomStylePagedown
class EditProfileForm(forms.Form):
about = forms.CharField(label='About',
max_length=1000,
required=False,
widget=CustomStylePagedown(),)
view_adult = forms.BooleanField(label="View Adult Content",
required=False,
help_text="By selecting this box, you certify that you are age 18 or older and wish "
"to view content marked as adult. Lying about your age is a bannable offense.")
class AcceptTermsForm(forms.Form):
pass
|
import torch
import torch.nn as nn
from torch_geometric.data import InMemoryDataset
import numpy as np
import pandas as pd
import pickle
import csv
import os
from torch_geometric.data import Data
from torch_geometric.data import DataLoader
from sklearn.metrics import roc_auc_score
from torch_geometric.nn import GraphConv, TopKPooling, GatedGraphConv, ASAPooling, SAGPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp, GlobalAttention as GA, Set2Set as Set
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear, ReLU
from torch_geometric.nn import MessagePassing, ChebConv, GCNConv, GATConv
from torch_geometric.transforms import LaplacianLambdaMax
from torch_geometric.utils import remove_self_loops, add_self_loops
from torch_geometric.data import Batch as Batch
import time
import math
from torch.nn import Parameter
class pyg_data_creation(InMemoryDataset):
def __init__(self, root, dataset, file_name="dataset", transform=None, pre_transform=None):
self.file_name = file_name
self.dataset = dataset
super(pyg_data_creation, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return []
@property
def processed_file_names(self):
return ['../input/'+self.file_name+'.dataset']
def download(self):
pass
def process(self):
data_list = []
# self.data_tensor = self.data_tensor.reshape(self.data_tensor.shape[0] * self.data_tensor.shape[1], -1).contiguous()
# process by session_id
# grouped = df.groupby('session_id')
for subject, label in self.dataset:
n_nodes = subject.shape[0]
node_features = subject.clone()
            # build a fully-connected edge_index: row 0 holds source node ids,
            # row 1 holds target node ids, covering every (i, j) pair
            edge_index = torch.from_numpy(np.arange(0, n_nodes)).int()
            edge_index = torch.cat((edge_index.unsqueeze(1).repeat(1, n_nodes).reshape(1, -1),
                                    edge_index.unsqueeze(0).repeat(n_nodes, 1).reshape(1, -1)), dim=0).long()
x = node_features
y = label.float().view(1)
data = Data(x=x, edge_index=edge_index, y=y, edge_attr="")
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
class SAGEConv(MessagePassing):
def __init__(self, in_channels, out_channels):
super(SAGEConv, self).__init__(aggr='max') # "Max" aggregation.
self.lin = torch.nn.Linear(in_channels, out_channels)
self.act = torch.nn.ReLU()
self.update_lin = torch.nn.Linear(in_channels + out_channels, in_channels, bias=False)
self.update_act = torch.nn.ReLU()
def forward(self, x, edge_index):
# x has shape [N, in_channels]
# edge_index has shape [2, E]
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
def message(self, x_j):
# x_j has shape [E, in_channels]
x_j = self.lin(x_j)
x_j = self.act(x_j)
return x_j
def update(self, aggr_out, x):
# aggr_out has shape [N, out_channels]
new_embedding = torch.cat([aggr_out, x], dim=1)
new_embedding = self.update_lin(new_embedding)
new_embedding = self.update_act(new_embedding)
return new_embedding
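# Hedged usage sketch (not called anywhere in this module): exercises the custom
# SAGEConv above on a tiny synthetic graph. Node count, feature width, and the
# edge list are made up for illustration only.
def _sageconv_smoke_test():
    x = torch.randn(3, 8)                   # 3 nodes with 8 features each
    edge_index = torch.tensor([[0, 1, 2],   # source node ids
                               [1, 2, 0]])  # target node ids
    conv = SAGEConv(in_channels=8, out_channels=16)
    out = conv(x, edge_index)
    # update() concatenates the max-aggregated messages with x and projects back
    # to in_channels, so the node feature width is preserved.
    assert out.shape == (3, 8)
    return out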
embed_dim = 24
class Net(torch.nn.Module):
def __init__(self, n_regions=116):
super(Net, self).__init__()
self.attn = nn.Sequential(
nn.Linear(130, 128),
nn.ReLU(),
nn.Linear(128, 1)
)
self.conv1 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.final_size = 32
self.size1=n_regions
self.size2=math.ceil(self.size1 * 0.8)
self.size3 = math.ceil(self.size2 * 0.8)
self.size4 = math.ceil(self.size3 * 0.3)
self.pool1 = TopKPooling(embed_dim, ratio=0.8)
self.gp1 = Set(embed_dim, 2, 1)
self.conv2 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.pool2 = TopKPooling(embed_dim, ratio=0.8)
self.gp2 = Set(embed_dim, 2, 1)
self.conv3 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.pool3 = TopKPooling(embed_dim, ratio=0.3)
self.gp3 = Set(embed_dim, 2, 1)
self.conv4 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.gp4 = Set(embed_dim, 2, 1)
#
self.conv5 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.gp5 = Set(embed_dim, 2, 1)
#
self.conv6 = GatedGraphConv(embed_dim, 2, aggr='add', bias=True)
self.gp6 = Set(embed_dim, 2, 1)
self.lin1 = torch.nn.Linear(96, 32)
self.bn1 = torch.nn.BatchNorm1d(96)
self.act1 = torch.nn.ReLU()
def forward(self, data, epoch=0):
x, edge_index, batch, edge_attr = data.x, data.edge_index, data.batch, data.edge_attr
B=data.y.shape[0]
indices = torch.from_numpy(np.arange(116))
indices = indices.unsqueeze(dim=0).repeat(B,1).reshape(B*116)
x = F.relu(self.conv1(x, edge_index,torch.squeeze(edge_attr)) )
x, edge_index, edge_attr, batch, perm, score_perm = self.pool1(x, edge_index, edge_attr, batch)
x = F.relu(self.conv2(x, edge_index, torch.squeeze(edge_attr)) )
x, edge_index, edge_attr, batch, perm, score_perm = self.pool2(x, edge_index, edge_attr, batch)
x = F.relu(self.conv3(x, edge_index, torch.squeeze(edge_attr)) )
x, edge_index, edge_attr, batch, perm, score_perm = self.pool3(x, edge_index, edge_attr, batch)
x = F.relu(self.conv4(x, edge_index, torch.squeeze(edge_attr)) )
x = F.relu(self.conv5(x, edge_index, torch.squeeze(edge_attr)) )
x = F.relu(self.conv6(x, edge_index, torch.squeeze(edge_attr)) )
x6 = torch.cat([gmp(x, batch), gap(x, batch), self.gp6(x, batch)], dim=1)
x = x6
x = self.lin1(x)
x = self.act1(x)
return x, indices
|
import unittest
import sys
sys.path.append('../lib')
sys.path.append('helpers')
import matrixUtils
import vTableServer
import pTableServer
class vTest(unittest.TestCase):
def test_V_write_read(self):
fileName = "temp.txt"
matrixUtils.write_V_table(vTableServer.get_table(5), fileName)
        self.assertEqual(matrixUtils.read_V_table(fileName), vTableServer.get_table(5))
def test_P_write_read(self):
fileName = "temp.txt"
matrixUtils.write_P_table(pTableServer.get_table(5), fileName)
self.assertEqual(matrixUtils.read_P_table(fileName), pTableServer.get_table(5))
if __name__ == '__main__':
unittest.main()
|
from clang.cindex import Index, File
def test_file():
index = Index.create()
tu = index.parse('t.c', unsaved_files = [('t.c', "")])
file = File.from_name(tu, "t.c")
assert str(file) == "t.c"
assert file.name == "t.c"
assert repr(file) == "<File: t.c>"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import unittest
import weakref
from unittest.mock import MagicMock, Mock
class WeakRefTests(unittest.TestCase):
def test_ref_dunder_callback_readonly(self):
class C:
pass
def callback(*args):
pass
obj = C()
ref = weakref.ref(obj)
with self.assertRaises(AttributeError):
ref.__callback__ = callback
def test_ref_dunder_callback_with_callback_returns_callback(self):
class C:
pass
def callback(*args):
pass
obj = C()
ref = weakref.ref(obj, callback)
self.assertIs(ref.__callback__, callback)
def test_ref_dunder_callback_without_callback_returns_none(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
self.assertIsNone(ref.__callback__)
def test_dunder_callback_with_subtype_returns_callback(self):
class SubRef(weakref.ref):
pass
class C:
pass
def callback(wr):
pass
obj = C()
ref = SubRef(obj, callback)
self.assertIs(ref.__callback__, callback)
def test_dunder_callback_with_subtype_passes_subtype(self):
class SubRef(weakref.ref):
pass
class C:
pass
def callback(wr):
wr.callback_arg = wr
ref = SubRef(C(), callback)
try:
from _builtins import _gc
_gc()
except ImportError:
pass
self.assertIs(ref.callback_arg, ref)
def test_ref_dunder_call_with_non_ref_raises_type_error(self):
self.assertRaisesRegex(
TypeError,
"'__call__' .* 'weakref' object.* a 'str'",
weakref.ref.__call__,
"not a weakref",
)
def test_dunder_eq_proxies_dunder_eq(self):
class C:
def __eq__(self, other):
return self is other
obj1 = C()
obj2 = C()
ref1 = weakref.ref(obj1)
ref2 = weakref.ref(obj2)
obj1.__eq__ = MagicMock()
self.assertIs(ref1.__eq__(ref2), False)
obj1.__eq__.called_once_with(obj1, obj2)
self.assertIs(ref1.__eq__(ref1), True)
obj1.__eq__.called_once_with(obj1, obj1)
def test_dunder_eq_with_non_ref_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__eq__(not_a_ref), NotImplemented)
def test_dunder_ge_always_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__ge__(ref), NotImplemented)
self.assertIs(ref.__ge__(not_a_ref), NotImplemented)
def test_dunder_gt_always_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__gt__(ref), NotImplemented)
self.assertIs(ref.__gt__(not_a_ref), NotImplemented)
def test_dunder_le_always_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__le__(ref), NotImplemented)
self.assertIs(ref.__le__(not_a_ref), NotImplemented)
def test_dunder_lt_always_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__lt__(ref), NotImplemented)
self.assertIs(ref.__lt__(not_a_ref), NotImplemented)
def test_dunder_ne_proxies_dunder_ne(self):
class C:
def __ne__(self, other):
return self is other
obj1 = C()
obj2 = C()
ref1 = weakref.ref(obj1)
ref2 = weakref.ref(obj2)
obj1.__ne__ = MagicMock()
self.assertIs(ref1.__ne__(ref2), False)
obj1.__ne__.called_once_with(obj1, obj2)
self.assertIs(ref1.__ne__(ref1), True)
obj1.__ne__.called_once_with(obj1, obj1)
def test_dunder_ne_with_non_ref_returns_not_implemented(self):
class C:
pass
obj = C()
ref = weakref.ref(obj)
not_a_ref = object()
self.assertIs(ref.__ne__(not_a_ref), NotImplemented)
def test_dunder_new_with_subtype_return_subtype_instance(self):
class SubRef(weakref.ref):
pass
class C:
def __eq__(self, other):
return "C.__eq__"
c = C()
sub_ref = SubRef(c)
self.assertIsInstance(sub_ref, SubRef)
self.assertIsInstance(sub_ref, weakref.ref)
ref = weakref.ref(c)
self.assertEqual(sub_ref.__eq__(ref), "C.__eq__")
sub_ref.new_attribute = 50
self.assertEqual(sub_ref.new_attribute, 50)
def test_dunder_new_with_non_type_raises_type_error(self):
with self.assertRaises(TypeError):
weakref.ref.__new__("not a type object")
def test_dunder_new_with_non_ref_subtype_raises_type_error(self):
with self.assertRaises(TypeError):
weakref.ref.__new__(list)
def test_dunder_new_with_int_raises_type_error(self):
with self.assertRaisesRegex(
TypeError, "cannot create weak reference to 'int' object"
):
weakref.ref.__new__(weakref.ref, 42)
def test_hash_on_proxy_not_callable_object_raises_type_error(self):
with self.assertRaises(TypeError) as context:
class NotCallable:
def get_name(self):
return "NotCallableObject"
not_callable = NotCallable()
proxy = weakref.proxy(not_callable)
hash(proxy)
self.assertEqual(str(context.exception), "unhashable type: 'weakproxy'")
def test_proxy_not_callable_object_returns_proxy_type(self):
class NotCallable:
def get_name(self):
return "NotCallableObject"
not_callable = NotCallable()
proxy = weakref.proxy(not_callable)
self.assertEqual(type(proxy), weakref.ProxyType)
def test_proxy_calls_to_dunder_functions(self):
class C:
def __add__(self, another):
return 50 + another
c = C()
proxy = weakref.proxy(c)
self.assertEqual(proxy + 5, 55)
def test_proxy_with_hash_raises_type_error(self):
with self.assertRaises(TypeError) as context:
class C:
pass
c = C()
hash(weakref.proxy(c))
self.assertEqual(str(context.exception), "unhashable type: 'weakproxy'")
    def test_proxy_dunder_hash_function_access_succeeds(self):
class C:
pass
c = C()
m = c.__hash__()
self.assertNotEqual(m, 0)
def test_proxy_field_access(self):
class C:
def __init__(self):
self.field = "field_value"
c = C()
proxy = weakref.proxy(c)
self.assertEqual(proxy.field, "field_value")
def test_proxy_instance_method_call(self):
class C:
def method(self):
return "method_return"
c = C()
proxy = weakref.proxy(c)
self.assertEqual(proxy.method(), "method_return")
def test_hash_on_proxy_callable_object_raises_type_error(self):
with self.assertRaises(TypeError) as context:
class Callable:
def __call__(self):
return "CallableObject"
callable = Callable()
proxy = weakref.proxy(callable)
hash(proxy)
self.assertEqual(str(context.exception), "unhashable type: 'weakcallableproxy'")
def test_proxy_callable_object_returns_callable_proxy_type(self):
class Callable:
def __call__(self):
return "CallableObject"
callable = Callable()
proxy = weakref.proxy(callable)
self.assertTrue(isinstance(proxy, weakref.CallableProxyType))
def test_proxy_callable_object_returns_callable_object(self):
class Callable:
def __call__(self):
return "CallableObject"
callable_obj = Callable()
proxy = weakref.proxy(callable_obj)
self.assertEqual(proxy(), "CallableObject")
def test_hash_returns_referent_hash(self):
class C:
def __hash__(self):
return 123456
i = C()
r = weakref.ref(i)
self.assertEqual(hash(r), hash(i))
def test_hash_picks_correct_dunder_hash(self):
class C:
def __hash__(self):
raise Exception("Should not pick this")
r = weakref.ref(C)
self.assertEqual(hash(r), hash(C))
def test_hash_caches_referent_hash(self):
m = Mock()
m.__hash__ = MagicMock(return_value=99)
r = weakref.ref(m)
self.assertEqual(hash(r), 99)
self.assertEqual(hash(r), 99)
m.__hash__.assert_called_once()
# TODO(T43270097): Add a test for callback.
if __name__ == "__main__":
unittest.main()
|
from typing import List
class Solution:
def tribonacci(self, n: int) -> int:
self.tribonacci_list: List = [0, 1, 1, 2]
for i in range(4, n+1):
self.tribonacci_list.append(sum(self.tribonacci_list[-3:]))
return self.tribonacci_list[n]
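# Hedged usage example: LeetCode 1137 lists T(4) == 4 and T(25) == 1389537,
# which this implementation reproduces.
if __name__ == "__main__":
    solver = Solution()
    print(solver.tribonacci(4))   # expected: 4
    print(solver.tribonacci(25))  # expected: 1389537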
|
# encoding: utf-8
# Copyright 2015 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# set-rdf-sources - configure RDF URLs
app = globals().get('app', None) # ``app`` comes from ``instance run`` magic.
portalID = 'edrn'
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.SecurityManager import setSecurityPolicy
from Products.CMFCore.tests.base.security import PermissiveSecurityPolicy, OmnipotentUser
from Testing import makerequest
from zope.component.hooks import setSite
import optparse, logging, sys, transaction
# Defaults
DEF_BODY_SYSTEMS = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/body-systems/@@rdf'
DEF_DISEASES = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/diseases/@@rdf'
DEF_RESOURCES = 'https://edrn.jpl.nasa.gov/bmdb/rdf/resources'
DEF_PUBLICATIONS = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/publications/@@rdf'
DEF_ADD_PUBS = 'http://edrn.jpl.nasa.gov/bmdb/rdf/publications'
DEF_SITES = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/sites/@@rdf'
DEF_PEOPLE = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/registered-person/@@rdf'
DEF_COMMITTEES = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/committees/@@rdf'
DEF_BIOMARKERS = 'https://edrn.jpl.nasa.gov/bmdb/rdf/biomarkers?qastate=all'
DEF_BMO = 'https://edrn.jpl.nasa.gov/bmdb/rdf/biomarkerorgans?qastate=all'
DEF_BIOMUTA = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/biomuta/@@rdf'
DEF_PROTOCOLS = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/rdf-data/protocols/@@rdf'
DEF_IDAPI = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/idsearch'
DEF_PUBLICATIONS_SUMMARY = 'http://edrn-dev.jpl.nasa.gov/cancerdataexpo/summarizer-data/publication/@@summary'
DEF_BIOMARKERS_SUMMARY = 'https://edrn-dev.jpl.nasa.gov/cancerdataexpo/summarizer-data/biomarker/@@summary'
DEF_SITE_SUMMARY = 'http://edrn-dev.jpl.nasa.gov/cancerdataexpo/summarizer-data/collaboration/@@summary'
DEF_ECAS_SUMMARY = 'http://edrn-dev.jpl.nasa.gov/cancerdataexpo/summarizer-data/dataset/@@summary'
# Set up logging
_logger = logging.getLogger('set-rdf-sources')
_logger.setLevel(logging.INFO)
_console = logging.StreamHandler(sys.stderr)
_formatter = logging.Formatter('%(levelname)-8s %(message)s')
_console.setFormatter(_formatter)
_logger.addHandler(_console)
# Set up command-line options
_optParser = optparse.OptionParser(usage='Usage: %prog [options]')
_optParser.add_option(
'--body-systems', default=DEF_BODY_SYSTEMS, metavar='URL',
    help='Set body systems RDF source to URL, default "%default"'
)
_optParser.add_option(
'--diseases', default=DEF_DISEASES, metavar='URL',
help='Set diseases RDF source to URL, default "%default"'
)
_optParser.add_option(
'--resources', default=DEF_RESOURCES, metavar='URL',
help='Set misc resources RDF source to URL, default "%default"'
)
_optParser.add_option(
    '--publications', default=DEF_PUBLICATIONS, metavar='URL',
help='Set publications RDF source to URL, default "%default"'
)
_optParser.add_option(
'--idapi', default=DEF_IDAPI, metavar='URL',
help='Set biomarker id API source to URL, default "%default"'
)
_optParser.add_option(
'--additional-publications', default=DEF_ADD_PUBS, metavar='URL',
help='Set the additional publications RDF source to URL, default "%default"'
)
_optParser.add_option(
'--sites', default=DEF_SITES, metavar='URL',
help='Set sites RDF source to URL, default "%default"'
)
_optParser.add_option(
'--people', default=DEF_PEOPLE, metavar='URL',
help='Set people RDF source to URL, default "%default"'
)
_optParser.add_option(
'--committees', default=DEF_COMMITTEES, metavar='URL',
help='Set committees RDF source to URL, default "%default"'
)
_optParser.add_option(
'--biomarkers', default=DEF_BIOMARKERS, metavar='URL',
help='Set biomarkers RDF source to URL, default "%default"'
)
_optParser.add_option(
'--biomarker-organs', default=DEF_BMO, metavar='URL',
    help='Set biomarker-organs RDF source to URL, default "%default"'
)
_optParser.add_option(
'--biomuta', default=DEF_BIOMUTA, metavar='URL',
help='Set biomuta RDF source to URL, default "%default"'
)
_optParser.add_option(
'--protocols', default=DEF_PROTOCOLS, metavar='URL',
    help='Set protocols RDF source to URL, default "%default"'
)
_optParser.add_option(
'--biomarker_summary', default=DEF_BIOMARKERS_SUMMARY, metavar='URL',
help='Set biomarker summary JSON source to URL, default "%default"'
)
_optParser.add_option(
'--publication_summary', default=DEF_PUBLICATIONS_SUMMARY, metavar='URL',
    help='Set publication summary JSON source to URL, default "%default"'
)
_optParser.add_option(
'--site_summary', default=DEF_SITE_SUMMARY, metavar='URL',
    help='Set site summary JSON source to URL, default "%default"'
)
_optParser.add_option(
'--ecas_summary', default=DEF_ECAS_SUMMARY, metavar='URL',
help='Set science data summary JSON source to URL, default "%default"'
)
_optParser.add_option('-v', '--verbose', action='store_true', help='Be overly verbose')
def setupZopeSecurity(app):
_logger.debug('Setting up Zope security')
acl_users = app.acl_users
setSecurityPolicy(PermissiveSecurityPolicy())
newSecurityManager(None, OmnipotentUser().__of__(acl_users))
def getPortal(app, portalID):
_logger.debug('Getting portal "%s"', portalID)
portal = getattr(app, portalID)
setSite(portal)
return portal
def setRDFSources(
app, portalID, organs, diseases, resources, publications, addPubs, sites, people, committees,
bm, bmo, biomuta, protocols, bmsum, pubsum, sitesum, ecasum, idapi
):
_logger.info('Setting RDF sources on portal "%s"', portalID)
app = makerequest.makerequest(app)
setupZopeSecurity(app)
portal = getPortal(app, portalID)
if 'resources' in portal.keys() and 'body-systems' in portal['resources'].keys():
_logger.info('Setting body-systems to %s', organs)
bodySystems = portal['resources']['body-systems']
bodySystems.rdfDataSource = organs
else:
_logger.debug('No resources/body-systems folder found')
if 'resources' in portal.keys() and 'diseases' in portal['resources'].keys():
_logger.info('Setting diseases to %s', diseases)
d = portal['resources']['diseases']
d.rdfDataSource = diseases
else:
_logger.debug('No resources/diseases folder found')
if 'resources' in portal.keys() and 'miscellaneous-resources' in portal['resources'].keys():
_logger.info('Setting miscellaneous-resources to %s', resources)
misc = portal['resources']['miscellaneous-resources']
misc.rdfDataSource = resources
else:
_logger.debug('No resources/miscellaneous-resources folder found')
if 'publications' in portal.keys():
_logger.info('Setting publications to %s', publications)
pubs = portal['publications']
pubs.rdfDataSource, pubs.additionalDataSources, pubs.pubSumDataSource = publications, [addPubs], pubsum
else:
_logger.debug('No publications folder found')
if 'sites' in portal.keys():
_logger.info('Setting sites and people to %s and %s respectively', sites, people)
s = portal['sites']
s.rdfDataSource, s.peopleDataSource = sites, people
else:
_logger.debug('No sites folder found')
if 'science-data' in portal.keys():
_logger.info('Setting science data summary to %s', ecasum)
s = portal['science-data']
s.dsSumDataSource = ecasum
else:
_logger.debug('No science data folder found')
if 'committees' in portal.keys():
_logger.info('Setting committees to %s', committees)
c = portal['committees']
c.rdfDataSource, c.siteSumDataSource = committees, sitesum
else:
_logger.debug('No committees folder found')
if 'biomarkers' in portal.keys():
_logger.info('Setting sources for biomarkers to %s, %s, and %s', bm, bmo, biomuta)
biomarkers = portal['biomarkers']
biomarkers.rdfDataSource, biomarkers.bmoDataSource, biomarkers.bmuDataSource, biomarkers.bmSumDataSource, biomarkers.idDataSource = bm, bmo, biomuta, bmsum, idapi
else:
_logger.debug('No biomarkers folder found')
if 'protocols' in portal.keys():
_logger.info('Setting protocols to %s', protocols)
p = portal['protocols']
p.rdfDataSource = protocols
else:
_logger.debug('No protocols folder found')
transaction.commit()
def main(argv):
options, args = _optParser.parse_args(argv)
if len(args) > 1:
_optParser.error('This script takes no arguments (only options)')
if options.verbose:
_logger.setLevel(logging.DEBUG)
global app, portalID
setRDFSources(
app,
portalID,
options.body_systems,
options.diseases,
options.resources,
options.publications,
options.additional_publications,
options.sites,
options.people,
options.committees,
options.biomarkers,
options.biomarker_organs,
options.biomuta,
options.protocols,
options.biomarker_summary,
options.publication_summary,
options.site_summary,
options.ecas_summary,
options.idapi
)
return True
if __name__ == '__main__':
# The [2:] works around plone.recipe.zope2instance-4.2.6's lame bin/interpreter script issue
sys.exit(0 if main(sys.argv[2:]) is True else -1)
|
from .asset import *
from .channel import *
from .message import *
from .server import *
from .user import *
|
#!/usr/bin/env python3
"""
Copyright (C) 2018 Rene Rivera.
Use, modification and distribution are subject to the
Boost Software License, Version 1.0. (See accompanying file
LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
from bls.build_tools import BuildB2
if __name__ == "__main__":
BuildB2()
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Problem from ECLiPSe
# http://eclipse.crosscoreop.com/eclipse/examples/nono.ecl.txt
# Problem n3 ( http://www.pro.or.jp/~fuji/java/puzzle/nonogram/index-eng.html )
# 'Car'
#
rows = 10
row_rule_len = 4
row_rules = [
[0,0,0,4],
[0,1,1,6],
[0,1,1,6],
[0,1,1,6],
[0,0,4,9],
[0,0,1,1],
[0,0,1,1],
[0,2,7,2],
[1,1,1,1],
[0,0,2,2]
]
cols = 15
col_rule_len = 2
col_rules = [
[0,4],
[1,2],
[1,1],
[5,1],
[1,2],
[1,1],
[5,1],
[1,1],
[4,1],
[4,1],
[4,2],
[4,1],
[4,1],
[4,2],
[0,4]
]
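# Hedged helper, not part of the original ECLiPSe model: the run lengths of a
# candidate 0/1 line can be checked against one of the rules above (rules are
# left-padded with zeros, so the padding is stripped before comparing).
def check_line(line, rule):
    runs, count = [], 0
    for cell in line:
        if cell == 1:
            count += 1
        elif count:
            runs.append(count)
            count = 0
    if count:
        runs.append(count)
    return runs == [r for r in rule if r != 0]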
|
"""
Some instructions on writing CLI tests:
1. Look at test_ray_start for a simple output test example.
2. To get a valid regex, start with copy-pasting your output from a captured
version (no formatting). Then escape ALL regex characters (parentheses,
brackets, dots, etc.). THEN add ".+" to all the places where info might
change run to run.
3. Look at test_ray_up for an example of how to mock AWS, commands,
and autoscaler config.
4. Print your outputs!!!! Tests are impossible to debug if they fail
and you did not print anything. Since command output is captured by click,
MAKE SURE YOU print(result.output) when tests fail!!!
WARNING: IF YOU MOCK AWS, DON'T FORGET THE AWS_CREDENTIALS FIXTURE.
THIS IS REQUIRED SO BOTO3 DOES NOT ACCESS THE ACTUAL AWS SERVERS.
Note: config cache does not work with AWS mocks since the AWS resource ids are
randomized each time.
"""
import glob
import sys
import tempfile
import uuid
import re
import os
from contextlib import contextmanager
from pathlib import Path
import pytest
import moto
from moto import mock_ec2, mock_iam
from unittest.mock import MagicMock, patch
from click.testing import CliRunner
from testfixtures import Replacer
from testfixtures.popen import MockPopen, PopenBehaviour
import ray
import ray.autoscaler._private.aws.config as aws_config
import ray.scripts.scripts as scripts
from ray.test_utils import wait_for_condition
boto3_list = [{
"InstanceType": "t1.micro",
"VCpuInfo": {
"DefaultVCpus": 1
},
"MemoryInfo": {
"SizeInMiB": 627
}
}, {
"InstanceType": "t3a.small",
"VCpuInfo": {
"DefaultVCpus": 2
},
"MemoryInfo": {
"SizeInMiB": 2048
}
}, {
"InstanceType": "m4.4xlarge",
"VCpuInfo": {
"DefaultVCpus": 16
},
"MemoryInfo": {
"SizeInMiB": 65536
}
}, {
"InstanceType": "p3.8xlarge",
"VCpuInfo": {
"DefaultVCpus": 32
},
"MemoryInfo": {
"SizeInMiB": 249856
},
"GpuInfo": {
"Gpus": [{
"Name": "V100",
"Count": 4
}]
}
}]
@pytest.fixture
def configure_lang():
"""Configure output for travis + click."""
if sys.platform != "darwin":
os.environ["LC_ALL"] = "C.UTF-8"
os.environ["LANG"] = "C.UTF-8"
@pytest.fixture
def configure_aws():
"""Mocked AWS Credentials for moto."""
os.environ["LC_ALL"] = "C.UTF-8"
os.environ["LANG"] = "C.UTF-8"
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
# moto (boto3 mock) only allows a hardcoded set of AMIs
dlami = moto.ec2.ec2_backends["us-west-2"].describe_images(
filters={"name": "Deep Learning AMI Ubuntu*"})[0].id
aws_config.DEFAULT_AMI["us-west-2"] = dlami
list_instances_mock = MagicMock(return_value=boto3_list)
with patch("ray.autoscaler._private.aws.node_provider.list_ec2_instances",
list_instances_mock):
yield
@pytest.fixture(scope="function")
def _unlink_test_ssh_key():
"""Use this to remove the keys spawned by ray up."""
yield
try:
for path in glob.glob(os.path.expanduser("~/.ssh/__test-cli_key*")):
os.remove(path)
except FileNotFoundError:
pass
def _debug_die(result):
print("!!!!")
print(result.output)
print("!!!!")
assert False
def _die_on_error(result):
if result.exit_code == 0:
return
_debug_die(result)
def _debug_check_line_by_line(result, expected_lines):
output_lines = result.output.split("\n")
i = 0
for out in output_lines:
if i >= len(expected_lines):
i += 1
print("!!!!!! Expected fewer lines")
context = [f"CONTEXT: {line}" for line in output_lines[i - 3:i]]
print("\n".join(context))
extra = [f"-- {line}" for line in output_lines[i:]]
print("\n".join(extra))
break
exp = expected_lines[i]
matched = re.fullmatch(exp + r" *", out) is not None
if not matched:
print(f"{i:>3}: {out}")
print(f"!!! ^ ERROR: Expected (regex): {repr(exp)}")
else:
print(f"{i:>3}: {out}")
i += 1
if i < len(expected_lines):
print("!!! ERROR: Expected extra lines (regex):")
for line in expected_lines[i:]:
print(repr(line))
assert False
@contextmanager
def _setup_popen_mock(commands_mock, commands_verifier=None):
"""
Mock subprocess.Popen's behavior and if applicable, intercept the commands
received by Popen and check if they are as expected using
commands_verifier provided by caller.
TODO(xwjiang): Ideally we should write a lexical analyzer that can parse
in a more intelligent way.
"""
Popen = MockPopen()
Popen.set_default(behaviour=commands_mock)
with Replacer() as replacer:
replacer.replace("subprocess.Popen", Popen)
yield
if commands_verifier:
assert commands_verifier(Popen.all_calls)
def _load_output_pattern(name):
pattern_dir = Path(__file__).parent / "test_cli_patterns"
with open(str(pattern_dir / name)) as f:
# Remove \n from each line.
# Substitute the Ray version in each line containing the string
# {ray_version}.
out = []
for x in f.readlines():
if "{ray_version}" in x:
out.append(x[:-1].format(ray_version=ray.__version__))
else:
out.append(x[:-1])
return out
def _check_output_via_pattern(name, result):
expected_lines = _load_output_pattern(name)
if result.exception is not None:
print(result.output)
raise result.exception from None
expected = r" *\n".join(expected_lines) + "\n?"
if re.fullmatch(expected, result.output) is None:
_debug_check_line_by_line(result, expected_lines)
assert result.exit_code == 0
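# Hedged illustration (unused by the tests): step 2 of the module docstring in
# practice. A captured literal line is regex-escaped and its run-specific parts
# widened to ".+"; the sample line below is made up purely for illustration.
def _example_pattern_from_literal():
    literal = "Connected to Ray cluster at 127.0.0.1:6379"
    pattern = re.escape(literal).replace(re.escape("127.0.0.1:6379"), ".+")
    assert re.fullmatch(pattern, "Connected to Ray cluster at 10.0.0.5:6379")
    return pattern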
DEFAULT_TEST_CONFIG_PATH = str(
Path(__file__).parent / "test_cli_patterns" / "test_ray_up_config.yaml")
MISSING_MAX_WORKER_CONFIG_PATH = str(
Path(__file__).parent / "test_cli_patterns" /
"test_ray_up_no_max_worker_config.yaml")
DOCKER_TEST_CONFIG_PATH = str(
Path(__file__).parent / "test_cli_patterns" /
"test_ray_up_docker_config.yaml")
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
def test_ray_start(configure_lang):
runner = CliRunner()
temp_dir = os.path.join("/tmp", uuid.uuid4().hex)
result = runner.invoke(scripts.start, [
"--head", "--log-style=pretty", "--log-color", "False", "--port", "0",
"--temp-dir", temp_dir
])
# Check that --temp-dir arg worked:
assert os.path.isfile(os.path.join(temp_dir, "ray_current_cluster"))
assert os.path.isdir(os.path.join(temp_dir, "session_latest"))
_die_on_error(runner.invoke(scripts.stop))
_check_output_via_pattern("test_ray_start.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_up(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_check_output_via_pattern("test_ray_up.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_up_no_head_max_workers(configure_lang, _unlink_test_ssh_key,
configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(scripts.up, [
MISSING_MAX_WORKER_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_check_output_via_pattern("test_ray_up_no_max_worker.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_up_docker(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if ".Config.Env" in command:
return PopenBehaviour(stdout=b"{}")
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(scripts.up, [
DOCKER_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_check_output_via_pattern("test_ray_up_docker.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_up_record(configure_lang, _unlink_test_ssh_key, configure_aws):
def commands_mock(command, stdin):
# if we want to have e.g. some commands fail,
# we can have overrides happen here.
# unfortunately, cutting out SSH prefixes and such
# is, to put it lightly, non-trivial
if "uptime" in command:
return PopenBehaviour(stdout=b"MOCKED uptime")
if "rsync" in command:
return PopenBehaviour(stdout=b"MOCKED rsync")
if "ray" in command:
return PopenBehaviour(stdout=b"MOCKED ray")
return PopenBehaviour(stdout=b"MOCKED GENERIC")
with _setup_popen_mock(commands_mock):
# config cache does not work with mocks
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=record"
])
_check_output_via_pattern("test_ray_up_record.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_attach(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("ubuntu@ip-.+:~$ exit")
return PopenBehaviour(stdout="ubuntu@ip-.+:~$ exit")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_die_on_error(result)
result = runner.invoke(scripts.attach, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache",
"--log-style=pretty", "--log-color", "False"
])
_check_output_via_pattern("test_ray_attach.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_dashboard(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("ubuntu@ip-.+:~$ exit")
return PopenBehaviour(stdout="ubuntu@ip-.+:~$ exit")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_die_on_error(result)
result = runner.invoke(scripts.dashboard,
[DEFAULT_TEST_CONFIG_PATH, "--no-config-cache"])
_check_output_via_pattern("test_ray_dashboard.txt", result)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_exec(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
def commands_verifier(calls):
for call in calls:
if len(call[1]) > 0:
if any(" ray stop; " in token for token in call[1][0]):
return True
return False
with _setup_popen_mock(commands_mock, commands_verifier):
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_die_on_error(result)
result = runner.invoke(scripts.exec, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache",
"--log-style=pretty", "\"echo This is a test!\"", "--stop"
])
_check_output_via_pattern("test_ray_exec.txt", result)
# Try to check if we are running in travis. Bazel overrides and controls
# env vars, so the typical travis env-vars don't help.
# Unfortunately it will not be nice if your username is travis
# and you're running on a Mac.
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_submit(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
# TODO(maximsmol): this is a hack since stdout=sys.stdout
# doesn't work with the mock for some reason
if "rsync" not in command:
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_die_on_error(result)
with tempfile.NamedTemporaryFile(suffix="test.py", mode="w") as f:
f.write("print('This is a test!')\n")
result = runner.invoke(
scripts.submit,
[
DEFAULT_TEST_CONFIG_PATH,
"--no-config-cache",
"--log-style=pretty",
"--log-color",
"False",
# this is somewhat misleading, since the file
# actually never gets run
# TODO(maximsmol): make this work properly one day?
f.name
])
_check_output_via_pattern("test_ray_submit.txt", result)
def test_ray_status():
import ray
address = ray.init().get("redis_address")
runner = CliRunner()
def output_ready():
result = runner.invoke(scripts.status)
result.stdout
return not result.exception and "memory" in result.output
wait_for_condition(output_ready)
result = runner.invoke(scripts.status, [])
_check_output_via_pattern("test_ray_status.txt", result)
result_arg = runner.invoke(scripts.status, ["--address", address])
_check_output_via_pattern("test_ray_status.txt", result_arg)
# Try to check status with RAY_ADDRESS set
os.environ["RAY_ADDRESS"] = address
result_env = runner.invoke(scripts.status)
_check_output_via_pattern("test_ray_status.txt", result_env)
result_env_arg = runner.invoke(scripts.status, ["--address", address])
_check_output_via_pattern("test_ray_status.txt", result_env_arg)
@pytest.mark.skipif(
sys.platform == "darwin" and "travis" in os.environ.get("USER", ""),
reason=("Mac builds don't provide proper locale support"))
@mock_ec2
@mock_iam
def test_ray_cluster_dump(configure_lang, configure_aws, _unlink_test_ssh_key):
def commands_mock(command, stdin):
print("This is a test!")
return PopenBehaviour(stdout=b"This is a test!")
with _setup_popen_mock(commands_mock):
runner = CliRunner()
result = runner.invoke(scripts.up, [
DEFAULT_TEST_CONFIG_PATH, "--no-config-cache", "-y",
"--log-style=pretty", "--log-color", "False"
])
_die_on_error(result)
result = runner.invoke(scripts.cluster_dump,
[DEFAULT_TEST_CONFIG_PATH, "--no-processes"])
_check_output_via_pattern("test_ray_cluster_dump.txt", result)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ...spec.attr import AttributeGroup
class ReferenceAttributeGroup(AttributeGroup):
__attributes__ = {
'ref_obj': dict(),
'normalized_ref': dict(),
}
class PathItemAttributeGroup(AttributeGroup):
__attributes__ = {
'normalized_ref': dict(),
'ref_obj': dict(),
'final_obj': dict(),
}
|
import torch.nn as nn
import torch
import torchvision
import torch.nn.functional as F
class EnsembleModel(nn.Module):
def __init__(self,num_classes,layer):
super(EnsembleModel,self).__init__()
# model A: resnet50
if layer==50:
self.modelA=torchvision.models.resnet50(pretrained=True)
elif layer==101:
self.modelA=torchvision.models.resnet101(pretrained=True)
featA=self.modelA.fc.in_features
self.modelA.fc=nn.Identity()
# model B: resnest50
if layer==50:
self.modelB=torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
elif layer==101:
self.modelB=torch.hub.load('zhanghang1989/ResNeSt', 'resnest101', pretrained=True)
featB=self.modelB.fc.in_features
self.modelB.fc=nn.Identity()
# classifier
self.classifier=nn.Sequential(
nn.Linear(featA+featB,2048),
nn.ReLU(),
nn.Linear(2048,num_classes)
)
def forward(self,x):
x1=self.modelA(x.clone())
x1=x1.view(x1.size(0),-1)
x2=self.modelB(x)
x2=x2.view(x2.size(0),-1)
out=torch.cat((x1,x2),dim=1)
out=self.classifier(out)
return out
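# Hedged usage sketch: instantiating the ensemble downloads pretrained weights for
# both backbones (torchvision ResNet and ResNeSt via torch.hub), so this is kept
# behind a main guard; num_classes=10 and the 224x224 input size are arbitrary.
if __name__ == "__main__":
    model = EnsembleModel(num_classes=10, layer=50)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(2, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([2, 10])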
|
import numpy as np
import torch
from torch.distributions import Normal
class Actor(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Actor, self).__init__()
# defining fully-connected layers
self.fc_1 = torch.nn.Linear(input_size, hidden_size)
self.fc_2 = torch.nn.Linear(hidden_size, hidden_size)
self.mu_layer = torch.nn.Linear(hidden_size, output_size)
self.std_layer = torch.nn.Linear(hidden_size, output_size)
# get action for given observation
def forward(self, observation, deterministic=False, with_logprob=True):
net_out = torch.relu(self.fc_1(observation))
net_out = self.fc_2(net_out)
mu = self.mu_layer(net_out)
log_std = self.std_layer(net_out)
        # minimum log standard deviation is chosen as -20
        # maximum log standard deviation is chosen as +2
log_std = torch.clamp(log_std, -20, 2)
std = torch.exp(log_std)
# pre-squash distribution and sample
pi_distribution = Normal(mu, std)
if deterministic:
# only used for evaluating policy at test time.
pi_action = mu
else:
pi_action = pi_distribution.rsample()
if with_logprob:
logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)
logp_pi -= (2 * (np.log(2) - pi_action - torch.nn.functional.softplus(-2 * pi_action))).sum(axis=1)
else:
logp_pi = None
        # only 60% of the steering command will be used
steer = 0.6 * pi_action[:, 0].reshape(-1, 1)
        # acceleration is from 0 to 1; braking is from 0 to -1
accel_brake = pi_action[:, 1].reshape(-1, 1)
# apply tangent hyperbolic activation functions to actions
steer = torch.tanh(steer)
accel_brake = torch.tanh(accel_brake)
pi_action= torch.cat((steer, accel_brake), 1)
return pi_action, logp_pi
class Critic(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Critic, self).__init__()
# defining fully-connected layers
self.fc_1 = torch.nn.Linear(input_size - 2, hidden_size)
self.fc_2 = torch.nn.Linear(hidden_size + output_size, hidden_size)
self.fc_3 = torch.nn.Linear(hidden_size, hidden_size)
self.fc_4 = torch.nn.Linear(hidden_size, 1)
# get value for given state-action pair
def forward(self, state, action):
out = self.fc_1(state)
out = torch.nn.functional.relu(out)
out = torch.nn.functional.relu(self.fc_2(torch.cat([out, action], 1)))
out = torch.nn.functional.relu(self.fc_3(out))
out = self.fc_4(out)
return out
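# --- Usage sketch (editor's addition): shapes of a sampled action and its value ---
# Assumes an observation of size obs_dim and the 2-D action (steer, accel/brake)
# implied by Actor.forward; note Critic.fc_1 expects input_size - 2 features,
# so the critic is constructed with obs_dim + act_dim here.
if __name__ == "__main__":
    obs_dim, hidden, act_dim = 10, 64, 2
    actor = Actor(obs_dim, hidden, act_dim)
    critic = Critic(obs_dim + act_dim, hidden, act_dim)
    obs = torch.randn(4, obs_dim)
    action, logp = actor(obs)
    q_value = critic(obs, action)
    print(action.shape, logp.shape, q_value.shape)  # [4, 2], [4], [4, 1]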
|
import os, logging, sys, subprocess, argparse, time
import xml.etree.ElementTree as xmlparse
from nc_config import *
from exe_cmd import *
from topo import *
from fail_recovery import *
###########################################
## get parameters
###########################################
class Parameters:
def __init__(self, config_file):
root_dir = xmlparse.parse(config_file).getroot()
self.config_file = config_file
self.project_dir = root_dir.find("projectDir").text
self.topology_file = self.project_dir + "/" + root_dir.find("topology").find("topologyFileName").text
self.thrift_base_port = int(root_dir.find("thriftBasePort").text)
self.bmv2 = root_dir.find("bmv2").text
self.p4c_bmv2 = root_dir.find("p4cBmv2").text
self.switch_json = self.project_dir + "/" + root_dir.find("switchJson").text
self.num_vnode = int(root_dir.find("numVirtualNode").text)
self.num_replica = int(root_dir.find("numReplica").text)
self.num_kv = int(root_dir.find("numKeyValue").text)
self.switch_p4 = self.project_dir + "/" + root_dir.find("p4src").text
self.runtime_CLI = root_dir.find("bmv2").text + "/" + root_dir.find("runtimeCLI").text
self.vring_file = self.project_dir + "/" + root_dir.find("topology").find("vringFileName").text
self.register_size = int(root_dir.find("registerSize").text)
self.size_vgroup = int(root_dir.find("sizeVirtualGroup").text)
def compile_p4_switch(parameters):
logging.info("Generate switch json...")
exe_cmd("%s/p4c_bm/__main__.py %s --json %s" % (parameters.p4c_bmv2, parameters.switch_p4, parameters.switch_json))
return
def compile_all(parameters):
compile_p4_switch(parameters)
return
def run(parameters):
compile_all(parameters)
logging.info("Warm up...")
exe_cmd("%s/targets/simple_switch/simple_switch > /dev/null 2>&1" % parameters.bmv2)
exe_cmd("mkdir -p %s/logs/switches" % parameters.project_dir)
logging.info("Start mininet...")
(switches, hosts, net) = config_mininet(parameters)
logging.info("Get chain informations...")
chains = {}
with open(parameters.vring_file, "r") as f:
for i in range(parameters.num_vnode):
line = f.readline().split()
chains[int(line[0])] = line[1:]
return (switches, hosts, net, chains)
def clean():
print "Clean environment..."
exe_cmd("ps -ef | grep nc_socket.py | grep -v grep | awk '{print $2}' | xargs kill -9")
exe_cmd("ps -ef | grep NetKVController.jar | grep -v grep | awk '{print $2}' | xargs kill -9")
exe_cmd("ps -ef | grep tcpdump | grep -v grep | awk '{print $2}' | xargs kill -9")
exe_cmd("ps -ef | grep dist_txn | grep -v grep | awk '{print $2}' | xargs kill -9")
exe_cmd("rm -f *.pcap")
exe_cmd("rm -f *.out *.pyc")
exe_cmd("rm -f *.log.txt *.log.*.txt")
exe_cmd("rm -f tmp_send_cmd_noreply.txt tmp_send_cmd.txt")
exe_cmd("killall lt-simple_switch >/dev/null 2>&1")
exe_cmd("mn -c >/dev/null 2>&1")
#exe_cmd("ps -ef | grep run.py | grep -v grep | awk '{print $2}' | xargs kill -9")
exe_cmd("killall -9 redis-server > /dev/null 2>&1")
exe_cmd("killall -9 redis_proxy > /dev/null 2>&1")
exe_cmd("killall -9 cr_backend > /dev/null 2>&1")
def init_flowtable(parameters):
role = [100, 101, 102]
for switch_id in range(3):
switch_ip = IP_PREFIX + str(switch_id + 1)
switch_port = THRIFT_PORT_OF_SWITCH[switch_id]
logging.info(switch_port)
init_cmd = ""
for i in range(parameters.num_kv):
key = ENTRY[1][i]
table_add_getAddress_cmd = "table_add get_my_address get_my_address_act " + str(key) + " => " + switch_ip + " " + str(role[switch_id])
table_add_findindex_cmd = "table_add find_index find_index_act " + str(key) + " => " + str(i)
register_write_value_cmd = "register_write value_reg " + str(i) + " " + str(i)
init_cmd = init_cmd + table_add_getAddress_cmd + "\n" + table_add_findindex_cmd + "\n" + register_write_value_cmd + "\n"
send_cmd_to_port_noreply(parameters, init_cmd, switch_port)
def send_traffic(parameters, switches, hosts, net):
read_host_id = 0
read_host = net.get('h%d' % (read_host_id + 1))
read_host.sendCmd("sh %s/client/set_arp.sh" % (parameters.project_dir))
print read_host.waitOutput()
read_host.sendCmd("python %s/client/receiver.py 10.0.0.1 > %s/logs/%d.tmp_read_receive.log & python %s/client/nc_socket.py read %d %s %d %d > %s/logs/tmp_read_send.log &"
% (parameters.project_dir, parameters.project_dir, parameters.size_vgroup, parameters.project_dir, parameters.num_vnode, parameters.vring_file, parameters.num_kv, parameters.size_vgroup, parameters.project_dir))
write_host_id = 1
write_host = net.get('h%d' % (write_host_id + 1))
write_host.sendCmd("sh %s/client/set_arp.sh" % (parameters.project_dir))
print write_host.waitOutput()
write_host.sendCmd("python %s/client/receiver.py 10.0.0.2 > %s/logs/%d.tmp_write_receive.log & python %s/client/nc_socket.py write %d %s %d %d > %s/logs/tmp_write_send.log &"
% (parameters.project_dir, parameters.project_dir, parameters.size_vgroup, parameters.project_dir, parameters.num_vnode, parameters.vring_file, parameters.num_kv, parameters.size_vgroup, parameters.project_dir))
return (read_host,write_host)
def stop_switch(fail_switch_id, switches, hosts, net):
fail_switch = net.get('s%d' % (fail_switch_id + 1))
fail_switch.stop()
return
###########################################
## run test in normal case
###########################################
def test_normal(parameters):
(switches, hosts, net, chains) = run(parameters)
init_flowtable(parameters)
logging.info("Run for 60 seconds...")
time.sleep(60)
net.stop()
clean()
return
###########################################
## run test in failure case
###########################################
def test_failure(parameters, fail_switch_id):
### install initial rules
(switches, hosts, net, chains) = run(parameters)
init_flowtable(parameters)
### start sending traffic on host0
logging.info("Sending traffic...")
(read_host, write_host) = send_traffic(parameters, switches, hosts, net)
### wait for 10 seconds
logging.info("Wait for 10 seconds...")
time.sleep(TENSECONDS)
### stop one switch
logging.info("Stop a switch...")
stop_switch(fail_switch_id, switches, hosts, net)
    ### wait for failure detection (0.5 s)
logging.info("Assume the failure is discovered in 0.5s...")
time.sleep(0.5)
### update rules for fast failover
logging.info("Start failover...")
failover(parameters, fail_switch_id, chains)
### wait for 10 seconds
logging.info("Wait for 10 seconds...")
time.sleep(TENSECONDS)
### update rules for failure recovery
logging.info("Start failrecovering...")
failure_recovery(parameters, fail_switch_id, chains)
### wait for 10 seconds
logging.info("Wait for 10 seconds...")
time.sleep(TENSECONDS)
### clean environment
read_host.monitor()
write_host.monitor()
net.stop()
clean()
return
def usage():
print "Usage:"
print " To run test in normal case: python run_test.py normal"
print " To run test in failure case: python run_test.py failure"
return
if __name__ == "__main__":
exe_cmd("rm -f *.log.txt")
logging.basicConfig(level=logging.INFO)
if (len(sys.argv) != 2):
usage()
quit()
config_file = "config/config.xml"
parameters = Parameters(config_file)
if sys.argv[1] == "normal":
test_normal(parameters)
elif sys.argv[1] == "failure":
test_failure(parameters, 1)
else:
usage()
quit()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetAppsAppResult',
'GetProductsProductResult',
]
@pulumi.output_type
class GetAppsAppResult(dict):
def __init__(__self__, *,
app_key: str,
app_name: str,
bundle_id: str,
create_time: str,
encoded_icon: str,
id: str,
industry_id: str,
package_name: str,
product_id: str,
type: str):
"""
:param str app_key: Application AppKey, which uniquely identifies an application when requested by the interface
:param str app_name: The Name of the App.
:param str bundle_id: iOS application ID. Required when creating an iOS app. **NOTE:** Either `bundle_id` or `package_name` must be set.
:param str create_time: The CreateTime of the App.
:param str encoded_icon: Base64 string of picture.
:param str id: The ID of the App.
:param str industry_id: The Industry ID of the app. For information about Industry and how to use it, MHUB[Industry](https://help.aliyun.com/document_detail/201638.html).
:param str package_name: Android App package name. **NOTE:** Either `bundle_id` or `package_name` must be set.
:param str product_id: The ID of the Product.
:param str type: The type of the App. Valid values: `Android` and `iOS`.
"""
pulumi.set(__self__, "app_key", app_key)
pulumi.set(__self__, "app_name", app_name)
pulumi.set(__self__, "bundle_id", bundle_id)
pulumi.set(__self__, "create_time", create_time)
pulumi.set(__self__, "encoded_icon", encoded_icon)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "industry_id", industry_id)
pulumi.set(__self__, "package_name", package_name)
pulumi.set(__self__, "product_id", product_id)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="appKey")
def app_key(self) -> str:
"""
Application AppKey, which uniquely identifies an application when requested by the interface
"""
return pulumi.get(self, "app_key")
@property
@pulumi.getter(name="appName")
def app_name(self) -> str:
"""
The Name of the App.
"""
return pulumi.get(self, "app_name")
@property
@pulumi.getter(name="bundleId")
def bundle_id(self) -> str:
"""
iOS application ID. Required when creating an iOS app. **NOTE:** Either `bundle_id` or `package_name` must be set.
"""
return pulumi.get(self, "bundle_id")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The CreateTime of the App.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="encodedIcon")
def encoded_icon(self) -> str:
"""
Base64 string of picture.
"""
return pulumi.get(self, "encoded_icon")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the App.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="industryId")
def industry_id(self) -> str:
"""
The Industry ID of the app. For information about Industry and how to use it, MHUB[Industry](https://help.aliyun.com/document_detail/201638.html).
"""
return pulumi.get(self, "industry_id")
@property
@pulumi.getter(name="packageName")
def package_name(self) -> str:
"""
Android App package name. **NOTE:** Either `bundle_id` or `package_name` must be set.
"""
return pulumi.get(self, "package_name")
@property
@pulumi.getter(name="productId")
def product_id(self) -> str:
"""
The ID of the Product.
"""
return pulumi.get(self, "product_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the App. Valid values: `Android` and `iOS`.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class GetProductsProductResult(dict):
def __init__(__self__, *,
id: str,
product_id: str,
product_name: str):
"""
:param str id: The ID of the Product.
:param str product_id: The ID of the Product.
:param str product_name: The name of the Product.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "product_id", product_id)
pulumi.set(__self__, "product_name", product_name)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the Product.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="productId")
def product_id(self) -> str:
"""
The ID of the Product.
"""
return pulumi.get(self, "product_id")
@property
@pulumi.getter(name="productName")
def product_name(self) -> str:
"""
The name of the Product.
"""
return pulumi.get(self, "product_name")
|
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes the prior, posterior, and likelihood."""
from typing import Dict
from group_testing import utils
import jax
import jax.numpy as np
from jax.scipy import special
@jax.jit
def log_likelihood(particles: np.ndarray, test_results: np.ndarray,
groups: np.ndarray, log_specificity: np.ndarray,
log_1msensitivity: np.ndarray) -> float:
"""Computes individual (parallel) log_likelihood of k_groups test results.
Args:
particles: np.ndarray<bool>[n_particles, n_patients]. Each one is a possible
scenario of a disease status of n patients.
test_results: np.ndarray<bool>[n_groups] the results given by the wet lab for
each of the tested groups.
groups: np.ndarray<bool>[num_groups, num_patients] the definition of the
group that were tested.
log_specificity: np.ndarray. Depending on the configuration, it can be an
array of size one or more if we have different sensitivities per group
size.
log_1msensitivity: np.ndarray. Depending on the configuration, it can be an
array of size one or more if we have different specificities per group
size.
Returns:
The log likelihood of the particles given the test results.
"""
positive_in_groups = np.dot(groups, np.transpose(particles)) > 0
group_sizes = np.sum(groups, axis=1)
log_specificity = utils.select_from_sizes(log_specificity, group_sizes)
log_1msensitivity = utils.select_from_sizes(log_1msensitivity, group_sizes)
logit_specificity = special.logit(np.exp(log_specificity))
logit_sensitivity = -special.logit(np.exp(log_1msensitivity))
gamma = log_1msensitivity - log_specificity
add_logits = logit_specificity + logit_sensitivity
ll = np.sum(
positive_in_groups * (gamma + test_results * add_logits)[:, np.newaxis],
axis=0)
return ll + np.sum(log_specificity - test_results * logit_specificity)
@jax.jit
def log_prior(particles: np.ndarray,
base_infection_rate: np.ndarray) -> np.ndarray:
"""Computes log of prior probability of state using infection rate."""
# here base_infection can be either a single number per patient or an array
if np.size(base_infection_rate) == 1: # only one rate
return (np.sum(particles, axis=-1) * special.logit(base_infection_rate) +
particles.shape[0] * np.log(1 - base_infection_rate))
elif base_infection_rate.shape[0] == particles.shape[-1]: # prior per patient
return np.sum(
particles * special.logit(base_infection_rate)[np.newaxis, :] +
np.log(1 - base_infection_rate)[np.newaxis, :],
axis=-1)
else:
raise ValueError("Vector of prior probabilities is not of correct size")
@jax.jit
def log_probability(particles: np.ndarray,
test_results: np.ndarray,
groups: np.ndarray,
log_prior_specificity: np.ndarray,
log_prior_1msensitivity: np.ndarray,
prior_infection_rate: np.ndarray):
"""Given past tests and prior, outputs unnormalized log-probabilities.
Args:
particles: np.ndarray, computing tempered log_posterior for each of these.
test_results: np.ndarray, probability depends on recorded test results
groups: np.ndarray ... tests above defined using these groups.
log_prior_specificity: np.ndarray, specificity expected from test device
log_prior_1msensitivity: np.ndarray, 1-sensitivity expected from test device
prior_infection_rate: np.ndarray, prior on infection.
Returns:
a vector of log probabilities
"""
log_prob = np.zeros((particles.shape[0],))
if test_results is not None:
# if sampling from scratch, include prior and rescale temperature.
log_prob += log_likelihood(particles, test_results, groups,
log_prior_specificity, log_prior_1msensitivity)
if prior_infection_rate is not None:
log_prob += log_prior(particles, prior_infection_rate)
return log_prob
def tempered_logpos_logbase(particles: np.ndarray,
log_posterior_params: Dict[str, np.ndarray],
log_base_measure_params: Dict[str, np.ndarray],
temperature: float):
"""Computes a tempered log posterior and adds a base measure."""
lp_p = log_probability(particles, **log_posterior_params)
lp_b = log_probability(particles, **log_base_measure_params)
return temperature * lp_p + lp_b
|
n = int(input())
wait_time = list(map(int, input().split()))
wait_time.sort(reverse=True)
sum = 0
for i, t in enumerate(wait_time):
sum += (i + 1) * t
print(sum)
|
def tenbase(s, alpha, w, h):
l = []
while s != "":
l.append(s[:(w+1) * h - 1])
s = s[(w+1) * h:]
l = l[::-1]
x = 0
for i in range(len(l) - 1, -1, -1):
x += alpha.index(l[i]) * 20**i
return x
def maybase(x, alpha):
l = []
if x == 0:
l.append(alpha[0])
while x != 0:
a = x // 20
b = x % 20
l.append(alpha[b])
x = a
return "\n".join(l[::-1])
w, h = [int(i) for i in input().split()]
line = []
for i in range(h):
line.append(input())
alpha = []
for i in range(len(line[0]) // w):
s = ""
f = True
for j in range(w):
if f:
f = False
else:
s += "\n"
s += line[j][i * w: i * w + w]
alpha.append(s)
s1 = "\n".join([input() for i in range(int(input()))])
s2 = "\n".join([input() for i in range(int(input()))])
x1 = tenbase(s1, alpha, w ,h)
x2 = tenbase(s2, alpha, w, h)
o = input()
if o == "+": print(maybase(x1 + x2, alpha))
elif o == "-": print(maybase(x1 - x2, alpha))
elif o == "*": print(maybase(x1 * x2, alpha))
elif o == "/": print(maybase(x1 // x2, alpha))
|
print('===== DESAFIO 006 =====')
x = int(input('Digite um numero: '))
print(f'Esse é seu dobro {x*2}\n Seu triplo {x*3}\n Sua Raiz Quadrada {x**(1/2)}')
|
from random import randint
from enums import *
from stix_generator import *
from util import Util as u
def make_cybox_object_list(objects):
"""
Makes an object list out of cybox objects to put in a cybox container
"""
cybox_objects = {}
for i in range(len(objects)):
cybox_objects[str(i)] = objects[i]
return cybox_objects
def make_cybox_container(objects):
"""
makes cybox container
"""
return {
"spec_version": "3.0",
"objects": make_cybox_object_list(objects)
}
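# Example (editor's note): make_cybox_container([{"type": "mutex"}]) returns
# {"spec_version": "3.0", "objects": {"0": {"type": "mutex"}}}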
def make_cybox_object(ctype, desc = "", extended_properties = {}):
"""
makes a cybox object (that goes in cybox list then container)
"""
cybox_object = {}
cybox_object["type"] = ctype
cybox_object['description'] = desc
cybox_object['extended_properties'] = extended_properties
return cybox_object
def make_extended_properties(extensions = []):
"""
Makes an extended property for a cybox object
"""
props = {}
for ext in extensions:
props.update(ext)
return props
def make_extension(name, content):
"""
Makes extensions for cybox objects
"""
return {name: content}
def make_type(ttype):
"""
Makes an object with custom type
"""
return {"type": ttype}
def make_file_object(file_name = "", description = "", hashes = {}, size = 0, file_name_enc = "",
file_name_bin = "", magic_number = -1, mime_type = "", created = u.getcurrenttime(),
modified = u.getcurrenttime(), accessed = u.getcurrenttime(),
parent_directory_ref = "", is_encrypted = False, encryption_algorithm = "",
decryption_key = "", contains_refs = "", file_content_ref = "", extended_properties = {}):
"""
make an object of type file
"""
cybox_object = make_cybox_object('file', description, extended_properties)
cybox_object['file_name'] = file_name
cybox_object['hashes'] = hashes
cybox_object['size'] = size
cybox_object['file_name_enc'] = file_name_enc
cybox_object['file_name_bin'] = file_name_bin
cybox_object['magic_number'] = magic_number
cybox_object['mime_type'] = mime_type
cybox_object['created'] = created
cybox_object['modified'] = modified
cybox_object['accessed'] = accessed
cybox_object['parent_directory_ref'] = parent_directory_ref
cybox_object['is_encrypted'] = is_encrypted
cybox_object['encryption_algorithm'] = encryption_algorithm
cybox_object['decryption_key'] = decryption_key
cybox_object['contains_refs'] = contains_refs
cybox_object['file_content_ref'] = file_content_ref
return cybox_object
def make_ntfs_file_ext(sid = "", alternate_data_streams = []):
"""
    extension to make_file_object; builds the ntfs-ext extension
"""
content = {}
content['sid'] = sid
content['alternate_data_streams'] = alternate_data_streams
return make_extension("ntfs-ext", content)
def make_raster_img_file_ext(image_height = -1, image_width = -1, bits_per_pixel = -1, image_compression_algorithm = "", exif_tags = {}):
"""
    extension to make_file_object; builds the raster image file extension
"""
content = {}
content['image_height'] = image_height
content['image_width'] = image_width
content['bits_per_pixel'] = bits_per_pixel
content['image_compression_algorithm'] = image_compression_algorithm
content['exif_tags'] = exif_tags
return make_extension("ntfs-ext", content)
#TODO file extensions
def make_alternate_data_stream_type(name, size = -1, hashes = ""):
"""
makes alternate_data_stream objects for ntfs-ext
"""
ttype = make_type("alternate_data_streams")
ttype['name'] = name
ttype['size'] = size
ttype['hashes'] = hashes
return ttype
def make_directory_object(path, description = "", path_enc = "", path_bin = "", created = u.getcurrenttime(addition = -1000), modified = u.getcurrenttime(), accessed = u.getcurrenttime(), contains_refs = [], extended_properties = {}):
"""
makes directory object
"""
cybox_object = make_cybox_object('directory', description, extended_properties)
cybox_object['path'] = path
cybox_object['path_enc'] = path_enc
cybox_object['path_bin'] = path_bin
cybox_object['created'] = created
cybox_object['modified'] = modified
cybox_object['accessed'] = accessed
cybox_object['contains_refs'] = contains_refs
return cybox_object
def make_win_reg_key_object(key, description = "", values = [], modified = u.getcurrenttime(), created_by_ref = "", number_of_subkeys = -1, extended_properties = {}):
"""
makes windows-registry-key object
"""
cybox_object = make_cybox_object('windows-registry-key', description, extended_properties)
cybox_object['key'] = key
cybox_object['description'] = description
cybox_object['values'] = values
cybox_object['modified'] = modified
cybox_object['created_by_ref'] = created_by_ref
cybox_object['number_of_subkeys'] = number_of_subkeys
return cybox_object
#TODO registry key value, windows-registry-data-type-cv
def make_mutex_object(name, description = "", extended_properties = {}):
"""
makes mutex object
"""
cybox_object = make_cybox_object('mutex', description, extended_properties)
cybox_object['name'] = name
return cybox_object
def make_x509_cert_object(description = "", is_self_signed = False, hashes = {}, version = "1.0", serial_number = "", signature_algorithm = "", issuer = "", validity_not_before = u.getcurrenttime(), validity_not_after = u.getcurrenttime(addition = 100000), subject = "", subject_public_key_modulus = "", subject_public_key_exponent = -1, x509_v3_extensions = "", extended_properties = {}):
"""
makes x509 certificate object
"""
    cybox_object = make_cybox_object('x509-certificate', description, extended_properties)  # was 'mutex', an apparent copy-paste slip
cybox_object['is_self_signed'] = is_self_signed
cybox_object['hashes'] = hashes
cybox_object['version'] = version
cybox_object['serial_number'] = serial_number
cybox_object['signature_algorithm'] = signature_algorithm
cybox_object['issuer'] = issuer
cybox_object['validity_not_before'] = validity_not_before
cybox_object['validity_not_after'] = validity_not_after
cybox_object['subject'] = subject
cybox_object['subject_public_key_modulus'] = subject_public_key_modulus
cybox_object['subject_public_key_exponent'] = subject_public_key_exponent
cybox_object['x509_v3_extensions'] = x509_v3_extensions
return cybox_object
#TODO x509 v3 ext type
def make_software_object(name, description = "", language = "", vendor = "", version = "", swid = "", extended_properties = {}):
"""
makes a software object
"""
cybox_object = make_cybox_object('software', description, extended_properties)
cybox_object['name'] = name
cybox_object['language'] = language
cybox_object['vendor'] = vendor
cybox_object['version'] = version
cybox_object['swid'] = swid
return cybox_object
def make_artifact_object(description = "", mime_type = "", payload = "", url = "", hashes={},extended_properties = {}):
"""
    makes an artifact object
"""
cybox_object = make_cybox_object('artifact', description, extended_properties)
cybox_object['mime_type'] = mime_type
cybox_object['payload'] = payload
cybox_object['url'] = url
cybox_object['hashes'] = hashes
return cybox_object
def make_process_object(description = "", is_hidden = False, pid = -1, name = "", created = u.getcurrenttime(), cwd = "", arguments = [], environment_variables = {}, opened_connection_refs = [], creator_user_ref = "", binary_ref = "", parent_ref = "", child_refs = [], extended_properties = {}):
"""
makes a process object
"""
cybox_object = make_cybox_object('process', description, extended_properties)
cybox_object['is_hidden'] = is_hidden
cybox_object['pid'] = pid
cybox_object['name'] = name
cybox_object['created'] = created
cybox_object['cwd'] = cwd
cybox_object['arguments'] = arguments
cybox_object['environment_variables'] = environment_variables
cybox_object['opened_connection_refs'] = opened_connection_refs
cybox_object['creator_user_ref'] = creator_user_ref
cybox_object['binary_ref'] = binary_ref
cybox_object['parent_ref'] = parent_ref
cybox_object['child_refs'] = child_refs
return cybox_object
#TODO windows process ext, windows service + vocab,
def make_user_account_object(user_id, description = "", account_login = "", account_type = "", display_name = "", is_service_account = False, is_privileged = False, can_escalate_privs = False, is_disabled = False, account_created = u.getcurrenttime(addition = -10000), account_expires = u.getcurrenttime(addition = 10000), password_last_changed = u.getcurrenttime(), account_first_login = u.getcurrenttime(), account_last_login = u.getcurrenttime(), extended_properties = {}):
"""
makes a user account object
"""
    cybox_object = make_cybox_object('user-account', description, extended_properties)  # was 'mutex', an apparent copy-paste slip
cybox_object['user_id'] = user_id
cybox_object['account_login'] = account_login
cybox_object['account_type'] = account_type
cybox_object['display_name'] = display_name
cybox_object['is_service_account'] = is_service_account
cybox_object['is_privileged'] = is_privileged
cybox_object['can_escalate_privs'] = can_escalate_privs
cybox_object['is_disabled'] = is_disabled
cybox_object['account_created'] = account_created
cybox_object['account_expires'] = account_expires
cybox_object['password_last_changed'] = password_last_changed
cybox_object['account_first_login'] = account_first_login
cybox_object['account_last_login'] = account_last_login
return cybox_object
#TODO vocab, UNIX ext,
def make_ip4v_addr_object(value, description = "", resolves_to_refs = [], belongs_to_refs = [], extended_properties = {}):
"""
makes ipv4 object
"""
cybox_object = make_cybox_object("ipv4-addr", description, extended_properties)
cybox_object['value'] = value
cybox_object['resolves_to_refs'] = resolves_to_refs
cybox_object['belongs_to_refs'] = belongs_to_refs
return cybox_object
def make_ip6v_addr_object(value, description = "", resolves_to_refs = [], belongs_to_refs = [], extended_properties = {}):
"""
makes ipv6 object
"""
cybox_object = make_cybox_object("ipv6-addr", description, extended_properties)
cybox_object['value'] = value
cybox_object['resolves_to_refs'] = resolves_to_refs
cybox_object['belongs_to_refs'] = belongs_to_refs
return cybox_object
def make_mac_addr_object(value, description = "", extended_properties = {}):
"""
make mac address object
"""
cybox_object = make_cybox_object("mac-addr", description, extended_properties)
cybox_object['value'] = value
return cybox_object
def make_email_addr_object(value, description = "", display_name = "", belongs_to_refs = [], extended_properties = {}):
"""
makes email object
"""
cybox_object = make_cybox_object("email-addr", description, extended_properties)
cybox_object['value'] = value
cybox_object['display_name'] = display_name
cybox_object['belongs_to_refs'] = belongs_to_refs
return cybox_object
def make_url_object(value, description = "", extended_properties = {}):
"""
makes url object
"""
cybox_object = make_cybox_object("url", description, extended_properties)
cybox_object['value'] = value
return cybox_object
def make_domain_name_object(value, description = "", resolves_to_refs = [], extended_properties = {}):
"""
makes domain object
"""
cybox_object = make_cybox_object("domain-name", description, extended_properties)
cybox_object['value'] = value
cybox_object['resolves_to_refs'] = resolves_to_refs
return cybox_object
def make_as_object(number, description = "", name = "", rir = "", extended_properties = {}):
"""
makes autonomous-system object
"""
cybox_object = make_cybox_object("autonomous-system", description, extended_properties)
cybox_object['number'] = number
cybox_object['name'] = name
cybox_object['rir'] = rir
return cybox_object
def make_net_traffic_object(value, description = "", start = u.getcurrenttime(), end = u.getcurrenttime(addition = 10000), is_active = True, src_ref = "", dst_ref = "", src_port = -1, dst_port = -1, protocols = [], src_byte_count = -1, dst_byte_count = -1, src_packets = -1, dst_packets = -1, ipfix = {}, src_payload_ref = "", dst_payload_ref = "", encapsulates_refs = "", encapsulated_by_ref = "", extended_properties = {}):
"""
    makes a network-traffic object
"""
cybox_object = make_cybox_object("network-traffic", description, extended_properties)
    cybox_object['value'] = value
    cybox_object['start'] = start  # 'start' was accepted but never stored in the original
    cybox_object['end'] = end
cybox_object['is_active'] = is_active
cybox_object['src_ref'] = src_ref
cybox_object['dst_ref'] = dst_ref
cybox_object['src_port'] = src_port
cybox_object['dst_port'] = dst_port
cybox_object['protocols'] = protocols
cybox_object['src_byte_count'] = src_byte_count
cybox_object['dst_byte_count'] = dst_byte_count
cybox_object['src_packets'] = src_packets
cybox_object['dst_packets'] = dst_packets
cybox_object['ipfix'] = ipfix
cybox_object['src_payload_ref'] = src_payload_ref
cybox_object['dst_payload_ref'] = dst_payload_ref
cybox_object['encapsulates_refs'] = encapsulates_refs
cybox_object['encapsulated_by_ref'] = encapsulated_by_ref
return cybox_object
#TODO HTTP Extension, TCP Extension, ICMP ext, net socket ext,
def make_email_msg_object(is_multipart, description = "", date = u.getcurrenttime(addition = -1000), content_type = "", from_ref = "", sender_ref = "", to_ref = "", cc_refs = "", bcc_refs = "", subject = "", received_lines = [], additional_header_fields = {}, body = "", body_multipart = False, raw_email_ref = "", extended_properties = {}):
"""
makes email message object
"""
cybox_object = make_cybox_object("email-message", description, extended_properties)
    cybox_object['is_multipart'] = is_multipart
cybox_object['date'] = date
cybox_object['content_type'] = content_type
cybox_object['from_ref'] = from_ref
cybox_object['sender_ref'] = sender_ref
    cybox_object['to_ref'] = to_ref
cybox_object['cc_refs'] = cc_refs
cybox_object['bcc_refs'] = bcc_refs
cybox_object['subject'] = subject
cybox_object['received_lines'] = received_lines
cybox_object['additional_header_fields'] = additional_header_fields
cybox_object['body'] = body
cybox_object['body_multipart'] = body_multipart
cybox_object['raw_email_ref'] = raw_email_ref
return cybox_object
def generate_pattern_eq_ipv4_list(ips):
"""
    makes a pattern that matches any IP address in the given list
"""
pattern = ""
for ip in ips:
pattern += generate_pattern_eq_ipv4(ip)
if ip is not ips[len(ips) - 1]:
pattern += " OR "
return pattern
def generate_pattern_eq_ipv4(value):
"""
makes a pattern to check an ip address
"""
return "ipv4-addr:value = '" + value + "'"
def generate_dummy_ip():
"""
makes a random ip address as a string
"""
return str(randint(0,255)) + "." + str(randint(0,255)) + "." + str(randint(0,255)) + "." + str(randint(0,255))
def generate_random_ip_list(count = randint(10, 30)):
"""
makes a list of random ip addresses
"""
ips = []
for i in range(0, count):
ips.append(generate_dummy_ip())
return ips
def generate_tardis_cybox():
"""
A test container maker function
"""
cy = make_file_object("cybox_generator.py", "cybox generating python script", size = 4021)
co = [
make_directory_object('c://Users/TheDoctor/'),
make_file_object('tardis.exe', hashes = {"md5":"B4D33B0C7306351B9ED96578465C5579"}, parent_directory_ref = "0", extended_properties = make_extended_properties([make_ntfs_file_ext("as", [make_alternate_data_stream_type("second.stream", 25543)])]))
]
    co = [u.clean_dict(o) for o in co]  # the original loop rebound `o` without effect; clean the list instead
cc = make_cybox_container(co)
return cc
#TODO verify that this pattern generation system validates
def finish_pattern(pattern):
"""
    the pattern helpers above only build the inner expression; this wraps it in square brackets to finish the pattern
"""
return '[' + pattern + ']'
def main():
    print(finish_pattern(generate_pattern_eq_ipv4_list(generate_random_ip_list(10))))
if __name__ == "__main__":
main()
|
import glob
import os
import shutil
import time
import random
def sleep_time():
time.sleep(2)
def git_automation(repository_name,local_file_path,script_name,git_script_path):
print('E:\\Github\\'+repository_name)
os.chdir(r'E:\\Github\\'+repository_name)
sleep_time()
os.system('git checkout main')
print("checkedout to main branch")
sleep_time()
os.system('git pull')
sleep_time()
print('pull completed for the main branch')
print('copying the new file to repository')
print(local_file_path)
shutil.copy(local_file_path, git_script_path)
sleep_time()
print('creating new branch')
branch_name = repository_name+'_'+script_name +'_'+str(random.randint(0,999999))
os.system('git checkout -b '+str(branch_name))
sleep_time()
print('checking the status')
os.system('git status')
sleep_time()
print('Adding the files to the branch')
os.system('git add .')
sleep_time()
commit_statement=input('Please enter the commit statement: ')
    os.system('git commit -m "' + commit_statement + '"')  # quote the message so multi-word commit statements work
sleep_time()
os.system('git push --set-upstream origin '+str(branch_name))
input('waiting')
os.system('git checkout main')
print("checkedout to main branch")
sleep_time()
os.system('git branch')
sleep_time()
print( "Deleting the branch")
os.system('git branch -D '+str(branch_name))
sleep_time()
os.system('git branch')
os.system('git pull')
print(" Everything is done ")
def script_names_from_local_paths(files):
script_names=[]
for i in files:
k = i.split('\\')
script_names.append(k[-1])
return script_names
git_files= glob.glob(r"E:\Github\*/*/*.py")
git_files1=glob.glob(r"E:\Github\*/*.py")
git_files.extend(git_files1)
git_files2=glob.glob(r"E:\Github\*/*.ipynb")
git_files.extend(git_files2)
git_files_without_path=script_names_from_local_paths(git_files)
l = len(git_files)
print(git_files)
local_files=glob.glob(r"E:\Manikanta\*/*.py")
local_files_without_path=script_names_from_local_paths(local_files)
print(local_files)
files_to_be_checked= []
for i in range(l):
print(git_files_without_path[i])
if git_files_without_path[i] in local_files_without_path:
j = local_files_without_path.index(git_files_without_path[i])
compare_output = os.system('FC '+git_files[i] +' '+local_files[j] )
print(git_files_without_path[i], compare_output)
#FC File1.txt File2.txt >NUL && Echo Same || Echo Different or error
# -1 Invalid syntax(e.g.only one file passed)
# 0 The files are identical.
#1 The files are different.
#2 Cannot find at least one of the files.
if compare_output >= 1:
            repository_name = git_files[i].split('\\')
            time.sleep(5)
            print(repository_name)
            git_automation(repository_name[2], local_files[j], git_files_without_path[i], git_files[i])
time.sleep(10)
|
'''
problem--
Given a time in 12-hour AM/PM format, convert it to military (24-hour) time.
Note: Midnight is 12:00:00AM on a 12-hour clock, and 00:00:00 on a 24-hour clock. Noon is 12:00:00PM on a 12-hour clock, and 12:00:00 on a 24-hour clock.
Function Description--
Complete the timeConversion function in the editor below. It should return a new string representing the input time in 24 hour format.
timeConversion has the following parameter(s):
s: a string representing time in 12 hour format
Input Format--
A single string containing a time in 12-hour clock format (i.e. hh:mm:ssAM or hh:mm:ssPM), where 00<=hh<=12 and 00<=mm,ss<=59.
Constraints--
All input times are valid
Output Format--
Convert and print the given time in 24-hour format, where 00<=hh<=23.
Sample Input 0--
07:05:45PM
Sample Output 0--
19:05:45
'''
#code here
#!/bin/python3
import os
import sys
def timeConversion(s):
    # split into the hour and the AM/PM marker; minutes/seconds (s[2:8]) stay unchanged
    hh = s[:2]
    period = s[8:]
    if period == 'PM' and hh != '12':
        hh = str(int(hh) + 12)
    if period == 'AM' and hh == '12':
        hh = '00'
    return hh + s[2:8]
if __name__ == '__main__':
f = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = timeConversion(s)
f.write(result + '\n')
f.close()
|
import unittest
from pyval import evaluate
class TestEvaluate(unittest.TestCase):
def test_evaluate(self):
self.assertEqual(evaluate("(2 + 3) * 5"), 25)
self.assertEqual(evaluate("2 + 3 * 5"), 17)
self.assertEqual(evaluate("(4 - 6) * ((4 - 2) * 2)"), -8)
self.assertEqual(evaluate("(4 ** (2 + 1)) + (4 ** (2 + 1))"), 128)
self.assertEqual(evaluate("10.5 - 1.4"), 9.1)
if __name__ == '__main__':
unittest.main()
|
"""
__init__.py
"""
__author__ = 'Gavin M. Roy'
__email__ = 'gmr@myyearbook.com'
__since__ = '2012-06-05'
__version__ = '0.3.2'
import api
import apps
import cli
import crashes
import crashlog
|
"""
Pipeline pressure containment according to DNVGL-ST-F101 (2017-12)
refs:
Notes:
to run:
python DnvStF101_pressure_containment.py
"""
from pdover2t.dnvstf101 import *
from pdover2t.pipe import pipe_Do_Di_WT
# Gas Export (0-0.3 km) location class II, 60°C max. Temp. 340-370m depth
D_i = 0.6172 # (m) pipe internal diameter
t_nom = 0.0242 # (m) pipe wall thickness
t_corr = 0.0005 # (m) corrosion allowance
t_fab = 0.001 # (m) thickness negative fabrication tolerance
SMYS = 450.e6 # (Pa) pipe steel SMYS
f_ytemp = 6.e6 # (Pa) steel yield strength temperature de-rating
SMTS = 535.e6 # (Pa) pipe steel SMTS
f_utemp = 6.e6 # (Pa) steel ultimate strength temperature de-rating
α_U = 1.0 # material strength factor
#α_Ahoop = 1.0 # anisoptropy factor, hoop
γ_m = 1.15 # material resistance factor
p_d = 240.e5 # (Pa) design pressure at reference elevation Z_ref
γ_inc = 1.10 # incidental to design pressure ratio
h_ref = 30.0 # (m) reference elevation for pressure (LAT=0m)
ρ_cont = 275. # (kg/m3) density of pipeline contents
ρ_t = 1027. # test fluid density
MSL = 340. # (m) seawater depth (wrt MSL)
LAT = MSL - 1.1 # adjust tide
h_l = -LAT          # (m) local elevation of the pipeline, adjusted for tide (negative below the water surface)
ρ_seawater = 1027. # (kg/m3) density of seawater
α_spt = 1.05       # system pressure test factor, DNVGL-ST-F101 (2017-12) p94
α_mpt = 1.251      # mill pressure test factor, p94
γ_SCPC = 1.308 # safety class resistance factor for pressure containment
D_o, _, _ = pipe_Do_Di_WT(Di=D_i, WT=t_nom)
f_y = char_strength(SMYS, α_U, f_ytemp=f_ytemp)
f_u = char_strength(SMTS, α_U, f_ytemp=f_utemp)
p_inc = p_incid_ref(p_d, γ_inc)
p_li = p_incid_loc(p_d, ρ_cont, h_l, h_ref, γ_inc)
p_t = p_system_test_ref(p_d, γ_inc, α_spt)
p_e = p_ext(h_l, ρ_seawater)
p_lt = p_test_loc(p_t, ρ_t, h_l, h_ref)
t_min_mill_test = char_WT(t_nom, t_fab, t_corr=0.0)
p_mpt = p_mill_test(D_o, t_min_mill_test, SMYS, SMTS, α_U, α_mpt, k=1.15)
t_1 = char_WT(t_nom, t_fab, t_corr)
p_b = p_contain_resist(D_o, t_1, f_y, f_u)
p_cont_res_uty = p_contain_resist_uty(p_li, p_e, p_b, γ_m, γ_SCPC)
p_lt_uty = p_test_loc_uty(α_spt, p_lt, p_li, p_e)
p_mpt_uty = p_mill_test_uty(p_li, p_e, p_mpt)
p_cont_uty = p_contain_uty(p_cont_res_uty, p_lt_uty, p_mpt_uty)
# repeat 3 cases as a single array ---------------------
# t_nom = np.array([0.0242, 0.0242, 0.0214])
# D_o, _, _ = pipe_Do_Di_WT(Di=D_i, WT=t_nom)
# t_1 = char_WT(t_nom, t_fab, t_corr)
# MSL = np.array([340., 340., 250.])
# LAT = MSL - 1.1
# h_l = -LAT
# f_utemp = f_ytemp = np.array([6.e6, 6.e6, 0.0])
# γ_SCPC = np.array([1.308, 1.138, 1.138])
# arr = pressure_containment_all(p_d,
# D_o, t_nom, t_corr, t_fab,
# h_l, h_ref, ρ_cont, ρ_seawater, ρ_t,
# γ_inc=γ_inc,
# γ_m=γ_m, γ_SCPC=γ_SCPC, α_U=α_U,
# α_spt=α_spt, α_mpt=α_mpt,
# SMYS=SMYS, SMTS=SMTS,
# f_ytemp=f_ytemp, f_utemp=f_utemp)
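# --- Editor's sketch: report the computed utilisations ---
# Assumes the pdover2t helpers above return plain numbers; a utilisation <= 1.0
# is normally taken as acceptable.
print("pressure containment resistance utilisation:", p_cont_res_uty)
print("local test pressure utilisation:", p_lt_uty)
print("mill test utilisation:", p_mpt_uty)
print("governing pressure containment utilisation:", p_cont_uty)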
|
import qutip as qt
import numpy as np
import scipy
from scipy import constants
from scipy.linalg import expm, sinm, cosm
import matplotlib.pyplot as plt
pi = np.pi
e = constants.e
h = constants.h
hbar = constants.hbar
ep0 = constants.epsilon_0
mu0 = constants.mu_0
Phi0 = h/(2*e)
kb = constants.Boltzmann
def to_dBm(P):
return 10*np.log10(P/1e-3)
def to_Watt(dBm):
return 1e-3*10**(dBm/10)
def Anbe(R):
# input : R in ohm
# output : Ic in A
return 1.764*pi*kb*1.2/(2*e*R)
def Ic_to_R(Ic):
# input : Ic in A
# output : R in ohm
return 1.764*pi*kb*1.2/(2*e*Ic)
def EJ_to_Ic(EJ):
# input : EJ in Hz
# output : Ic in A
return (EJ) * (4*pi*e)
def Ic_to_EJ(Ic):
    # input : Ic in A
    # output : EJ in Hz
return (Ic) / (4*pi*e)
def EJ_to_LJ(EJ):
# input : EJ in Hz
# output : LJ in H
IC = EJ_to_Ic(EJ)
return Phi0/(2*pi*IC)
def Ic_to_LJ(IC):
# input : IC in A
# output : LJ in H
return Phi0/(2*pi*IC)
def LJ_to_Ic(LJ):
# input : LJ in H
# output : IC in A
return Phi0/(2*pi*LJ)
def Ec_to_C(EC):
# input : EC in Hz
# output : C in F
return (e**2)/(2*h*EC)
def C_to_Ec(C):
# input : C in F
# output : EC in Hz
return (e**2)/(2*h*C)
def calc_EJ_from_R(R):
# input : R in ohm
# output : EJ in Hz
ic = Anbe(R)
ej = ic/(4*pi*e)
return ej
def calc_c_to_g2(Cr, Cq, Cg, fr, fq):
# coupling const between resonator and Transmon
# output : g in Hz
A = 4 * pi * 1 * np.sqrt( (Cr * Cq) / (fr * fq * 4 * pi ** 2) )
return Cg/A
def calc_Cg2(Cr, Cq, fr, fq, g_target):
# g_target in Hz
# return target coupling capacitance between Q & R
return 4 * pi * g_target * np.sqrt( (Cr * Cq) / (fr * fq * 4 * pi ** 2) )
def calc_g_direct(c1, c2, cg):
# coupling const between Transmons
return (4*e**2)*cg/(c1*c2)/h
def H_Transmon(Ec, Ej, N, ng):
"""
Return the charge qubit hamiltonian as a Qobj instance.
    Ej : Josephson energy (the function scales it by 1e9 internally)
    Ec : charging energy (the function scales it by 1e9 internally)
    N : maximum Cooper-pair number difference
    ng : voltage bias for the island
"""
Ec = Ec*1e9
Ej = Ej*1e9
m = np.diag(4 * Ec * (np.arange(-N,N+1)-ng)**2) + 0.5 * Ej * (np.diag(-np.ones(2*N), 1) + np.diag(-np.ones(2*N), -1))
return qt.Qobj(m)
def Transmon_ene_levels(EJ, EC, N, PLOT=1):
# Ej : josephson energy in Hz
# Ec : charging energy in Hz
    # N : maximum Cooper-pair number difference
if PLOT==1:
ng = 0
enes = H_Transmon(EC, EJ, N, ng).eigenenergies()
elif PLOT==0:
ng_vec = np.linspace(-4, 4, 100)
energies = np.array([H_Transmon(EC, EJ, N, ng).eigenenergies() for ng in ng_vec])
enes = energies[49]
if PLOT==0:
fig, axes = plt.subplots(1,2, figsize=(16,6))
for n in range(len(energies[0,:])):
ene = energies[:,n] - energies[:,0]
axes[0].plot(ng_vec, ene)
axes[0].plot(ng_vec, [9.8 for _ in range(len(ng_vec))], linestyle='dashed', color='red')
axes[0].set_ylim(0.1, 50)
axes[0].set_xlabel(r'$n_g$', fontsize=18)
axes[0].set_ylabel(r'$E_n$', fontsize=18)
axes[0].grid()
for n in range(len(energies[0,:])):
axes[1].plot(ng_vec, (energies[:,n]-energies[:,0])/(energies[:,1]-energies[:,0]))
axes[1].set_ylim(-0.1, 3)
axes[1].set_xlabel(r'$n_g$', fontsize=18)
axes[1].set_ylabel(r'$(E_n-E_0)/(E_1-E_0)$', fontsize=18)
axes[1].grid()
return [enes[i]-enes[0] for i in range(len(enes))]
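# --- Usage sketch (editor's addition) ---
# Assumes EJ and EC are given in the units implied by the 1e9 scaling inside
# H_Transmon; prints the lowest transition energies E_n - E_0 at ng = 0.
if __name__ == "__main__":
    EJ, EC, N = 15.0, 0.3, 10
    levels = Transmon_ene_levels(EJ, EC, N, PLOT=1)
    print(levels[:4])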
|
r"""Reference.
http://cocodataset.org/#detection-eval
https://arxiv.org/pdf/1502.05082.pdf
https://github.com/rafaelpadilla/Object-Detection-Metrics/issues/22
"""
import collections
from .base import EvaluationMetric
from .records import Records
class AverageRecallBBox2D(EvaluationMetric):
"""2D Bounding Box Average Recall metrics.
Attributes:
label_records (dict): save prediction records for each label
gt_bboxes_count (dict): ground truth box count for each label
iou_threshold (float): iou threshold
max_detections (int): max detections per image
Args:
iou_threshold (float): iou threshold (default: 0.5)
max_detections (int): max detections per image (default: 100)
"""
def __init__(self, iou_threshold=0.5, max_detections=100):
self.label_records = collections.defaultdict(
lambda: Records(iou_threshold=self.iou_threshold)
)
self.gt_bboxes_count = collections.defaultdict(int)
self.iou_threshold = iou_threshold
self.max_detections = max_detections
def reset(self):
"""Reset AR metrics."""
self.label_records = collections.defaultdict(
lambda: Records(iou_threshold=self.iou_threshold)
)
self.gt_bboxes_count = collections.defaultdict(int)
def update(self, mini_batch):
"""Update records per mini batch.
Args:
            mini_batch (list(list)): a list of length batch_size; each element
                contains the gt bboxes and pred bboxes of one image.
For example, if batch size = 2, mini_batch looks like:
[[gt_bboxes1, pred_bboxes1], [gt_bboxes2, pred_bboxes2]]
where gt_bboxes1, pred_bboxes1 contain gt bboxes and pred bboxes
in one image
"""
for bboxes in mini_batch:
gt_bboxes, pred_bboxes = bboxes
for gt_bbox in gt_bboxes:
self.gt_bboxes_count[gt_bbox.label] += 1
bboxes_per_label = self.label_bboxes(
pred_bboxes, self.max_detections
)
for label in bboxes_per_label:
self.label_records[label].add_records(
gt_bboxes, bboxes_per_label[label]
)
def compute(self):
"""Compute AR for each label.
Return:
average_recall (dict): a dictionary of AR scores per label.
"""
average_recall = {}
label_records = self.label_records
for label in self.gt_bboxes_count:
# if there are no predicted boxes with this label
if label not in label_records:
average_recall[label] = 0
continue
pred_infos = label_records[label].pred_infos
gt_bboxes_count = self.gt_bboxes_count[label]
# The number of TP
sum_tp = sum(list(zip(*pred_infos))[1])
max_recall = sum_tp / gt_bboxes_count
average_recall[label] = max_recall
return average_recall
@staticmethod
def label_bboxes(pred_bboxes, max_detections):
"""Save bboxes with same label in to a dictionary.
        This operation only applies to predictions for a single image.
Args:
            pred_bboxes (list): a list of predicted bounding boxes
max_detections (int): max detections per label
Returns:
            labels (dict): a dictionary of predicted bounding boxes grouped by label
"""
labels = collections.defaultdict(list)
for box in pred_bboxes:
labels[box.label].append(box)
for label, boxes in labels.items():
boxes = sorted(boxes, key=lambda bbox: bbox.score, reverse=True)
# only consider the top confident predictions
if len(boxes) > max_detections:
labels[label] = boxes[:max_detections]
return labels
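# Worked example (editor's note, not part of the original module): if 10 ground-truth
# boxes carry label "car" and 7 of the retained predictions for "car" are matched at
# IoU >= iou_threshold, compute() yields {"car": 0.7} for that label.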
|
#
# Working with the OS
#
import os
from os import path
import datetime
from datetime import date, time, timedelta
import time
def main():
# Print the name of the OS
print(os.name)
# Check for if an item exists and what is its type
print("Item exists: " + str(path.exists("textfile.txt")))
print("Item is a file:" + str(path.isfile("textfile.txt")))
print("Item is directory:" + str(path.isdir("textfile.txt")))
# work with file paths
file_path = str(path.realpath("textfile.txt"))
print("File path: " + file_path)
print("File path and name: " + str(path.split(file_path)))
from pathlib import Path
cwd = Path.cwd()
demo_file = Path(Path.joinpath(cwd, 'demo.txt'))
# Get the file name
print(demo_file.name)
# Get the extension
print(demo_file.suffix)
# Get the folder
print(demo_file.parent.name)
# Get the size
print(demo_file.stat().st_size)
if __name__ == "__main__":
main()
|
from rest_framework import serializers
from base.models import Task, Subtask, Category, Attachment
from django.contrib.auth.models import User
from taggit.serializers import (TagListSerializerField,
TaggitSerializer)
from taggit.models import Tag, TaggedItem
from base.reports import ReportParams
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ("owner", "name")
class AttachmentSerializer(serializers.ModelSerializer):
class Meta:
model = Attachment
fields = ("upload",)
class SubtaskSerializer(serializers.ModelSerializer):
class Meta:
model = Subtask
fields = (
"task",
"title",
"complete",
"created",
)
class ReadOnlyUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "username")
class ReadTaskSerializer(TaggitSerializer, serializers.ModelSerializer):
category = serializers.StringRelatedField()
subtasks = SubtaskSerializer(many=True)
attachments = AttachmentSerializer(many=True)
owner = ReadOnlyUserSerializer()
tags = TagListSerializerField()
class Meta:
model = Task
fields = (
"id",
"owner",
"title",
"description",
"complete",
"created",
"priority",
"category",
"tags",
"subtasks",
"attachments",
"tags",
)
class WriteTaskSerializer(serializers.ModelSerializer):
owner = serializers.HiddenField(default=serializers.CurrentUserDefault())
category = serializers.SlugRelatedField(slug_field="name", queryset=Category.objects.all())
class Meta:
model = Task
fields = (
"owner",
"title",
"description",
"complete",
"created",
"priority",
"category",
)
# enable to create tasks under category that authenticated user created
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
owner = self.context["request"].user
self.fields["category"].queryset = owner.categories.all()
class ReportTaskSerializer(TaggitSerializer, serializers.Serializer):
category = CategorySerializer()
tag = serializers.CharField(max_length=32)
completed_subtasks_count = serializers.DecimalField(max_digits=15, decimal_places=2)
class ParamsTaskSerializer(TaggitSerializer, serializers.Serializer):
start_date = serializers.DateTimeField(required=False)
end_date = serializers.DateTimeField(required=False)
priority = serializers.IntegerField(required=False)
complete = serializers.BooleanField(required=False)
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
def create(self, validated_data):
return ReportParams(**validated_data)
|
# Slicing
#frase = 'Curso em Vídeo Python'
#print(frase[9::3])
# Analysis
frase = "Fabio Rodrigues Dias"
print(len(frase))
print(frase.count("o"))
print(frase.count("o", 0, 20))
print(frase.lower().find("Dias"))
print(frase.count("abio"))
print("Dias"in frase)
print('\n')
# Transformation
print(frase.replace("Python", "Fabio"))
print(frase.upper())
print(frase.lower())
print(frase.split())
print(frase.capitalize())
print(frase.title())
print(frase.strip())
novafrase = frase.split()
print('-'.join(novafrase))
|
"""
Database event signals
"""
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from apps.chat.models import Room
from apps.chat.serializers import RoomHeavySerializer
@receiver(post_save, sender=Room)
def room_upsert(sender, instance, **kwargs):
"""
    Broadcast the saved (created or updated) Room to the "rooms" channel group.
"""
group_name: str = "rooms"
channel_layer = get_channel_layer()
serializer = RoomHeavySerializer(instance)
# print(serializer.data)
async_to_sync(channel_layer.group_send)(
group_name, {"type": "room_event", "method": "U", "data": serializer.data,}
)
@receiver(post_delete, sender=Room)
def room_deleted(sender, instance, **kwargs):
"""
    Broadcast the deleted Room to the "rooms" channel group.
"""
group_name: str = "rooms"
channel_layer = get_channel_layer()
serializer = RoomHeavySerializer(instance)
async_to_sync(channel_layer.group_send)(
group_name, {"type": "room_event", "method": "D", "data": serializer.data,}
)
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow
import math
class BuyCityWindow(QMainWindow):
"""
This window is used for proposing price for city buyout.
"""
def __init__(self, parent, grandparent):
super(BuyCityWindow, self).__init__()
self.resize(440, 280)
self.setWindowTitle("City Trade")
self.gold_dict = {"gold": 0, "wood": 0, "stone": 0,
"food": 0} # this dictionary is only used for holding keep of gold, but it's used couple of times so it will be easier to declare it.
self.parent = parent
self.grandparent = grandparent
self.centralwidget = QtWidgets.QWidget()
""" HOW MUCH PART """
self.gold_slider = QtWidgets.QSlider(self.centralwidget)
self.gold_slider.setGeometry(QtCore.QRect(40, 80, 241, 16))
self.gold_slider.setOrientation(QtCore.Qt.Horizontal)
self.gold_slider.setMinimum(1)
self.gold_slider.setMaximum(self.grandparent.city.owner.granary.gold)
self.gold_slider.setTickInterval(
self.grandparent.city.owner.granary.gold / 25) # works when you click next to slider
self.gold_slider.setSingleStep(self.grandparent.city.owner.granary.gold / 25)
self.gold_slider.valueChanged.connect(self.recalculate_how_much_gold)
self.gold_slider.setValue(1)
self.gold_edit = QtWidgets.QLineEdit(self.centralwidget)
self.gold_edit.setGeometry(QtCore.QRect(300, 70, 113, 25))
self.gold_edit.setText("1")
self.gold_edit.textChanged.connect(self.change_slider_from_line_edit) # both way changing value
self.how_many_label = QtWidgets.QLabel(self.centralwidget)
self.how_many_label.setGeometry(QtCore.QRect(70, 30, 350, 17))
self.how_many_label.setText("How much would you like to pay for this city?")
""" MAKE OFFER PART """
self.make_offer_button = QtWidgets.QPushButton(self.centralwidget)
self.make_offer_button.setGeometry(QtCore.QRect(80, 180, 271, 61))
self.make_offer_button.setText("Make offer")
self.make_offer_button.clicked.connect(self.make_offer)
self.setCentralWidget(self.centralwidget)
def make_offer(self):
"""
This method sends diplomacy message to city owner. Cost from slider or line_edit is limited by player's granary
so no more checks are needed.
"""
self.grandparent.top_bar.me.granary.pay_for(self.gold_dict) # paying for materials
# TODO Diplomacy procedure - 'buying city'
# receiver, price, resource, quantity
# self.grandparent.client.buy_resource(self.grandparent.city.owner.nick, self.how_much_for_piece_edit.text(),
# self.material_type_holder, self.gold_edit.text())
city = self.grandparent.city
receiver = city.owner.nick
coords = city.tile.coords
self.grandparent.client.buy_city(receiver, self.gold_dict['gold'], coords)
self.hide()
self.grandparent.window.back_to_game()
self.parent.kill_app()
def recalculate_how_much_gold(self):
self.gold_edit.setText(str(self.gold_slider.value()))
def calculate_total_cost(self):
payment_cost = int(self.gold_edit.text())
self.gold_dict["gold"] = payment_cost
def change_slider_from_line_edit(self):
try:
number = int(self.gold_edit.text())
if number < 1:
number = 1
self.gold_edit.setText(str(number))
elif number > self.grandparent.city.owner.granary.gold:
number = self.grandparent.city.owner.granary.gold
self.gold_edit.setText(str(number))
except: # catches all strange input
number = 1
self.gold_edit.setText(str(number))
self.gold_slider.setValue(number)
self.calculate_total_cost()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
win = BuyCityWindow(None, None)
win.show()
sys.exit(app.exec_())
|
import random
import generate_histogram
#module to select one word based on commonness(frequency) in the given histogram
def weighted_random_word(histogram_dict):
''' Takes a histogram and returns a random weighted word '''
# Raise an exception if we are given an empty histogram
if len(histogram_dict) == 0:
raise Exception("You can't sample from an empty histogram")
#Creates a running total value
total_word_count = 0
#Gets a random number between 0 and the total sum of all frequencies
sum_dictionary = sum(histogram_dict.values())
    # Either [1, sum] or [0, sum - 1], otherwise it repeats inappropriately
random_word_index = random.randint(0, sum_dictionary - 1)
# ".items()" allows the dictionary to be iterated over
for key, value in histogram_dict.items():
total_word_count += value
if total_word_count > random_word_index:
return key
else:
continue
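# Worked example (editor's note): for the histogram {'one': 3, 'fish': 1} the random
# index is drawn from [0, 3], so 'one' is returned about 75% of the time and 'fish' about 25%.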
def weighted_random_test():
''' Tests to make sure that the weighted random sampling is correct '''
    histogram = generate_histogram.create_histogram('small_sample.txt')  # assuming the imported generate_histogram module provides create_histogram
    result_dict = {}  # init empty dict to store results (outside the loop so counts accumulate)
    for _ in range(0, 1000):  # sample 1000 times
        chosen_word = weighted_random_word(histogram)
        print(chosen_word)
        if chosen_word in result_dict:
            result_dict[chosen_word] += 1
        else:
            result_dict[chosen_word] = 1
print(result_dict)
return result_dict
if __name__ == "__main__":
    print(weighted_random_word(generate_histogram.create_histogram('small_sample.txt')))
weighted_random_test()
|
#
# @lc app=leetcode id=283 lang=python3
#
# [283] Move Zeroes
#
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
index = 0
for i, n in enumerate(nums):
if n != 0:
nums[i], nums[index], index = nums[index], nums[i], index + 1
if __name__ == '__main__':
nums = [0, 1, 0, 3, 12]
# nums = [0, 0, 0, 1]
Solution().moveZeroes(nums)
print(nums)
|
from easydict import EasyDict
cartpole_dqn_config = dict(
env=dict(
collector_env_num=8,
collector_episode_num=2,
evaluator_env_num=5,
evaluator_episode_num=1,
stop_value=195,
),
policy=dict(
cuda=False,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
dueling=True,
),
nstep=3,
discount_factor=0.97,
learn=dict(
batch_size=32,
learning_rate=0.001,
learner=dict(
learner_num=1,
send_policy_freq=1,
),
),
collect=dict(
n_sample=16,
collector=dict(
collector_num=2,
update_policy_second=3,
),
),
eval=dict(evaluator=dict(eval_freq=50, )),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=100000,
),
replay_buffer=dict(
replay_buffer_size=100000,
enable_track_used_data=False,
),
commander=dict(
collector_task_space=2,
learner_task_space=1,
eval_interval=5,
),
),
),
)
cartpole_dqn_config = EasyDict(cartpole_dqn_config)
main_config = cartpole_dqn_config
cartpole_dqn_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='dqn_command'),
learner=dict(type='base', import_names=['ding.worker.learner.base_learner']),
collector=dict(
type='zergling',
import_names=['ding.worker.collector.zergling_collector'],
),
commander=dict(
type='solo',
import_names=['ding.worker.coordinator.solo_parallel_commander'],
),
comm_learner=dict(
type='flask_fs',
import_names=['ding.worker.learner.comm.flask_fs_learner'],
),
comm_collector=dict(
type='flask_fs',
import_names=['ding.worker.collector.comm.flask_fs_collector'],
),
)
cartpole_dqn_create_config = EasyDict(cartpole_dqn_create_config)
create_config = cartpole_dqn_create_config
cartpole_dqn_system_config = dict(
coordinator=dict(),
path_data='./data',
path_policy='./policy',
communication_mode='auto',
learner_gpu_num=1,
)
cartpole_dqn_system_config = EasyDict(cartpole_dqn_system_config)
system_config = cartpole_dqn_system_config
if __name__ == '__main__':
from ding.entry.parallel_entry import parallel_pipeline
parallel_pipeline([main_config, create_config, system_config], seed=9)
|
"""
Main script for semantic experiments
Author: Vivien Sainte Fare Garnot (github/VSainteuf)
License: MIT
"""
import argparse
import json
import os
import pickle as pkl
import pprint
import time
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
import torchnet as tnt
from src import utils, model_utils
from src.dataset import PASTIS_Dataset
from src.learning.metrics import confusion_matrix_analysis
from src.learning.miou import IoU
from src.learning.weight_init import weight_init
parser = argparse.ArgumentParser()
# Model parameters
parser.add_argument(
"--model",
default="utae",
type=str,
help="Type of architecture to use. Can be one of: (utae/unet3d/fpn/convlstm/convgru/uconvlstm/buconvlstm)",
)
## U-TAE Hyperparameters
parser.add_argument("--encoder_widths", default="[64,64,64,128]", type=str)
parser.add_argument("--decoder_widths", default="[32,32,64,128]", type=str)
parser.add_argument("--out_conv", default="[32, 20]")
parser.add_argument("--str_conv_k", default=4, type=int)
parser.add_argument("--str_conv_s", default=2, type=int)
parser.add_argument("--str_conv_p", default=1, type=int)
parser.add_argument("--agg_mode", default="att_group", type=str)
parser.add_argument("--encoder_norm", default="group", type=str)
parser.add_argument("--n_head", default=16, type=int)
parser.add_argument("--d_model", default=256, type=int)
parser.add_argument("--d_k", default=4, type=int)
# Set-up parameters
parser.add_argument(
"--dataset_folder",
default="",
type=str,
help="Path to the folder where the results are saved.",
)
parser.add_argument(
"--res_dir",
default="./results",
help="Path to the folder where the results should be stored",
)
parser.add_argument(
"--num_workers", default=8, type=int, help="Number of data loading workers"
)
parser.add_argument("--rdm_seed", default=1, type=int, help="Random seed")
parser.add_argument(
"--device",
default="cuda",
type=str,
help="Name of device to use for tensor computations (cuda/cpu)",
)
parser.add_argument(
"--display_step",
default=50,
type=int,
help="Interval in batches between display of training metrics",
)
parser.add_argument(
"--cache",
dest="cache",
action="store_true",
help="If specified, the whole dataset is kept in RAM",
)
# Training parameters
parser.add_argument("--epochs", default=100, type=int, help="Number of epochs per fold")
parser.add_argument("--batch_size", default=4, type=int, help="Batch size")
parser.add_argument("--lr", default=0.001, type=float, help="Learning rate")
parser.add_argument("--mono_date", default=None, type=str)
parser.add_argument("--ref_date", default="2018-09-01", type=str)
parser.add_argument(
"--fold",
default=None,
type=int,
help="Do only one of the five fold (between 1 and 5)",
)
parser.add_argument("--num_classes", default=20, type=int)
parser.add_argument("--ignore_index", default=-1, type=int)
parser.add_argument("--pad_value", default=0, type=float)
parser.add_argument("--padding_mode", default="reflect", type=str)
parser.add_argument(
"--val_every",
default=1,
type=int,
help="Interval in epochs between two validation steps.",
)
parser.add_argument(
"--val_after",
default=0,
type=int,
help="Do validation only after that many epochs.",
)
list_args = ["encoder_widths", "decoder_widths", "out_conv"]
parser.set_defaults(cache=False)
def iterate(
model, data_loader, criterion, config, optimizer=None, mode="train", device=None
):
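    # Runs one pass over `data_loader`. In "train" mode the loss is backpropagated
    # and the optimizer stepped; in "val"/"test" modes the model is only evaluated.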
loss_meter = tnt.meter.AverageValueMeter()
iou_meter = IoU(
num_classes=config.num_classes,
ignore_index=config.ignore_index,
cm_device=config.device,
)
t_start = time.time()
for i, batch in enumerate(data_loader):
if device is not None:
batch = recursive_todevice(batch, device)
(x, dates), y = batch
y = y.long()
if mode != "train":
with torch.no_grad():
out = model(x, batch_positions=dates)
else:
optimizer.zero_grad()
out = model(x, batch_positions=dates)
loss = criterion(out, y)
if mode == "train":
loss.backward()
optimizer.step()
with torch.no_grad():
pred = out.argmax(dim=1)
iou_meter.add(pred, y)
loss_meter.add(loss.item())
if (i + 1) % config.display_step == 0:
miou, acc = iou_meter.get_miou_acc()
print(
"Step [{}/{}], Loss: {:.4f}, Acc : {:.2f}, mIoU {:.2f}".format(
i + 1, len(data_loader), loss_meter.value()[0], acc, miou
)
)
t_end = time.time()
total_time = t_end - t_start
print("Epoch time : {:.1f}s".format(total_time))
miou, acc = iou_meter.get_miou_acc()
metrics = {
"{}_accuracy".format(mode): acc,
"{}_loss".format(mode): loss_meter.value()[0],
"{}_IoU".format(mode): miou,
"{}_epoch_time".format(mode): total_time,
}
if mode == "test":
return metrics, iou_meter.conf_metric.value() # confusion matrix
else:
return metrics
def recursive_todevice(x, device):
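    # Recursively move every tensor in a nested structure of dicts/sequences to
    # `device`, e.g. recursive_todevice(((x, dates), y), device).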
if isinstance(x, torch.Tensor):
return x.to(device)
elif isinstance(x, dict):
return {k: recursive_todevice(v, device) for k, v in x.items()}
else:
return [recursive_todevice(c, device) for c in x]
def prepare_output(config):
os.makedirs(config.res_dir, exist_ok=True)
for fold in range(1, 6):
os.makedirs(os.path.join(config.res_dir, "Fold_{}".format(fold)), exist_ok=True)
def checkpoint(fold, log, config):
with open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "trainlog.json"), "w"
) as outfile:
json.dump(log, outfile, indent=4)
def save_results(fold, metrics, conf_mat, config):
with open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "test_metrics.json"), "w"
) as outfile:
json.dump(metrics, outfile, indent=4)
pkl.dump(
conf_mat,
open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "conf_mat.pkl"), "wb"
),
)
def overall_performance(config):
cm = np.zeros((config.num_classes, config.num_classes))
for fold in range(1, 6):
cm += pkl.load(
open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "conf_mat.pkl"),
"rb",
)
)
if config.ignore_index is not None:
cm = np.delete(cm, config.ignore_index, axis=0)
cm = np.delete(cm, config.ignore_index, axis=1)
_, perf = confusion_matrix_analysis(cm)
print("Overall performance:")
print("Acc: {}, IoU: {}".format(perf["Accuracy"], perf["MACRO_IoU"]))
with open(os.path.join(config.res_dir, "overall.json"), "w") as file:
file.write(json.dumps(perf, indent=4))
def main(config):
fold_sequence = [
[[1, 2, 3], [4], [5]],
[[2, 3, 4], [5], [1]],
[[3, 4, 5], [1], [2]],
[[4, 5, 1], [2], [3]],
[[5, 1, 2], [3], [4]],
]
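    # Each entry is (train folds, val fold, test fold): the five folds are rotated
    # so that every fold is used exactly once for validation and once for testing.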
np.random.seed(config.rdm_seed)
torch.manual_seed(config.rdm_seed)
prepare_output(config)
device = torch.device(config.device)
fold_sequence = (
fold_sequence if config.fold is None else [fold_sequence[config.fold - 1]]
)
for fold, (train_folds, val_fold, test_fold) in enumerate(fold_sequence):
if config.fold is not None:
fold = config.fold - 1
# Dataset definition
dt_args = dict(
folder=config.dataset_folder,
norm=True,
reference_date=config.ref_date,
mono_date=config.mono_date,
target="semantic",
sats=["S2"],
)
dt_train = PASTIS_Dataset(**dt_args, folds=train_folds, cache=config.cache)
dt_val = PASTIS_Dataset(**dt_args, folds=val_fold, cache=config.cache)
dt_test = PASTIS_Dataset(**dt_args, folds=test_fold)
collate_fn = lambda x: utils.pad_collate(x, pad_value=config.pad_value)
train_loader = data.DataLoader(
dt_train,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
val_loader = data.DataLoader(
dt_val,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
test_loader = data.DataLoader(
dt_test,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print(
"Train {}, Val {}, Test {}".format(len(dt_train), len(dt_val), len(dt_test))
)
# Model definition
model = model_utils.get_model(config, mode="semantic")
config.N_params = utils.get_ntrainparams(model)
with open(os.path.join(config.res_dir, "conf.json"), "w") as file:
file.write(json.dumps(vars(config), indent=4))
print(model)
print("TOTAL TRAINABLE PARAMETERS :", config.N_params)
print("Trainable layers:")
for name, p in model.named_parameters():
if p.requires_grad:
print(name)
model = model.to(device)
model.apply(weight_init)
# Optimizer and Loss
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
weights = torch.ones(config.num_classes, device=device).float()
weights[config.ignore_index] = 0
criterion = nn.CrossEntropyLoss(weight=weights)
# Training loop
trainlog = {}
best_mIoU = 0
for epoch in range(1, config.epochs + 1):
print("EPOCH {}/{}".format(epoch, config.epochs))
model.train()
train_metrics = iterate(
model,
data_loader=train_loader,
criterion=criterion,
config=config,
optimizer=optimizer,
mode="train",
device=device,
)
if epoch % config.val_every == 0 and epoch > config.val_after:
print("Validation . . . ")
model.eval()
val_metrics = iterate(
model,
data_loader=val_loader,
criterion=criterion,
config=config,
optimizer=optimizer,
mode="val",
device=device,
)
print(
"Loss {:.4f}, Acc {:.2f}, IoU {:.4f}".format(
val_metrics["val_loss"],
val_metrics["val_accuracy"],
val_metrics["val_IoU"],
)
)
trainlog[epoch] = {**train_metrics, **val_metrics}
checkpoint(fold + 1, trainlog, config)
if val_metrics["val_IoU"] >= best_mIoU:
best_mIoU = val_metrics["val_IoU"]
torch.save(
{
"epoch": epoch,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
},
os.path.join(
config.res_dir, "Fold_{}".format(fold + 1), "model.pth.tar"
),
)
else:
trainlog[epoch] = {**train_metrics}
checkpoint(fold + 1, trainlog, config)
print("Testing best epoch . . .")
model.load_state_dict(
torch.load(
os.path.join(
config.res_dir, "Fold_{}".format(fold + 1), "model.pth.tar"
)
)["state_dict"]
)
model.eval()
test_metrics, conf_mat = iterate(
model,
data_loader=test_loader,
criterion=criterion,
config=config,
optimizer=optimizer,
mode="test",
device=device,
)
print(
"Loss {:.4f}, Acc {:.2f}, IoU {:.4f}".format(
test_metrics["test_loss"],
test_metrics["test_accuracy"],
test_metrics["test_IoU"],
)
)
save_results(fold + 1, test_metrics, conf_mat.cpu().numpy(), config)
if config.fold is None:
overall_performance(config)
if __name__ == "__main__":
config = parser.parse_args()
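    # Turn list-style string arguments (e.g. "[64,64,64,128]") into lists of ints.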
for k, v in vars(config).items():
if k in list_args and v is not None:
v = v.replace("[", "")
v = v.replace("]", "")
config.__setattr__(k, list(map(int, v.split(","))))
assert config.num_classes == config.out_conv[-1]
pprint.pprint(config)
main(config)
|
# Generate colored-square image frames (plus blank background frames) for processing
import predusion.immaker as immaker
from predusion.color_deg import Degree_color
import numpy as np
import os
n_image, n_image_w = 4, 4
imshape = (128, 160)
l, a, b, r = 80, 22, 14, 70
n_deg = 30
deg_color = Degree_color(center_l=l, center_a=a, center_b=b, radius=r)
center = 10
width = int(imshape[0] / 2)
degree = np.linspace(0, 360, n_deg)
color_list = deg_color.out_color(degree, fmat='RGB', is_upscaled=True)
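# Degree_color presumably samples colors on a circle of radius r around (a, b) in
# CIELAB at lightness l; out_color maps the sampled hue angles (degrees) to RGB.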
square = immaker.Square(imshape, background=150)
#color_list = np.zeros((30, 3))
#i = 0
#for ci in np.arange(0, 1.25, 0.25):
# color_list[i] = [ci, 1, 0]
# color_list[i + 1] = [0, 1, ci]
# color_list[i + 2] = [0, ci, 1]
# color_list[i + 3] = [ci, 0, 1]
# color_list[i + 4] = [1, 0, ci]
# color_list[i + 5] = [1, ci, 0]
# i = i + 6
#color_list = color_list * 255
for color in color_list:
r, g, b = color
square.set_square(width=width, rgb_color=color)
save_path_dir = './kitti_data/raw/square_{center}_{width}/square_{r}_{g}_{b}'.format(center=center, width=width, r=str(round(r, 2)), g=str(round(g, 2)), b=str(round(b, 2)))
if not os.path.exists(save_path_dir): os.makedirs(save_path_dir)
# generate n_image colored square
for i in range(n_image):
save_path = os.path.join(save_path_dir, 'square{i}.png'.format(i=str(i).zfill(2)))
square.save_fig(save_path=save_path)
    # generate n_image_w background-colored (blank) squares
bg = square.background
square.set_square(width=width, rgb_color=(bg, bg, bg))
for i in range(n_image, n_image + n_image_w):
save_path = os.path.join(save_path_dir, 'square{i}.png'.format(i=str(i).zfill(2)))
square.save_fig(save_path=save_path)
|
from __future__ import print_function
import tensorflow as tf
try:
tf.contrib.eager.enable_eager_execution()
except ValueError:
print('value error!')
else:
print(tf.executing_eagerly())
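# Note: on TF 1.x builds where eager execution was successfully enabled above,
# constructing a tf.Session below may raise, since eager mode and graph sessions
# are mutually exclusive; the graph/session path is mainly exercised when
# enabling eager fails.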
graph = tf.Graph()
with graph.as_default():
c = tf.constant('hello world!')
with tf.Session(graph=graph) as sess:
print(sess.run([c]))
|
#coding:utf-8
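# Caesar cipher (Python 2 syntax): shift each letter by K positions, wrapping
# within its own case (90 == ord('Z'), 122 == ord('z')); N (presumably the string
# length) is read but not otherwise used.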
N = int(raw_input())
s = list(raw_input())
K = int(raw_input()) % 26
for i, char in enumerate(s):
if char >= 'A' and char <= 'Z':
num = ord(char) + K
if num > 90:
num -= 26
elif char >= 'a' and char <= 'z':
num = ord(char) + K
if num > 122:
num -= 26
else:
continue
s[i] = chr(num)
print ''.join(s)
|
import os
import unittest
from os.path import join as pjoin
from e2xgrader.models import TaskModel
from ..test_utils.test_utils import create_temp_course, add_question_to_task
class TestTaskModel(unittest.TestCase):
def setUp(self):
tmp_dir, coursedir = create_temp_course()
self.tmp_dir = tmp_dir
self.model = TaskModel(coursedir)
def test_list_empty(self):
pool = "TestPool"
assert (
len(self.model.list(pool=pool)) == 0
), "Model should not list anything in an empty course"
def test_list_not_empty(self):
names = ["TestTask", "TestTask1", "TestTask2"]
pool = "TestPool"
for name in names:
self.model.new(name=name, pool=pool)
assert len(self.model.list(pool=pool)) == 3
for task in self.model.list(pool=pool):
assert task["name"] in names
def test_create_and_remove_valid_task(self):
name = "TestTask"
pool = "TestPool"
res = self.model.new(name=name, pool=pool)
assert res["success"], "New task could not be created"
assert os.path.exists(
pjoin(self.model.base_path(), pool, name)
), "New task directory missing"
for directory in ["img", "data"]:
msg = f"New task subdirectory {directory} missing!"
assert os.path.exists(
pjoin(self.model.base_path(), pool, name, directory)
), msg
        assert os.path.exists(
            pjoin(self.model.base_path(), pool, name, f"{name}.ipynb")
        ), "New task notebook missing"
self.model.remove(name=name, pool=pool)
assert not os.path.exists(
pjoin(self.model.base_path(), pool, name)
), "Task should be deleted"
def test_create_existing_task(self):
name = "TestTask"
pool = "TestPool"
res = self.model.new(name=name, pool=pool)
assert res["success"], "New task could not be created"
res = self.model.new(name=name, pool=pool)
assert not res["success"]
assert res["error"] == f"A task with the name {name} already exists!"
def test_get_task_info(self):
name = "TestTask"
pool = "TestPool"
points = 5
res = self.model.new(name=name, pool=pool)
add_question_to_task(
self.model.coursedir,
res["path"],
"Freetext",
grade_id="task1",
points=points,
)
res = self.model.get(name=name, pool=pool)
assert res["name"] == name
assert res["pool"] == pool
assert res["questions"] == 1
assert res["points"] == points
def test_create_invalid_name(self):
name = "$Invalid.Name"
pool = "TestPool"
res = self.model.new(name=name, pool=pool)
assert not res["success"]
assert res["error"] == "Invalid name"
def tearDown(self):
self.tmp_dir.cleanup()
|
DEFAULT = False
called = DEFAULT
def reset_app():
global called
called = DEFAULT
def generate_sampledata(options):
global called
assert called == DEFAULT
called = True
|
#!/usr/bin/python
#
# flg - FOX lexer generator.
#
# usage:
# flg [ -l,--language=<outputlanguage> ] [ -n,--name=<name> ]
# [ -o,--outputfile=<filename> ] [ -v,--showtables ] <inputfile>|-
#
# options:
# -l,--language=<outputlanguage>
# What language to output the lexer in. Currently, only "python"
# is supported.
# -n,--name=<name>
# The lexer class name. Also used for default output filename.
# -o,--outputfile=<filename>
# The output filename. Can be "-" for stdout.
# -v,--showtables
# After parsing, output the generated tables in a descriptive format
# to stdout.
# [[ currently does nothing! ]]
#
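# example (hypothetical file names):
#   flg --language=python --name=MyLexer --outputfile=mylexer.py mylexer.flg
#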
# python imports
import sys
import os
import optparse
# CAT imports that will be merged into the final flg file using the buildStandalone script
from version import *
from flgDrivers import *
from foxLanguages import *
########################################################################
##
## set up options parser
##
########################################################################
PROG = u'flg'
optparser = optparse.OptionParser( prog = PROG, version = "%prog version " + VERSION_STR )
optparser.set_description('''The FOX Lexer Generator. Given a suitable flg source file, this generates lexer code capable of recognizing and tokenizing an input stream into the tokens defined by the flg source file.''')
optparser.set_usage('''%prog [<options>...] <inputfile> | -''')
optparser.disable_interspersed_args()
languages = [x + ',' for x in sorted(foxLanguages.keys())]
languages[-1] = languages[-1][:-1]
if len(languages) == 2:
languages[0] = languages[0][:-1]
if len(languages) > 1:
languages[-1] = 'and ' + languages[-1]
if len(languages) == 1:
languages = 'The choice currently available is: ' + languages[0]
else:
languages = 'Possible choices are: ' + ' '.join(languages)
optparser.add_option(
'-l', '--language',
metavar = '<outputlanguage>',
dest = 'language',
type = 'choice',
choices = foxLanguages.keys(),
default = 'python',
help = '''The language for which the lexer code will be generated. ''' + languages
)
optparser.add_option(
'-D', '--debugging',
dest = 'debugging',
action = 'store_true',
default = False,
help = '''Enable the use of debugging hooks from the lexer.'''
)
optparser.add_option(
'-n', '--name',
metavar = '<name>',
dest = 'name',
type = 'string',
help = '''Required: The lexer class name.'''
)
optparser.add_option(
'-o', '--outputfile',
metavar = '<filename>',
dest = 'outputfile',
type = 'string',
    help = '''The filename to generate. Defaults to the lexer <name> (see --name) with a language-appropriate extension. If no "." appears in <filename>, then a language-appropriate extension will be added. If <filename> is "-" (a single dash), then the generated lexer will be displayed on standard output.'''
)
optparser.add_option(
'-v', '--showtables',
dest = 'showtables',
action = 'store_true',
default = False,
help = '''After creating the lexer tables, output the generated tables in descriptive format to standard output. [[Not currently implemented]]'''
)
# don't display the --version and --help options in the usage
opt = optparser.get_option('--help')
if opt is not None:
opt.help = optparse.SUPPRESS_HELP
opt = optparser.get_option('--version')
if opt is not None:
opt.help = optparse.SUPPRESS_HELP
########################################################################
##
## parse and process the options and command line
##
########################################################################
(options, args) = optparser.parse_args(sys.argv[1:])
if len(args) == 0:
optparser.print_help()
sys.exit(0)
if len(args) > 1:
optparser.print_usage(sys.stderr)
sys.exit(22)
if options.showtables:
optparser.error('--showtables option is not yet implemented.')
if options.name is None:
optparser.error('''option --name must be specified''')
langClass = foxLanguages[options.language]
infilename = args[0]
if infilename == '-':
infile = sys.stdin
else:
try:
infile = open(infilename, 'r')
except IOError,e:
optparser.error('Unable to open %s for reading: %s' % (infilename, e.strerror))
########################################################################
##
## parse the input file and generate the lexer tables
##
########################################################################
lexer = flgLexerDriver(source=infile, filename=infilename)
parser = flgParserDriver()
lexer_tables = parser.parse(lexer)
########################################################################
##
## generate the output file
##
########################################################################
if options.outputfile == '-':
outputfile = sys.stdout
else:
if options.outputfile is None:
options.outputfile = options.name
if options.outputfile != '-' and '.' not in options.outputfile.rsplit('/',1)[-1]:
options.outputfile += langClass.extension
try:
outputfile = open(options.outputfile, 'w')
except IOError,e:
optparser.error('Unable to open %s for writing: %s' % (options.outputfile, e.strerror))
vars = {
'VERSION': VERSION,
'VERSION_STR': VERSION_STR,
'name': options.name,
'debug': options.debugging,
'flg_tables': lexer_tables,
'pythonexec': sys.executable,
}
langClass.writeFile(outputfile, langClass.templates[PROG], vars)
|
from .base import ByteableList
from .stroke import Stroke
class Layer(ByteableList):
__slots__ = "name"
@classmethod
def child_type(cls):
return Stroke
def __init__(self, name=None):
self.name = name
super().__init__()
def __str__(self):
return f"Layer: nobjs={len(self.objects)}"
|
from indy_node.server.request_handlers.domain_req_handlers.attribute_handler import AttributeHandler
from plenum.server.request_handlers.handler_interfaces.read_request_handler import ReadRequestHandler
from indy_common.constants import ATTRIB, GET_ATTR
from indy_node.server.request_handlers.utils import validate_attrib_keys
from plenum.common.constants import RAW, ENC, HASH, TARGET_NYM, DOMAIN_LEDGER_ID
from plenum.common.exceptions import InvalidClientRequest
from plenum.common.request import Request
from plenum.common.txn_util import get_request_data
from plenum.server.database_manager import DatabaseManager
from stp_core.common.log import getlogger
logger = getlogger()
class GetAttributeHandler(ReadRequestHandler):
def __init__(self, database_manager: DatabaseManager):
super().__init__(database_manager, GET_ATTR, DOMAIN_LEDGER_ID)
def get_result(self, request: Request):
self._validate_request_type(request)
identifier, req_id, operation = get_request_data(request)
if not validate_attrib_keys(operation):
raise InvalidClientRequest(identifier, req_id,
'{} should have one and only one of '
'{}, {}, {}'
.format(ATTRIB, RAW, ENC, HASH))
nym = operation[TARGET_NYM]
if RAW in operation:
attr_type = RAW
elif ENC in operation:
# If attribute is encrypted, it will be queried by its hash
attr_type = ENC
else:
attr_type = HASH
attr_key = operation[attr_type]
value, last_seq_no, last_update_time, proof = \
self.get_attr(did=nym, key=attr_key, attr_type=attr_type)
attr = None
if value is not None:
if HASH in operation:
attr = attr_key
else:
attr = value
return self.make_result(request=request,
data=attr,
last_seq_no=last_seq_no,
update_time=last_update_time,
proof=proof)
def get_attr(self,
did: str,
key: str,
attr_type,
is_committed=True) -> (str, int, int, list):
assert did is not None
assert key is not None
path = AttributeHandler.make_state_path_for_attr(did, key, attr_type == HASH)
try:
hashed_val, last_seq_no, last_update_time, proof = \
self.lookup(path, is_committed, with_proof=True)
except KeyError:
return None, None, None, None
if not hashed_val or hashed_val == '':
            # It's a HASH attribute
return hashed_val, last_seq_no, last_update_time, proof
else:
try:
value = self.database_manager.attribute_store.get(hashed_val)
except KeyError:
logger.error('Could not get value from attribute store for {}'
.format(hashed_val))
return None, None, None, None
return value, last_seq_no, last_update_time, proof
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 23:22:03 2019
@author: SERELPA1
"""
# Feature Scaling
from sklearn.preprocessing import StandardScaler
def feature_scaling(ds_train):
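    # Scale the full training array, and keep a second scaler fitted on the first
    # column only (assumed to be the prediction target) so that model outputs can
    # later be inverse-transformed back to the original scale.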
sc = StandardScaler()
ds_train_scaled = sc.fit_transform(ds_train)
sc_predict = StandardScaler()
sc_predict.fit_transform(ds_train[:,0:1])
return(sc, sc_predict, ds_train_scaled)
|
from django import forms
from django.utils.translation import ugettext as _
from dal import autocomplete
from dal import forward
from ..models import Country
class CountryForm(forms.ModelForm):
"""
Custom Country Form.
"""
class Meta:
model = Country
        fields = '__all__'
widgets = {
'jurisdiction': autocomplete.ModelSelect2(
url='geoware:country-autocomplete',
attrs={
'data-placeholder': _('LOCATION.COUNTRY.JURISDICTION'),
'data-minimum-input-length': 0,
}
),
'currency': autocomplete.ModelSelect2(
url='geoware:currency-autocomplete',
attrs={
'data-placeholder': _('LOCATION.CURRENCY'),
'data-minimum-input-length': 0,
}
),
'capital': autocomplete.ModelSelect2(
url='geoware:city-autocomplete',
forward=[forward.Field(src="jurisdiction", dst="country"), ],
attrs={
'data-placeholder': _('LOCATION.CAPITAL'),
'data-minimum-input-length': 0,
}
),
}
|
import astra
import numpy as np
import os
import pickle
from tqdm import tqdm
from time import time
from astropy.io import fits
from astropy.table import Table
from astra.utils import (log, timer)
from astra.tasks import BaseTask
from astra.tasks.io import LocalTargetTask
from astra.tasks.io.sdss5 import ApStarFile
from astra.tasks.io.sdss4 import SDSS4ApStarFile
from astra.tasks.targets import (DatabaseTarget, LocalTarget, AstraSource)
from astra.tasks.continuum import Sinusoidal
from astra.tools.spectrum import Spectrum1D
from astra.tools.spectrum.writers import create_astra_source
from astra.contrib.thepayne import training, test as testing
from astra.tasks.slurm import (slurm_mixin_factory, slurmify)
from astra.database import astradb
from luigi.parameter import (
BoolParameter, IntParameter, FloatParameter, Parameter
)
SlurmMixin = slurm_mixin_factory("ThePayne")
class ThePayneMixin(SlurmMixin, BaseTask):
task_namespace = "ThePayne"
n_steps = IntParameter(
default=100000,
config_path=dict(section=task_namespace, name="n_steps")
)
n_neurons = IntParameter(
default=300,
config_path=dict(section=task_namespace, name="n_neurons")
)
weight_decay = FloatParameter(
default=0.0,
config_path=dict(section=task_namespace, name="weight_decay")
)
learning_rate = FloatParameter(
default=0.001,
config_path=dict(section=task_namespace, name="learning_rate")
)
training_set_path = Parameter(
config_path=dict(section=task_namespace, name="training_set_path")
)
class TrainThePayne(ThePayneMixin):
"""
Train a single-layer neural network given a pre-computed grid of synthetic spectra.
:param training_set_path:
The path where the training set spectra and labels are stored.
This should be a binary pickle file that contains a dictionary with the following keys:
- wavelength: an array of shape (P, ) where P is the number of pixels
- spectra: an array of shape (N, P) where N is the number of spectra and P is the number of pixels
- labels: an array of shape (L, P) where L is the number of labels and P is the number of pixels
- label_names: a tuple of length L that contains the names of the labels
:param n_steps: (optional)
The number of steps to train the network for (default 100000).
:param n_neurons: (optional)
The number of neurons to use in the hidden layer (default: 300).
:param weight_decay: (optional)
The weight decay to use during training (default: 0)
:param learning_rate: (optional)
The learning rate to use during training (default: 0.001).
"""
def requires(self):
""" The requirements of this task."""
return LocalTargetTask(path=self.training_set_path)
@slurmify
def run(self):
""" Execute this task. """
wavelength, label_names, \
training_labels, training_spectra, \
validation_labels, validation_spectra = training.load_training_data(self.input().path)
state, model, optimizer = training.train(
training_spectra,
training_labels,
validation_spectra,
validation_labels,
label_names,
n_neurons=self.n_neurons,
n_steps=self.n_steps,
learning_rate=self.learning_rate,
weight_decay=self.weight_decay
)
with open(self.output().path, "wb") as fp:
pickle.dump(dict(
state=state,
wavelength=wavelength,
label_names=label_names,
),
fp
)
def output(self):
""" The output of this task. """
path = os.path.join(
self.output_base_dir,
f"{self.task_id}.pkl"
)
os.makedirs(os.path.dirname(path), exist_ok=True)
return LocalTarget(path)
class EstimateStellarLabels(ThePayneMixin):
"""
Use a pre-trained neural network to estimate stellar labels. This should be sub-classed to inherit properties from the type of spectra to be analysed.
:param training_set_path:
The path where the training set spectra and labels are stored.
This should be a binary pickle file that contains a dictionary with the following keys:
- wavelength: an array of shape (P, ) where P is the number of pixels
- spectra: an array of shape (N, P) where N is the number of spectra and P is the number of pixels
- labels: an array of shape (L, P) where L is the number of labels and P is the number of pixels
- label_names: a tuple of length L that contains the names of the labels
:param n_steps: (optional)
The number of steps to train the network for (default 100000).
:param n_neurons: (optional)
The number of neurons to use in the hidden layer (default: 300).
:param weight_decay: (optional)
The weight decay to use during training (default: 0)
:param learning_rate: (optional)
The learning rate to use during training (default: 0.001).
"""
max_batch_size = 10_000
analyze_individual_visits = BoolParameter(default=False)
def prepare_observation(self):
""" Prepare the observations for analysis. """
data_slice = None if self.analyze_individual_visits else [0, 1]
observation = Spectrum1D.read(
self.input()["observation"].path,
            data_slice=None if data_slice is None else slice(*data_slice)
)
if "continuum" in self.input():
continuum_path = self.input()["continuum"]["continuum"].path
while True:
with open(continuum_path, "rb") as fp:
continuum = pickle.load(fp)
# If there is a shape mis-match between the observations and the continuum
# then it likely means that there have been observations taken since the
# continuum task was run. In this case we need to re-run the continuum
# normalisation.
#log.debug(f"Continuum for {self} original shape {continuum.shape}")
                if data_slice is not None:
                    continuum = continuum[slice(*data_slice)]
#log.debug(f"New shapes {observation.flux.shape} {continuum.shape}")
O = observation.flux.shape[0]
C = continuum.shape[0]
# TODO: Consider if this is what we want to be doing..
if O == C:
break
else:
if O > C:
log.warn(f"Re-doing continuum for task {self} at runtime")
else:
log.warn(f"More continuum than observations in {self}?!")
os.unlink(continuum_path)
self.requires()["continuum"].run()
else:
continuum = 1
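        # Dividing the flux by the continuum scales the inverse variance by
        # continuum**2 (assuming observation.uncertainty.array stores inverse variance).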
normalized_flux = observation.flux.value / continuum
normalized_ivar = continuum * observation.uncertainty.array * continuum
return (observation, continuum, normalized_flux, normalized_ivar)
@slurmify
def run(self):
""" Execute this task. """
# Load the model.
log.info(f"Loading model for {self}")
state = testing.load_state(self.input()["model"].path)
# We can run this in batch mode.
label_names = state["label_names"]
tqdm_kwds = dict(total=self.get_batch_size(), desc="The Payne")
for init, task in tqdm(timer(self.get_batch_tasks()), **tqdm_kwds):
if task.complete():
continue
#log.debug(f"Running {task}")
spectrum, continuum, normalized_flux, normalized_ivar = task.prepare_observation()
#log.debug(f"Prepared observations for {task}")
p_opt, p_cov, model_flux, meta = testing.test(
spectrum.wavelength.value,
normalized_flux,
normalized_ivar,
**state
)
#log.debug(f"Completed inference on {task}. p_opt has shape {p_opt.shape}")
results = dict(zip(label_names, p_opt.T))
            # Note: count the number of label names here (it may or may not include
            # a radial velocity term), before the SNR and uncertainties are added.
L = len(results)
# Add in uncertainties on parameters.
results.update(dict(zip(
(f"u_{ln}" for ln in label_names),
np.sqrt(p_cov[:, np.arange(L), np.arange(L)].T)
)))
            # Add in SNR values for convenience.
results.update(snr=spectrum.meta["snr"])
# Write AstraSource object.
if "AstraSource" in task.output():
#log.debug(f"Writing AstraSource object for {task}")
task.output()["AstraSource"].write(
spectrum=spectrum,
normalized_flux=normalized_flux,
normalized_ivar=normalized_ivar,
continuum=continuum,
model_flux=model_flux,
# TODO: Project uncertainties to flux space.
model_ivar=None,
results_table=Table(results)
)
# Write output to database.
if "database" in task.output():
#log.debug(f"Writing database output for {task}")
task.output()["database"].write(results)
# Trigger this event as complete, and record task duration.
task.trigger_event_processing_time(time() - init, cascade=True)
return None
def output(self):
""" The output of this task. """
if self.is_batch_mode:
return (task.output() for task in self.get_batch_tasks())
return dict(
database=DatabaseTarget(astradb.ThePayne, self),
#AstraSource=AstraSource(self)
)
class ContinuumNormalizeGivenApStarFile(Sinusoidal, ApStarFile):
""" Pseudo-continuum normalise ApStar spectra using a sum of sines and cosines. """
def requires(self):
return self.clone(ApStarFile)
def output(self):
if self.is_batch_mode:
return (task.output() for task in self.get_batch_tasks())
# TODO: Re-factor to allow for SDSS-IV.
path = os.path.join(
self.output_base_dir,
f"star/{self.telescope}/{int(self.healpix/1000)}/{self.healpix}/",
f"Continuum-{self.apred}-{self.obj}-{self.task_id}.pkl"
)
# Create the directory structure if it does not exist already.
os.makedirs(os.path.dirname(path), exist_ok=True)
return dict(continuum=LocalTarget(path))
class ContinuumNormalizeGivenSDSS4ApStarFile(Sinusoidal, SDSS4ApStarFile):
""" Pseudo-continuum normalise SDSS-IV ApStar spectra using a sum of sines and cosines. """
def requires(self):
return self.clone(SDSS4ApStarFile)
def output(self):
if self.is_batch_mode:
return (task.output() for task in self.get_batch_tasks())
# TODO: What is the path system for SDSS-IV products?
path = os.path.join(
self.output_base_dir,
f"sdss4/{self.release}/{self.apred}/{self.telescope}/{self.field}/",
f"Continuum-{self.release}-{self.apred}-{self.telescope}-{self.obj}-{self.task_id}.pkl"
)
# Create directory structure if it does not exist already.
os.makedirs(os.path.dirname(path), exist_ok=True)
return dict(continuum=LocalTarget(path))
class EstimateStellarLabelsGivenApStarFile(EstimateStellarLabels, Sinusoidal, ApStarFile):
"""
Estimate stellar labels given a single-layer neural network and an ApStar file.
This task also requires all parameters that `astra.tasks.io.sdss5.ApStarFile` requires,
and that the `astra.tasks.continuum.Sinusoidal` task requires.
:param training_set_path:
The path where the training set spectra and labels are stored.
This should be a binary pickle file that contains a dictionary with the following keys:
- wavelength: an array of shape (P, ) where P is the number of pixels
- spectra: an array of shape (N, P) where N is the number of spectra and P is the number of pixels
- labels: an array of shape (L, P) where L is the number of labels and P is the number of pixels
- label_names: a tuple of length L that contains the names of the labels
:param n_steps: (optional)
The number of steps to train the network for (default: 100000).
:param n_neurons: (optional)
The number of neurons to use in the hidden layer (default: 300).
:param weight_decay: (optional)
The weight decay to use during training (default: 0)
:param learning_rate: (optional)
The learning rate to use during training (default: 0.001).
:param continuum_regions_path:
A path containing a list of (start, end) wavelength values that represent the regions to
fit as continuum.
"""
max_batch_size = 10_000
def requires(self):
return dict(
model=self.clone(TrainThePayne),
observation=self.clone(ApStarFile),
continuum=self.clone(ContinuumNormalizeGivenApStarFile)
)
class EstimateStellarLabelsGivenSDSS4ApStarFile(EstimateStellarLabels, Sinusoidal, SDSS4ApStarFile):
"""
Estimate stellar labels given a single-layer neural network and a SDSS-IV ApStar file.
This task also requires all parameters that `astra.tasks.io.sdss4.ApStarFile` requires,
and that the `astra.tasks.continuum.Sinusoidal` task requires.
:param training_set_path:
The path where the training set spectra and labels are stored.
This should be a binary pickle file that contains a dictionary with the following keys:
- wavelength: an array of shape (P, ) where P is the number of pixels
- spectra: an array of shape (N, P) where N is the number of spectra and P is the number of pixels
- labels: an array of shape (L, P) where L is the number of labels and P is the number of pixels
- label_names: a tuple of length L that contains the names of the labels
:param n_steps: (optional)
The number of steps to train the network for (default: 100000).
:param n_neurons: (optional)
The number of neurons to use in the hidden layer (default: 300).
:param weight_decay: (optional)
The weight decay to use during training (default: 0)
:param learning_rate: (optional)
The learning rate to use during training (default: 0.001).
:param continuum_regions_path:
A path containing a list of (start, end) wavelength values that represent the regions to
fit as continuum.
"""
max_batch_size = 10_000
def requires(self):
return dict(
model=self.clone(TrainThePayne),
observation=self.clone(SDSS4ApStarFile),
continuum=self.clone(ContinuumNormalizeGivenSDSS4ApStarFile)
)
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Functional tests for the Tastes app.
"""
# local
from tests.fixture_manager import get_fixtures
from tests.functional_tests import ModelAdminFunctionalTest
from .pages import TastePage, InlineTastePage
class TasteFunctionalTest(ModelAdminFunctionalTest):
"""
"""
url = '/admin/tastes/taste/add/'
fixtures = get_fixtures(['containers'])
def setUp(self):
super(TasteFunctionalTest, self).setUp()
self.page = TastePage(self.driver)
def test_taste(self):
"""
Tests autocompleted choices on the Taste admin page.
"""
self.page.scroll_to_bottom()
self.assertEqual(self.page.datetime.count(), 0)
self.assertEqual(self.page.location.count(), 0)
self.assertEqual(self.page.content.count(), 0)
self.assertEqual(self.page.title.count(), 0)
self.assertEqual(self.page.author.count(), 0)
self.page.container = 'labeled_post'
self.assertEqual(self.page.datetime.count(), 1)
self.assertEqual(self.page.location.count(), 2)
self.assertEqual(self.page.content.count(), 12)
self.assertEqual(self.page.title.count(), 12)
self.assertEqual(self.page.author.count(), 12)
self.page.container = 'mail'
self.assertEqual(self.page.datetime.count(), 1)
self.assertEqual(self.page.location.count(), 0)
self.assertEqual(self.page.content.count(), 4)
self.assertEqual(self.page.title.count(), 4)
self.assertEqual(self.page.author.count(), 4)
class InlineTasteFunctionalTest(ModelAdminFunctionalTest):
"""
Tests autocompleted choices on the Taste inline admin form.
"""
url = '/admin/containers/container/add/'
fixtures = get_fixtures(['containers'])
def setUp(self):
super(InlineTasteFunctionalTest, self).setUp()
self.page = InlineTastePage(self.driver)
def test_inline_taste(self):
"""
"""
self.page.scroll_to_bottom()
self.assertEqual(self.page.datetime.count(), 0)
self.assertEqual(self.page.location.count(), 0)
self.assertEqual(self.page.content.count(), 0)
self.assertEqual(self.page.title.count(), 0)
self.assertEqual(self.page.author.count(), 0)
self.page.scroll_to_top()
self.page.bottle = 'post'
self.page.scroll_to_bottom()
self.assertEqual(self.page.datetime.count(), 1)
self.assertEqual(self.page.location.count(), 1)
self.assertEqual(self.page.content.count(), 11)
self.assertEqual(self.page.title.count(), 11)
self.assertEqual(self.page.author.count(), 11)
self.page.scroll_to_top()
self.page.label = 'mail'
self.page.scroll_to_bottom()
self.assertEqual(self.page.datetime.count(), 1)
self.assertEqual(self.page.location.count(), 2)
self.assertEqual(self.page.content.count(), 12)
self.assertEqual(self.page.title.count(), 12)
self.assertEqual(self.page.author.count(), 12)
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_login_shortcut 1'] = {
'data': {
'node': {
'id': 'TG9naW5TaG9ydGN1dDoxMA==',
'name': 'foo'
}
}
}
snapshots['test_author 1'] = {
'data': {
'node': {
'extra': '{"movies": 1}',
'firstName': 'Winston',
'hasCollidingName': False,
'id': 'QXV0aG9yOjE=',
'lastName': 'Wolfe',
'totalReports': 2
}
}
}
snapshots['test_author__returns_only_if_is_author 1'] = {
'data': {
'node': None
}
}
snapshots['test_report 1'] = {
'data': {
'node': {
'author': {
'extra': '{"movies": 1}',
'firstName': 'Winston',
'hasCollidingName': False,
'id': 'QXV0aG9yOjE=',
'lastName': 'Wolfe',
'totalReports': 2
},
'body': 'Long story short: we got the Ring!',
'date': '2018-01-01 00:00:00+00:00',
'edited': '2018-01-02 03:00:00+00:00',
'extra': None,
'id': 'UmVwb3J0OjE=',
'isDraft': False,
'otherParticipants': 'Saruman',
'ourParticipants': 'Frodo, Gandalf',
'providedBenefit': '',
'published': '2018-01-02 00:00:00+00:00',
'receivedBenefit': 'The Ring',
'title': 'The Fellowship of the Ring'
}
}
}
snapshots['test_user__unauthorized 1'] = {
'data': {
'node': None
}
}
snapshots['test_user__not_a_viewer 1'] = {
'data': {
'node': None
}
}
snapshots['test_user 1'] = {
'data': {
'node': {
'extra': '{"e": "mc2"}',
'firstName': 'Albert',
'hasCollidingName': False,
'id': 'VXNlcjo4',
'isAuthor': False,
'lastName': 'Einstein',
'openidUid': 'albert@einstein.id'
}
}
}
snapshots['test_report__is_draft__unauthorized_viewer 1'] = {
'data': {
'node': None
}
}
snapshots['test_report__is_draft__viewer_is_not_author 1'] = {
'data': {
'node': None
}
}
snapshots['test_report__is_draft 1'] = {
'data': {
'node': {
'id': 'UmVwb3J0OjQ=',
'isDraft': True,
'title': 'The Silmarillion'
}
}
}
snapshots['test_report__without_revisions 1'] = {
'data': {
'node': {
'hasRevisions': False,
'id': 'UmVwb3J0OjM=',
'revisions': [
],
'title': 'The Return of the King'
}
}
}
snapshots['test_report__with_revisions 1'] = {
'data': {
'node': {
'body': 'Another long story.',
'date': '2018-01-03 00:00:00+00:00',
'edited': '2018-01-04 05:00:00+00:00',
'extra': '{"rings": 1}',
'hasRevisions': True,
'id': 'UmVwb3J0OjI=',
'isDraft': False,
'otherParticipants': 'Saruman, Sauron',
'ourParticipants': 'Frodo, Gimli, Legolas',
'providedBenefit': '',
'published': '2018-01-04 00:00:00+00:00',
'receivedBenefit': 'Mithrill Jacket',
'revisions': [
{
'body': 'What am I doing?',
'date': '2018-01-03 00:00:00+00:00',
'edited': '2018-02-05 00:00:00+00:00',
'extra': '{"rings": 1}',
'id': 'UmVwb3J0Ojc=',
'isDraft': False,
'otherParticipants': '',
'ourParticipants': 'Ringo Starr',
'providedBenefit': 'The Ringo',
'published': '2018-01-04 00:00:00+00:00',
'receivedBenefit': 'Jacket',
'title': 'The Towels'
},
{
'body': 'Nothing yet.',
'date': '2018-01-03 00:00:00+00:00',
'edited': '2018-02-01 00:00:00+00:00',
'extra': None,
'id': 'UmVwb3J0OjY=',
'isDraft': False,
'otherParticipants': '',
'ourParticipants': '',
'providedBenefit': '',
'published': '2018-01-04 00:00:00+00:00',
'receivedBenefit': 'old bread',
'title': 'Oldest story'
}
],
'title': 'The Two Towers'
}
}
}
|
import logging
from .paths import DetectionPaths
class DetectionLogs():
# Set Logging configs
LOG_FORMAT = "%(levelname)s %(asctime)s - - %(message)s"
logging.basicConfig(filename=DetectionPaths.LOG_PATH,
level=logging.DEBUG,
format=LOG_FORMAT)
logger = logging.getLogger()
@classmethod
def info(cls, location, information):
        # Log an informational message, tagged with the location it came from.
cls.logger.info('FROM {} :: {}'.format(location, information))
@classmethod
def warning(cls, location, warning):
cls.logger.warning('FROM {} :: {}'.format(location, warning))
|